| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
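The records below follow this schema; the export flattened each row. A minimal sketch of how one shard of such a dump could be inspected, assuming it is available as a Parquet file (the file name here is a placeholder):

```python
import pandas as pd

# Placeholder shard name; any file with the schema above would work.
df = pd.read_parquet("data-00000-of-00001.parquet")

row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
print(row["content"][:200])  # the source file itself lives in the `content` column
```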
hexsha: d99a1e98eccb58cbc0c0cef6e9e6702f33461b0e | size: 5,886 | ext: py | lang: Python
max_stars: path public_data/serializers.py | repo MTES-MCT/sparte | head 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | licenses ["MIT"] | count null | events null to null
max_issues: path public_data/serializers.py | repo MTES-MCT/sparte | head 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | licenses ["MIT"] | count 3 | events 2022-02-10T11:47:58.000Z to 2022-02-23T18:50:24.000Z
max_forks: path public_data/serializers.py | repo MTES-MCT/sparte | head 3b8ae6d21da81ca761d64ae9dfe2c8f54487211c | licenses ["MIT"] | count null | events null to null
content:
from rest_framework_gis import serializers
from rest_framework import serializers as s

from .models import (
    Artificialisee2015to2018,
    Artificielle2018,
    CommunesSybarval,
    CouvertureSol,
    EnveloppeUrbaine2018,
    Ocsge,
    Renaturee2018to2015,
    Sybarval,
    Voirie2018,
    ZonesBaties2018,
    UsageSol,
)


def get_label(code="", label=""):
    if code is None:
        code = "-"
    if label is None:
        label = "inconnu"
    return f"{code} {label[:30]}"


class Artificialisee2015to2018Serializer(serializers.GeoFeatureModelSerializer):
    usage_2015 = s.SerializerMethodField()
    usage_2018 = s.SerializerMethodField()
    couverture_2015 = s.SerializerMethodField()
    couverture_2018 = s.SerializerMethodField()

    def get_usage_2015(self, obj):
        return get_label(code=obj.us_2015, label=obj.us_2015_label)

    def get_usage_2018(self, obj):
        return get_label(code=obj.us_2018, label=obj.us_2018_label)

    def get_couverture_2015(self, obj):
        return get_label(code=obj.cs_2015, label=obj.cs_2015_label)

    def get_couverture_2018(self, obj):
        return get_label(code=obj.cs_2018, label=obj.cs_2018_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "usage_2015",
            "usage_2018",
            "couverture_2015",
            "couverture_2018",
        )
        geo_field = "mpoly"
        model = Artificialisee2015to2018


class Artificielle2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "couverture",
        )
        geo_field = "mpoly"
        model = Artificielle2018


class CommunesSybarvalSerializer(serializers.GeoFeatureModelSerializer):
    """Marker GeoJSON serializer."""

    class Meta:
        """Marker serializer meta class."""

        fields = (
            "nom",
            "code_insee",
            "surface",
        )
        geo_field = "mpoly"
        model = CommunesSybarval


class EnveloppeUrbaine2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "surface",
        )
        geo_field = "mpoly"
        model = EnveloppeUrbaine2018


class OcsgeSerializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "usage",
            "millesime",
            "map_color",
            "year",
        )
        geo_field = "mpoly"
        model = Ocsge


class Renaturee2018to2015Serializer(serializers.GeoFeatureModelSerializer):
    usage_2015 = s.SerializerMethodField()
    usage_2018 = s.SerializerMethodField()
    couverture_2015 = s.SerializerMethodField()
    couverture_2018 = s.SerializerMethodField()

    def get_usage_2015(self, obj):
        return get_label(code=obj.us_2015, label=obj.us_2015_label)

    def get_usage_2018(self, obj):
        return get_label(code=obj.us_2018, label=obj.us_2018_label)

    def get_couverture_2015(self, obj):
        return get_label(code=obj.cs_2015, label=obj.cs_2015_label)

    def get_couverture_2018(self, obj):
        return get_label(code=obj.cs_2018, label=obj.cs_2018_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "usage_2015",
            "usage_2018",
            "couverture_2015",
            "couverture_2018",
        )
        geo_field = "mpoly"
        model = Renaturee2018to2015


class SybarvalSerializer(serializers.GeoFeatureModelSerializer):
    class Meta:
        fields = (
            "id",
            "surface",
        )
        geo_field = "mpoly"
        model = Sybarval


class Voirie2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "surface",
            "couverture",
            "usage",
        )
        geo_field = "mpoly"
        model = Voirie2018


class ZonesBaties2018Serializer(serializers.GeoFeatureModelSerializer):
    couverture = s.SerializerMethodField()
    usage = s.SerializerMethodField()

    def get_couverture(self, obj):
        return get_label(code=obj.couverture, label=obj.couverture_label)

    def get_usage(self, obj):
        return get_label(code=obj.usage, label=obj.usage_label)

    class Meta:
        fields = (
            "id",
            "couverture",
            "usage",
            "surface",
        )
        geo_field = "mpoly"
        model = ZonesBaties2018


class CouvertureSolSerializer(serializers.ModelSerializer):
    class Meta:
        fields = (
            "id",
            "parent",
            "code",
            "label",
            "is_artificial",
        )
        model = CouvertureSol


class UsageSolSerializer(serializers.ModelSerializer):
    class Meta:
        fields = (
            "id",
            "parent",
            "code",
            "label",
        )
        model = UsageSol
| 25.37069 | 80 | 0.613829 | 558 | 5,886 | 6.28853 | 0.130824 | 0.029068 | 0.058136 | 0.072955 | 0.702194 | 0.668852 | 0.666002 | 0.656312 | 0.656312 | 0.623254 | 0 | 0.061826 | 0.29103 | 5,886 | 231 | 81 | 25.480519 | 0.779056 | 0.009514 | 0 | 0.664804 | 0 | 0 | 0.067194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.094972 | false | 0 | 0.01676 | 0.089385 | 0.418994 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
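The serializers above all follow one pattern: a `SerializerMethodField` per label column, resolved through `get_label`. A usage sketch (not part of the file; the import paths follow the repo layout in the metadata, and a populated database is assumed):

```python
from public_data.models import Ocsge
from public_data.serializers import OcsgeSerializer

# GeoFeatureModelSerializer renders a queryset as a GeoJSON FeatureCollection,
# with the `mpoly` field declared above used as the geometry.
feature_collection = OcsgeSerializer(Ocsge.objects.all(), many=True).data
```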
hexsha: d9a0daeef5f3a3455af5c2983af478cd08c74a7b | size: 11,247 | ext: py | lang: Python
max_stars: path map_download/cmd/TerrainDownloader.py | repo cugxy/map_download | head 02142b33edb2bc163f7ae971f443efe84c13e029 | licenses ["MIT"] | count 27 | events 2019-04-02T08:34:16.000Z to 2022-01-11T01:48:50.000Z
max_issues: path map_download/cmd/TerrainDownloader.py | repo cugxy/map_download | head 02142b33edb2bc163f7ae971f443efe84c13e029 | licenses ["MIT"] | count 8 | events 2019-10-10T03:03:51.000Z to 2021-11-14T11:01:47.000Z
max_forks: path map_download/cmd/TerrainDownloader.py | repo cugxy/map_download | head 02142b33edb2bc163f7ae971f443efe84c13e029 | licenses ["MIT"] | count 7 | events 2019-04-02T08:43:04.000Z to 2020-08-11T02:14:24.000Z
content:
# -*- coding: utf-8 -*-
# coding=utf-8
import json
import os
import math
import logging
import requests
import time

from map_download.cmd.BaseDownloader import DownloadEngine, BaseDownloaderThread, latlng2tile_terrain, BoundBox


def get_access_token(token):
    resp = None
    request_count = 0
    url = "https://api.cesium.com/v1/assets/1/endpoint"
    while True:
        if request_count > 4:
            break
        try:
            request_count += 1
            param = {'access_token': token}
            resp = requests.get(url, params=param, timeout=2)
            if resp.status_code != 200:
                continue
            break
        except Exception as e:
            resp = None
            time.sleep(3)
    if resp is None:
        return None
    resp_json = resp.json()
    return resp_json.get('accessToken')


class TerrainDownloaderThread(BaseDownloaderThread):
    URL = "https://assets.cesium.com/1/{z}/{x}/{y}.terrain?extensions=octvertexnormals-watermask&v=1.1.0"

    def __init__(self, root_dir, bbox, token, task_q, logger=None, write_db=False):
        super(TerrainDownloaderThread, self).__init__(
            root_dir, bbox, task_q, logger, write_db=write_db, db_file_name='Terrain.db')
        self.token = token
        self._init_metadata(
            format='terrain',
            bounds='%f,%f,%f,%f' % (self.bbox.min_lng, self.bbox.min_lat, self.bbox.max_lng, self.bbox.max_lat))

    def get_url(self, x, y, z):
        return self.URL.format(x=x, y=y, z=z)

    def _download(self, x, y, z):
        file_path = '%s/%s/%i/%i/%i.%s' % (self.root_dir, 'Terrain', z, x, y, 'terrain')
        if os.path.exists(file_path):
            self._data2DB(x, y, z, file_path)
            return 0
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        resp = None
        require_count = 0
        _url = ''
        access_token = get_access_token(self.token)
        if access_token is None:
            return -1
        param = {'extensions': 'octvertexnormals-watermask', 'v': '1.1.0', 'access_token': access_token}
        while True:
            if require_count > 4:
                break
            try:
                _url = self.get_url(x, y, z)
                resp = requests.get(_url, params=param, stream=True, timeout=2)
                break
            except Exception as e:
                resp = None
                time.sleep(3)
            require_count += 1
        if resp is None:
            return -1
        if resp.status_code != 200:
            return -1
        try:
            with open(file_path, 'wb') as f:
                for chunk in resp.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)
        except Exception as e:
            return -1
        self._data2DB(x, y, z, file_path)
        return 1


class TerrainDownloadEngine(DownloadEngine):
    root_dir = ''

    def __init__(self, root_dir, bbox, token, thread_num, logger=None, write_db=False):
        super(TerrainDownloadEngine, self).__init__(bbox, thread_num, logger, write_db=write_db)
        self.root_dir = root_dir
        self.token = token

    def bbox2xyz(self, bbox, z):
        min_x, min_y = latlng2tile_terrain(bbox.min_lat, bbox.min_lng, z)
        max_x, max_y = latlng2tile_terrain(bbox.max_lat, bbox.max_lng, z)
        return math.floor(min_x), math.floor(min_y), math.ceil(max_x) + 1, math.ceil(max_y) + 1

    def generate_metadata(self):
        try:
            metadatas = {
                "attribution": "© Analytical Graphics Inc., © CGIAR-CSI, Produced using Copernicus data and "
                               "information funded by the European Union - EU-DEM layers",
                "available": [
                    # One entry per zoom level 0-14: the tile range available at that level.
                    [{"endX": 1, "endY": 0, "startX": 0, "startY": 0}],
                    [{"endX": 3, "endY": 1, "startX": 0, "startY": 0}],
                    [{"endX": 7, "endY": 3, "startX": 0, "startY": 0}],
                    [{"endX": 15, "endY": 7, "startX": 0, "startY": 0}],
                    [{"endX": 31, "endY": 15, "startX": 0, "startY": 0}],
                    [{"endX": 63, "endY": 31, "startX": 0, "startY": 0}],
                    [{"endX": 127, "endY": 63, "startX": 0, "startY": 0}],
                    [{"endX": 255, "endY": 127, "startX": 0, "startY": 0}],
                    [{"endX": 511, "endY": 255, "startX": 0, "startY": 0}],
                    [{"endX": 1023, "endY": 511, "startX": 0, "startY": 0}],
                    [{"endX": 2047, "endY": 1023, "startX": 0, "startY": 0}],
                    [{"endX": 4095, "endY": 2047, "startX": 0, "startY": 0}],
                    [{"endX": 8191, "endY": 4095, "startX": 0, "startY": 0}],
                    [{"endX": 16383, "endY": 8191, "startX": 0, "startY": 0}],
                    [{"endX": 32767, "endY": 16383, "startX": 0, "startY": 0}],
                ],
                "bounds": [-180, -90, 180, 90, ],
                "description": "STK World Terrain Premium Tileset, v1.3. 10m - 30m resolution CONUS, 30m resolution "
                               "SRTM between 60N and 60S, 30m Europe. Minimum global coverage of 1000m.",
                "extensions": ["watermask", "vertexnormals", "octvertexnormals", ],
                "format": "quantized-mesh-1.0",
                "maxzoom": 13,
                "minzoom": 0,
                "name": "world",
                "projection": "EPSG:4326",
                "scheme": "tms",
                "tilejson": "2.1.0",
                "tiles": ["{z}/{x}/{y}.terrain?v={version}", ],
                "version": "1.31376.0"
            }
            _dir = os.path.join(self.root_dir, 'Terrain')
            os.makedirs(_dir, exist_ok=True)
            metadatas_path = os.path.join(_dir, 'layer.json')
            with open(metadatas_path, 'w') as f:
                json.dump(metadatas, f)
        except Exception as e:
            if self.logger is not None:
                self.logger.exception(e)

    def run(self):
        try:
            self.generate_metadata()
            count = 0
            bboxs = self.cut_bbox()
            for bbox in bboxs:
                _count = self.get_task_count(bbox)
                count += _count
            self.division_done_signal.emit(count)
            for bbox in bboxs:
                while True:
                    if not self.running:
                        time.sleep(0.01)
                    else:
                        break
                task_q = self.get_task_queue(bbox)
                self.threads = []
                for i in range(self.thread_num):
                    thread = TerrainDownloaderThread(self.root_dir, self.bbox, self.token, task_q, self.logger,
                                                     write_db=self.write_db)
                    thread.sub_progressBar_updated_signal.connect(self.sub_update_progressBar)
                    self.threads.append(thread)
                for thread in self.threads:
                    thread.start()
                for thread in self.threads:
                    thread.wait()
                for t in self.threads:
                    t.stop()
                    t.quit()
            self.threads = []
            self.download_done_signal.emit()
        except Exception as e:
            if self.logger is not None:
                self.logger.error(e)


if __name__ == '__main__':
    if 1:
        logger = logging.getLogger('down')
        try:
            root = r'/Users/cugxy/Documents/data/downloader'
            formatter = logging.Formatter('%(levelname)s-%(message)s')
            hdlr = logging.StreamHandler()
            log_file = os.path.join(root, 'down.log')
            file_hdlr = logging.FileHandler(log_file)
            file_hdlr.setFormatter(formatter)
            logger.addHandler(file_hdlr)
            logger.addHandler(hdlr)
            logger.setLevel(logging.INFO)
            min_lng = -180.0
            max_lng = 180.0
            min_lat = -90.0
            max_lat = 90.0
            start_zoom = 0
            end_zoom = 5
            bbox = BoundBox(max_lat, max_lng, min_lat, min_lng, start_zoom, end_zoom)
            token = ''  # a Cesium ion access token must be supplied here; the original call omitted it
            d = TerrainDownloadEngine(root, bbox, token, 8, logger)
            d.start()
            time.sleep(10000)
            logger.error('main thread out')
        except Exception as e:
            logger.error(e)
    if 0:
        accessToken = get_access_token(token)  # the original call omitted the required token argument
        pass
| 35.479495 | 117 | 0.384992 | 980 | 11,247 | 4.266327 | 0.257143 | 0.025114 | 0.04664 | 0.050227 | 0.218369 | 0.128199 | 0.088017 | 0.056446 | 0.043052 | 0.043052 | 0 | 0.043237 | 0.518805 | 11,247 | 316 | 118 | 35.591772 | 0.728936 | 0.003112 | 0 | 0.30303 | 0 | 0.003367 | 0.107423 | 0.010707 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026936 | false | 0.003367 | 0.023569 | 0.003367 | 0.097643 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
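`get_access_token` and `_download` above hand-roll the same retry-until-budget-exhausted loop. A generic sketch of that pattern as a standalone helper (new code, not part of the original file):

```python
import time

import requests


def get_with_retries(url, params=None, attempts=5, wait_seconds=3, timeout=2):
    """Return a 200 response, or None once the attempt budget is spent."""
    for _ in range(attempts):
        try:
            resp = requests.get(url, params=params, timeout=timeout)
            if resp.status_code == 200:
                return resp
        except requests.RequestException:
            pass  # treat network errors like bad status codes: wait, then retry
        time.sleep(wait_seconds)
    return None
```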
hexsha: d9aeee22298fa03239ef3d63fdcaa4984d37ba63 | size: 3,030 | ext: py | lang: Python
max_stars: path content/test/gpu/gpu_tests/pixel_expectations.py | repo metux/chromium-deb | head 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | count null | events null to null
max_issues: path content/test/gpu/gpu_tests/pixel_expectations.py | repo metux/chromium-deb | head 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | count null | events null to null
max_forks: path content/test/gpu/gpu_tests/pixel_expectations.py | repo metux/chromium-deb | head 3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42 | licenses ["BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause"] | count null | events null to null
content:
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

from gpu_tests.gpu_test_expectations import GpuTestExpectations

# See the GpuTestExpectations class for documentation.


class PixelExpectations(GpuTestExpectations):
  def SetExpectations(self):
    # Sample Usage:
    # self.Fail('Pixel_Canvas2DRedBox',
    #     ['mac', 'amd', ('nvidia', 0x1234)], bug=123)

    # Seems to be flaky on the new AMD R7 240 drivers.
    self.Flaky('Pixel_GpuRasterization_BlueBox',
        ['win', ('amd', 0x6613)], bug=653538)

    # Software compositing is not supported on Android; so we skip these tests
    # that disable gpu compositing on Android platforms.
    self.Skip('Pixel_OffscreenCanvasUnaccelerated2D', ['android'])
    self.Skip('Pixel_OffscreenCanvasUnaccelerated2DWorker', ['android'])
    self.Skip('Pixel_OffscreenCanvasWebGLSoftwareCompositing', ['android'])
    self.Skip('Pixel_OffscreenCanvasWebGLSoftwareCompositingWorker',
        ['android'])
    self.Skip('Pixel_CanvasDisplayLinearRGBUnaccelerated2D', ['android'])

    self.Fail('Pixel_ScissorTestWithPreserveDrawingBuffer',
        ['android'], bug=521588)

    # TODO(ccameron) fix these on Mac Retina
    self.Fail('Pixel_CSS3DBlueBox', ['mac'], bug=533690)

    # TODO(vmiura) check / generate reference images for Android devices
    self.Fail('Pixel_SolidColorBackground', ['mac', 'android'], bug=624256)

    self.Fail('Pixel_OffscreenCanvasUnaccelerated2DGPUCompositingWorker',
        ['mac', ('nvidia', 0xfe9)], bug=706016)
    self.Fail('Pixel_CSSFilterEffects',
        ['mac', ('nvidia', 0xfe9)], bug=690277)

    # TODO(kbr): flakily timing out on this configuration.
    self.Flaky('*', ['linux', 'intel', 'debug'], bug=648369)

    self.Flaky('Pixel_Video_MP4', ['android', 'nvidia'], bug=716564)

    # Flaky for unknown reasons only on macOS. Not planning to investigate
    # further.
    self.Flaky('Pixel_ScissorTestWithPreserveDrawingBuffer', ['mac'],
        bug=660461)

    self.Flaky('Pixel_OffscreenCanvas2DResizeOnWorker',
        ['win10', ('intel', 0x1912)], bug=690663)

    # TODO(zakerinasab): check / generate reference images.
    self.Fail('Pixel_Canvas2DUntagged', bug=713632)

    self.Flaky('Pixel_OffscreenCanvasTransferBeforeStyleResize',
        ['mac', 'linux', 'win', 'android'], bug=735228)
    self.Flaky('Pixel_OffscreenCanvasTransferAfterStyleResize',
        ['mac', 'linux', 'win', 'android'], bug=735171)

    # TODO(junov): update reference images
    self.Fail('Pixel_CSSFilterEffects', ['mac'], bug=721727)
    self.Fail('Pixel_CSSFilterEffects_NoOverlays', ['mac'], bug=721727)

    # TODO(dshwang): remove these after new reference images are generated.
    self.Fail('Pixel_DirectComposition_Video_MP4', bug=615325)
    self.Fail('Pixel_DirectComposition_Video_VP9', bug=615325)
    self.Fail('Pixel_Video_MP4', bug=615325)
    self.Fail('Pixel_Video_VP9', bug=615325)
| 42.083333 | 78 | 0.706931 | 325 | 3,030 | 6.48 | 0.446154 | 0.049383 | 0.080247 | 0.037987 | 0.136752 | 0.041311 | 0.02849 | 0 | 0 | 0 | 0 | 0.062896 | 0.165677 | 3,030 | 71 | 79 | 42.676056 | 0.770174 | 0.289439 | 0 | 0 | 0 | 0 | 0.436739 | 0.330834 | 0 | 0 | 0.010309 | 0.014085 | 0 | 1 | 0.027778 | false | 0 | 0.027778 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
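The file above is pure configuration: each call maps a test name plus platform/GPU tags to an expectation. A stripped-down subclass showing the three call shapes, reusing the sample from the file's own comment (tags and bug numbers are illustrative):

```python
class ExamplePixelExpectations(GpuTestExpectations):
  def SetExpectations(self):
    self.Fail('Pixel_Canvas2DRedBox',
        ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
    self.Flaky('Pixel_Video_MP4', ['android', 'nvidia'], bug=716564)
    self.Skip('Pixel_OffscreenCanvasUnaccelerated2D', ['android'])
```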
hexsha: d9b0c3d32e07c56a0732f0fca454740538a940fe | size: 451 | ext: py | lang: Python
max_stars: path setup.py | repo Kaslanarian/PythonSVM | head 715eeef2a245736167addf45a6aee8b40b54d0c7 | licenses ["MIT"] | count 2 | events 2021-09-25T01:00:37.000Z to 2021-09-27T12:13:24.000Z
max_issues: path setup.py | repo Kaslanarian/PythonSVM | head 715eeef2a245736167addf45a6aee8b40b54d0c7 | licenses ["MIT"] | count 1 | events 2021-09-17T12:08:14.000Z to 2021-09-17T12:08:14.000Z
max_forks: path setup.py | repo Kaslanarian/PythonSVM | head 715eeef2a245736167addf45a6aee8b40b54d0c7 | licenses ["MIT"] | count null | events null to null
content:
import setuptools  # enables develop

setuptools.setup(
    name='pysvm',
    version='0.1',
    description='PySVM : A NumPy implementation of SVM based on SMO algorithm',
    author_email="191300064@smail.nju.edu.cn",
    packages=['pysvm'],
    license='MIT License',
    long_description=open('README.md', encoding='utf-8').read(),
    install_requires=[  # automatically install dependencies
        'numpy', 'sklearn'
    ],
    url='https://github.com/Kaslanarian/PySVM',
)
| 28.1875 | 79 | 0.660754 | 54 | 451 | 5.462963 | 0.87037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03252 | 0.181818 | 451 | 15 | 80 | 30.066667 | 0.766938 | 0.046563 | 0 | 0 | 0 | 0 | 0.401869 | 0.060748 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
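A brief usage sketch implied by the `setup()` call above (the install step is standard pip; the package's actual exports depend on its contents):

```python
# After `pip install .` from the repo root, the declared package is importable:
import pysvm  # name taken from packages=['pysvm'] above
```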
hexsha: d9b2e0c418fbf0ff7ba59e80c34fb2974714b1c9 | size: 398 | ext: py | lang: Python
max_stars: path polling_stations/apps/data_collection/management/commands/import_torbay.py | repo chris48s/UK-Polling-Stations | head 4742b527dae94f0276d35c80460837be743b7d17 | licenses ["BSD-3-Clause"] | count null | events null to null
max_issues: path polling_stations/apps/data_collection/management/commands/import_torbay.py | repo chris48s/UK-Polling-Stations | head 4742b527dae94f0276d35c80460837be743b7d17 | licenses ["BSD-3-Clause"] | count null | events null to null
max_forks: path polling_stations/apps/data_collection/management/commands/import_torbay.py | repo chris48s/UK-Polling-Stations | head 4742b527dae94f0276d35c80460837be743b7d17 | licenses ["BSD-3-Clause"] | count null | events null to null
content:
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter


class Command(BaseXpressDemocracyClubCsvImporter):
    council_id = 'E06000027'
    addresses_name = 'parl.2017-06-08/Version 1/Torbay Democracy_Club__08June2017.tsv'
    stations_name = 'parl.2017-06-08/Version 1/Torbay Democracy_Club__08June2017.tsv'
    elections = ['parl.2017-06-08']
    csv_delimiter = '\t'
| 44.222222 | 86 | 0.788945 | 47 | 398 | 6.446809 | 0.638298 | 0.079208 | 0.09901 | 0.118812 | 0.369637 | 0.369637 | 0.369637 | 0.369637 | 0.369637 | 0.369637 | 0 | 0.130682 | 0.115578 | 398 | 8 | 87 | 49.75 | 0.730114 | 0 | 0 | 0 | 0 | 0 | 0.38191 | 0.266332 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 1.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
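The importer above is a Django management command, so its name derives from the module file name. A usage sketch, assuming a configured Django project with this app installed:

```python
from django.core.management import call_command

call_command('import_torbay')  # runs the Command class defined above
```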
hexsha: d9b79f86fa592dbe24c72c454192af966a916a5a | size: 12,444 | ext: py | lang: Python
max_stars: path eth2/beacon/chains/base.py | repo mhchia/trinity | head e40e475064ca4605887706e9b0e4f8e2349b10cd | licenses ["MIT"] | count null | events null to null
max_issues: path eth2/beacon/chains/base.py | repo mhchia/trinity | head e40e475064ca4605887706e9b0e4f8e2349b10cd | licenses ["MIT"] | count null | events null to null
max_forks: path eth2/beacon/chains/base.py | repo mhchia/trinity | head e40e475064ca4605887706e9b0e4f8e2349b10cd | licenses ["MIT"] | count null | events null to null
content:
from abc import (
    ABC,
    abstractmethod,
)
import logging

from typing import (
    TYPE_CHECKING,
    Tuple,
    Type,
)

from eth._utils.datatypes import (
    Configurable,
)
from eth.db.backends.base import (
    BaseAtomicDB,
)
from eth.exceptions import (
    BlockNotFound,
)
from eth.validation import (
    validate_word,
)
from eth_typing import (
    Hash32,
)
from eth_utils import (
    ValidationError,
    encode_hex,
)

from eth2._utils.ssz import (
    validate_imported_block_unchanged,
)
from eth2.beacon.db.chain import (
    BaseBeaconChainDB,
    BeaconChainDB,
)
from eth2.beacon.exceptions import (
    BlockClassError,
    StateMachineNotFound,
)
from eth2.beacon.types.blocks import (
    BaseBeaconBlock,
)
from eth2.beacon.types.states import (
    BeaconState,
)
from eth2.beacon.typing import (
    FromBlockParams,
    Slot,
)
from eth2.beacon.validation import (
    validate_slot,
)

if TYPE_CHECKING:
    from eth2.beacon.state_machines.base import (  # noqa: F401
        BaseBeaconStateMachine,
    )


class BaseBeaconChain(Configurable, ABC):
    """
    The base class for all BeaconChain objects
    """
    chaindb = None  # type: BaseBeaconChainDB
    chaindb_class = None  # type: Type[BaseBeaconChainDB]
    sm_configuration = None  # type: Tuple[Tuple[Slot, Type[BaseBeaconStateMachine]], ...]
    chain_id = None  # type: int

    #
    # Helpers
    #
    @classmethod
    @abstractmethod
    def get_chaindb_class(cls) -> Type[BaseBeaconChainDB]:
        pass

    #
    # Chain API
    #
    @classmethod
    @abstractmethod
    def from_genesis(cls,
                     base_db: BaseAtomicDB,
                     genesis_state: BeaconState,
                     genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
        pass

    #
    # State Machine API
    #
    @classmethod
    @abstractmethod
    def get_state_machine_class(
            cls,
            block: BaseBeaconBlock) -> Type['BaseBeaconStateMachine']:
        pass

    @abstractmethod
    def get_state_machine(self, at_block: BaseBeaconBlock=None) -> 'BaseBeaconStateMachine':
        pass

    @classmethod
    @abstractmethod
    def get_state_machine_class_for_block_slot(
            cls,
            slot: Slot) -> Type['BaseBeaconStateMachine']:
        pass

    #
    # Block API
    #
    @abstractmethod
    def get_block_class(self, block_root: Hash32) -> Type[BaseBeaconBlock]:
        pass

    @abstractmethod
    def create_block_from_parent(self,
                                 parent_block: BaseBeaconBlock,
                                 block_params: FromBlockParams) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_block_by_root(self, block_root: Hash32) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_canonical_head(self) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_score(self, block_root: Hash32) -> int:
        pass

    @abstractmethod
    def ensure_block(self, block: BaseBeaconBlock=None) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_block(self) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_canonical_block_by_slot(self, slot: Slot) -> BaseBeaconBlock:
        pass

    @abstractmethod
    def get_canonical_block_root(self, slot: Slot) -> Hash32:
        pass

    @abstractmethod
    def import_block(
            self,
            block: BaseBeaconBlock,
            perform_validation: bool=True
    ) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
        pass


class BeaconChain(BaseBeaconChain):
    """
    A Chain is a combination of one or more ``StateMachine`` classes. Each ``StateMachine``
    is associated with a range of slots. The Chain class acts as a wrapper around these other
    StateMachine classes, delegating operations to the appropriate StateMachine depending on the
    current block slot number.
    """
    logger = logging.getLogger("eth2.beacon.chains.BeaconChain")

    chaindb_class = BeaconChainDB  # type: Type[BaseBeaconChainDB]

    def __init__(self, base_db: BaseAtomicDB) -> None:
        if not self.sm_configuration:
            raise ValueError(
                "The Chain class cannot be instantiated with an empty `sm_configuration`"
            )
        else:
            # TODO implement validate_sm_configuration(self.sm_configuration)
            # validate_sm_configuration(self.sm_configuration)
            pass

        self.chaindb = self.get_chaindb_class()(base_db)

    #
    # Helpers
    #
    @classmethod
    def get_chaindb_class(cls) -> Type['BaseBeaconChainDB']:
        if cls.chaindb_class is None:
            raise AttributeError("`chaindb_class` not set")
        return cls.chaindb_class

    #
    # Chain API
    #
    @classmethod
    def from_genesis(cls,
                     base_db: BaseAtomicDB,
                     genesis_state: BeaconState,
                     genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
        """
        Initialize the ``BeaconChain`` from a genesis state.
        """
        sm_class = cls.get_state_machine_class_for_block_slot(genesis_block.slot)
        if type(genesis_block) != sm_class.block_class:
            raise BlockClassError(
                "Given genesis block class: {}, StateMachine.block_class: {}".format(
                    type(genesis_block),
                    sm_class.block_class
                )
            )

        chaindb = cls.get_chaindb_class()(db=base_db)
        chaindb.persist_state(genesis_state)
        return cls._from_genesis_block(base_db, genesis_block)

    @classmethod
    def _from_genesis_block(cls,
                            base_db: BaseAtomicDB,
                            genesis_block: BaseBeaconBlock) -> 'BaseBeaconChain':
        """
        Initialize the ``BeaconChain`` from the genesis block.
        """
        chaindb = cls.get_chaindb_class()(db=base_db)
        chaindb.persist_block(genesis_block, genesis_block.__class__)
        return cls(base_db)

    #
    # StateMachine API
    #
    @classmethod
    def get_state_machine_class(cls, block: BaseBeaconBlock) -> Type['BaseBeaconStateMachine']:
        """
        Returns the ``StateMachine`` instance for the given block slot number.
        """
        return cls.get_state_machine_class_for_block_slot(block.slot)

    @classmethod
    def get_state_machine_class_for_block_slot(
            cls,
            slot: Slot) -> Type['BaseBeaconStateMachine']:
        """
        Return the ``StateMachine`` class for the given block slot number.
        """
        if cls.sm_configuration is None:
            raise AttributeError("Chain classes must define the StateMachines in sm_configuration")

        validate_slot(slot)
        for start_slot, sm_class in reversed(cls.sm_configuration):
            if slot >= start_slot:
                return sm_class
        raise StateMachineNotFound("No StateMachine available for block slot: #{0}".format(slot))

    def get_state_machine(self, at_block: BaseBeaconBlock=None) -> 'BaseBeaconStateMachine':
        """
        Return the ``StateMachine`` instance for the given block number.
        """
        block = self.ensure_block(at_block)
        sm_class = self.get_state_machine_class_for_block_slot(block.slot)
        return sm_class(
            chaindb=self.chaindb,
            block=block,
        )

    #
    # Block API
    #
    def get_block_class(self, block_root: Hash32) -> Type[BaseBeaconBlock]:
        slot = self.chaindb.get_slot_by_root(block_root)
        sm_class = self.get_state_machine_class_for_block_slot(slot)
        block_class = sm_class.block_class
        return block_class

    def create_block_from_parent(self,
                                 parent_block: BaseBeaconBlock,
                                 block_params: FromBlockParams) -> BaseBeaconBlock:
        """
        Passthrough helper to the ``StateMachine`` class of the block descending from the
        given block.
        """
        return self.get_state_machine_class_for_block_slot(
            slot=parent_block.slot + 1 if block_params.slot is None else block_params.slot,
        ).create_block_from_parent(parent_block, block_params)

    def get_block_by_root(self, block_root: Hash32) -> BaseBeaconBlock:
        """
        Return the requested block as specified by block hash.

        Raise ``BlockNotFound`` if there's no block with the given hash in the db.
        """
        validate_word(block_root, title="Block Hash")
        block_class = self.get_block_class(block_root)
        return self.chaindb.get_block_by_root(block_root, block_class)

    def get_canonical_head(self) -> BaseBeaconBlock:
        """
        Return the block at the canonical chain head.

        Raise ``CanonicalHeadNotFound`` if there's no head defined for the canonical chain.
        """
        block_root = self.chaindb.get_canonical_head_root()
        block_class = self.get_block_class(block_root)
        return self.chaindb.get_block_by_root(block_root, block_class)

    def get_score(self, block_root: Hash32) -> int:
        """
        Return the score of the block with the given hash.

        Raise ``BlockNotFound`` if there is no matching block hash.
        """
        return self.chaindb.get_score(block_root)

    def ensure_block(self, block: BaseBeaconBlock=None) -> BaseBeaconBlock:
        """
        Return ``block`` if it is not ``None``, otherwise return the block
        of the canonical head.
        """
        if block is None:
            head = self.get_canonical_head()
            return self.create_block_from_parent(head, FromBlockParams())
        else:
            return block

    def get_block(self) -> BaseBeaconBlock:
        """
        Return the current TIP block.
        """
        return self.get_state_machine().block

    def get_canonical_block_by_slot(self, slot: Slot) -> BaseBeaconBlock:
        """
        Return the block with the given number in the canonical chain.

        Raise ``BlockNotFound`` if there's no block with the given number in the
        canonical chain.
        """
        validate_slot(slot)
        return self.get_block_by_root(self.chaindb.get_canonical_block_root(slot))

    def get_canonical_block_root(self, slot: Slot) -> Hash32:
        """
        Return the block hash with the given number in the canonical chain.

        Raise ``BlockNotFound`` if there's no block with the given number in the
        canonical chain.
        """
        return self.chaindb.get_canonical_block_root(slot)

    def import_block(
            self,
            block: BaseBeaconBlock,
            perform_validation: bool=True
    ) -> Tuple[BaseBeaconBlock, Tuple[BaseBeaconBlock, ...], Tuple[BaseBeaconBlock, ...]]:
        """
        Import a complete block and returns a 3-tuple

        - the imported block
        - a tuple of blocks which are now part of the canonical chain.
        - a tuple of blocks which were canonical and now are no longer canonical.
        """
        try:
            parent_block = self.get_block_by_root(block.previous_block_root)
        except BlockNotFound:
            raise ValidationError(
                "Attempt to import block #{}. Cannot import block {} before importing "
                "its parent block at {}".format(
                    block.slot,
                    block.signed_root,
                    block.previous_block_root,
                )
            )
        base_block_for_import = self.create_block_from_parent(
            parent_block,
            FromBlockParams(),
        )
        state, imported_block = self.get_state_machine(base_block_for_import).import_block(block)

        # Validate the imported block.
        if perform_validation:
            validate_imported_block_unchanged(imported_block, block)

        # TODO: Now it just persists all state. Should design how to clean up the old state.
        self.chaindb.persist_state(state)

        (
            new_canonical_blocks,
            old_canonical_blocks,
        ) = self.chaindb.persist_block(imported_block, imported_block.__class__)

        self.logger.debug(
            'IMPORTED_BLOCK: slot %s | signed root %s',
            imported_block.slot,
            encode_hex(imported_block.signed_root),
        )

        return imported_block, new_canonical_blocks, old_canonical_blocks
| 30.955224 | 99 | 0.634201 | 1,348 | 12,444 | 5.622404 | 0.141691 | 0.017417 | 0.025729 | 0.02375 | 0.485024 | 0.445046 | 0.38224 | 0.338831 | 0.288429 | 0.264283 | 0 | 0.003717 | 0.286564 | 12,444 | 401 | 100 | 31.032419 | 0.849966 | 0.180006 | 0 | 0.400794 | 0 | 0 | 0.064676 | 0.019259 | 0 | 0 | 0 | 0.004988 | 0 | 1 | 0.126984 | false | 0.063492 | 0.115079 | 0 | 0.34127 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 |
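Per the class docstring, a concrete chain only has to wire `StateMachine` classes to slot ranges through `sm_configuration`. A minimal sketch; `MyStateMachine` is a stand-in for a real `BaseBeaconStateMachine` subclass:

```python
class MyBeaconChain(BeaconChain):
    sm_configuration = (
        (Slot(0), MyStateMachine),  # MyStateMachine handles every slot from 0 onward
    )
    chain_id = 1
```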
hexsha: d9ba3c5b12232bbc811a9ad606f2570ac2481108 | size: 10,492 | ext: py | lang: Python
max_stars: path nova/conf/hyperv.py | repo raubvogel/nova | head b78be4e83cdc191e20a4a61b6aae72cb2b37f62b | licenses ["Apache-2.0"] | count null | events null to null
max_issues: path nova/conf/hyperv.py | repo raubvogel/nova | head b78be4e83cdc191e20a4a61b6aae72cb2b37f62b | licenses ["Apache-2.0"] | count null | events null to null
max_forks: path nova/conf/hyperv.py | repo raubvogel/nova | head b78be4e83cdc191e20a4a61b6aae72cb2b37f62b | licenses ["Apache-2.0"] | count null | events null to null
content:
# Copyright (c) 2016 TUBITAK BILGEM
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_config import cfg

hyperv_opt_group = cfg.OptGroup("hyperv",
    title='The Hyper-V feature',
    help="""
The hyperv feature allows you to configure the Hyper-V hypervisor
driver to be used within an OpenStack deployment.
""")

hyperv_opts = [
    cfg.FloatOpt('dynamic_memory_ratio',
        default=1.0,
        help="""
Dynamic memory ratio

Enables dynamic memory allocation (ballooning) when set to a value
greater than 1. The value expresses the ratio between the total RAM
assigned to an instance and its startup RAM amount. For example a
ratio of 2.0 for an instance with 1024MB of RAM implies 512MB of
RAM allocated at startup.

Possible values:

* 1.0: Disables dynamic memory allocation (Default).
* Float values greater than 1.0: Enables allocation of total implied
  RAM divided by this value for startup.
"""),
    cfg.BoolOpt('enable_instance_metrics_collection',
        default=False,
        help="""
Enable instance metrics collection

Enables metrics collections for an instance by using Hyper-V's
metric APIs. Collected data can be retrieved by other apps and
services, e.g.: Ceilometer.
"""),
    cfg.StrOpt('instances_path_share',
        default="",
        help="""
Instances path share

The name of a Windows share mapped to the "instances_path" dir
and used by the resize feature to copy files to the target host.
If left blank, an administrative share (hidden network share) will
be used, looking for the same "instances_path" used locally.

Possible values:

* "": An administrative share will be used (Default).
* Name of a Windows share.

Related options:

* "instances_path": The directory which will be used if this option
  here is left blank.
"""),
    cfg.BoolOpt('limit_cpu_features',
        default=False,
        help="""
Limit CPU features

This flag is needed to support live migration to hosts with
different CPU features and checked during instance creation
in order to limit the CPU features used by the instance.
"""),
    cfg.IntOpt('mounted_disk_query_retry_count',
        default=10,
        min=0,
        help="""
Mounted disk query retry count

The number of times to retry checking for a mounted disk.
The query runs until the device can be found or the retry
count is reached.

Possible values:

* Positive integer values. Values greater than 1 are recommended
  (Default: 10).

Related options:

* Time interval between disk mount retries is declared with
  "mounted_disk_query_retry_interval" option.
"""),
    cfg.IntOpt('mounted_disk_query_retry_interval',
        default=5,
        min=0,
        help="""
Mounted disk query retry interval

Interval between checks for a mounted disk, in seconds.

Possible values:

* Time in seconds (Default: 5).

Related options:

* This option is meaningful when the mounted_disk_query_retry_count
  is greater than 1.
* The retry loop runs with mounted_disk_query_retry_count and
  mounted_disk_query_retry_interval configuration options.
"""),
    cfg.IntOpt('power_state_check_timeframe',
        default=60,
        min=0,
        help="""
Power state check timeframe

The timeframe to be checked for instance power state changes.
This option is used to fetch the state of the instance from Hyper-V
through the WMI interface, within the specified timeframe.

Possible values:

* Timeframe in seconds (Default: 60).
"""),
    cfg.IntOpt('power_state_event_polling_interval',
        default=2,
        min=0,
        help="""
Power state event polling interval

Instance power state change event polling frequency. Sets the
listener interval for power state events to the given value.
This option enhances the internal lifecycle notifications of
instances that reboot themselves. It is unlikely that an operator
has to change this value.

Possible values:

* Time in seconds (Default: 2).
"""),
    cfg.StrOpt('qemu_img_cmd',
        default="qemu-img.exe",
        help="""
qemu-img command

qemu-img is required for some of the image related operations
like converting between different image types. You can get it
from here: (http://qemu.weilnetz.de/) or you can install the
Cloudbase OpenStack Hyper-V Compute Driver
(https://cloudbase.it/openstack-hyperv-driver/) which automatically
sets the proper path for this config option. You can either give the
full path of qemu-img.exe or set its path in the PATH environment
variable and leave this option to the default value.

Possible values:

* Name of the qemu-img executable, in case it is in the same
  directory as the nova-compute service or its path is in the
  PATH environment variable (Default).
* Path of qemu-img command (DRIVELETTER:\PATH\TO\QEMU-IMG\COMMAND).

Related options:

* If the config_drive_cdrom option is False, qemu-img will be used to
  convert the ISO to a VHD, otherwise the config drive will
  remain an ISO. To use config drive with Hyper-V, you must
  set the ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe``
  installation.
"""),
    cfg.StrOpt('vswitch_name',
        help="""
External virtual switch name

The Hyper-V Virtual Switch is a software-based layer-2 Ethernet
network switch that is available with the installation of the
Hyper-V server role. The switch includes programmatically managed
and extensible capabilities to connect virtual machines to both
virtual networks and the physical network. In addition, Hyper-V
Virtual Switch provides policy enforcement for security, isolation,
and service levels. The vSwitch represented by this config option
must be an external one (not internal or private).

Possible values:

* If not provided, the first of a list of available vswitches
  is used. This list is queried using WQL.
* Virtual switch name.
"""),
    cfg.IntOpt('wait_soft_reboot_seconds',
        default=60,
        min=0,
        help="""
Wait soft reboot seconds

Number of seconds to wait for instance to shut down after soft
reboot request is made. We fall back to hard reboot if instance
does not shutdown within this window.

Possible values:

* Time in seconds (Default: 60).
"""),
    cfg.BoolOpt('config_drive_cdrom',
        default=False,
        help="""
Mount config drive as a CD drive.

OpenStack can be configured to write instance metadata to a config drive, which
is then attached to the instance before it boots. The config drive can be
attached as a disk drive (default) or as a CD drive.

Related options:

* This option is meaningful with ``force_config_drive`` option set to ``True``
  or when the REST API call to create an instance will have
  ``--config-drive=True`` flag.
* ``config_drive_format`` option must be set to ``iso9660`` in order to use
  CD drive as the config drive image.
* To use config drive with Hyper-V, you must set the
  ``mkisofs_cmd`` value to the full path to an ``mkisofs.exe`` installation.
  Additionally, you must set the ``qemu_img_cmd`` value to the full path
  to a ``qemu-img`` command installation.
* You can configure the Compute service to always create a configuration
  drive by setting the ``force_config_drive`` option to ``True``.
"""),
    cfg.BoolOpt('config_drive_inject_password',
        default=False,
        help="""
Inject password to config drive.

When enabled, the admin password will be available from the config drive image.

Related options:

* This option is meaningful when used with other options that enable
  config drive usage with Hyper-V, such as ``force_config_drive``.
"""),
    cfg.IntOpt('volume_attach_retry_count',
        default=10,
        min=0,
        help="""
Volume attach retry count

The number of times to retry attaching a volume. Volume attachment
is retried until success or the given retry count is reached.

Possible values:

* Positive integer values (Default: 10).

Related options:

* Time interval between attachment attempts is declared with
  volume_attach_retry_interval option.
"""),
    cfg.IntOpt('volume_attach_retry_interval',
        default=5,
        min=0,
        help="""
Volume attach retry interval

Interval between volume attachment attempts, in seconds.

Possible values:

* Time in seconds (Default: 5).

Related options:

* This option is meaningful when volume_attach_retry_count
  is greater than 1.
* The retry loop runs with volume_attach_retry_count and
  volume_attach_retry_interval configuration options.
"""),
    cfg.BoolOpt('enable_remotefx',
        default=False,
        help="""
Enable RemoteFX feature

This requires at least one DirectX 11 capable graphics adapter for
Windows / Hyper-V Server 2012 R2 or newer and RDS-Virtualization
feature has to be enabled.

Instances with RemoteFX can be requested with the following flavor
extra specs:

**os:resolution**. Guest VM screen resolution size. Acceptable values::

    1024x768, 1280x1024, 1600x1200, 1920x1200, 2560x1600, 3840x2160

``3840x2160`` is only available on Windows / Hyper-V Server 2016.

**os:monitors**. Guest VM number of monitors. Acceptable values::

    [1, 4] - Windows / Hyper-V Server 2012 R2
    [1, 8] - Windows / Hyper-V Server 2016

**os:vram**. Guest VM VRAM amount. Only available on
Windows / Hyper-V Server 2016. Acceptable values::

    64, 128, 256, 512, 1024
"""),
    cfg.BoolOpt('use_multipath_io',
        default=False,
        help="""
Use multipath connections when attaching iSCSI or FC disks.

This requires the Multipath IO Windows feature to be enabled. MPIO must be
configured to claim such devices.
"""),
    cfg.ListOpt('iscsi_initiator_list',
        default=[],
        help="""
List of iSCSI initiators that will be used for establishing iSCSI sessions.

If none are specified, the Microsoft iSCSI initiator service will choose the
initiator.
""")
]


def register_opts(conf):
    conf.register_group(hyperv_opt_group)
    conf.register_opts(hyperv_opts, group=hyperv_opt_group)


def list_opts():
    return {hyperv_opt_group: hyperv_opts}
| 31.04142 | 79 | 0.735989 | 1,552 | 10,492 | 4.906572 | 0.278351 | 0.026001 | 0.016809 | 0.022062 | 0.222981 | 0.161261 | 0.127118 | 0.088378 | 0.066448 | 0.052265 | 0 | 0.018754 | 0.191956 | 10,492 | 337 | 80 | 31.133531 | 0.879453 | 0.057568 | 0 | 0.31746 | 0 | 0 | 0.841993 | 0.065836 | 0 | 0 | 0 | 0 | 0 | 1 | 0.007937 | false | 0.011905 | 0.003968 | 0.003968 | 0.015873 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
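A sketch of how these options are registered and read through oslo.config; the override value is illustrative:

```python
from oslo_config import cfg

from nova.conf import hyperv  # module path per the record metadata above

CONF = cfg.CONF
hyperv.register_opts(CONF)
CONF.set_override('dynamic_memory_ratio', 2.0, group='hyperv')
print(CONF.hyperv.dynamic_memory_ratio)  # -> 2.0
```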
hexsha: d9c3024853794c19d2ce2400c8d47311441430b2 | size: 8,513 | ext: py | lang: Python
max_stars: path src/main/python/rlbot/version.py | repo IamEld3st/RLBot | head 36195ffd3a836ed910ce63aed8ba103b98b7b361 | licenses ["MIT"] | count null | events null to null
max_issues: path src/main/python/rlbot/version.py | repo IamEld3st/RLBot | head 36195ffd3a836ed910ce63aed8ba103b98b7b361 | licenses ["MIT"] | count null | events null to null
max_forks: path src/main/python/rlbot/version.py | repo IamEld3st/RLBot | head 36195ffd3a836ed910ce63aed8ba103b98b7b361 | licenses ["MIT"] | count null | events null to null
content:
# Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package

__version__ = '1.6.1'

release_notes = {
    '1.6.1': """
    Fixed GUI crash when loading certain RLBot config files with relative paths for agents.
    Fixed agent preset loading to allow multiple agents to be saved/loaded correctly if they have the same name. - ima9rd
    """,
    '1.6.0': """
    Add support for auto starting .NET executables.
    """,
    '1.5.1': """
    Fixed crash with GUI when no default RLBot.cfg file was found.
    Updated GUI to launch Rocket League when clicking run if no Rocket League process is found. - ima9rd
    """,
    '1.5.0': """
    Adding a have_internet helper function to help streamline upgrade checks. - ima9rd
    """,
    '1.4.2': """
    Adding support for auto-running java bots during tournaments. To take advantage of this
    in your bot, see https://github.com/RLBot/RLBotJavaExample/wiki/Auto-Launching-Java

    Plus bug fixes:
    - Fixed a bug where auto-run executables would crash when trying to write to stderr.
    - Dragging bots to another team in the GUI no longer breaks the config.
    """,
    '1.3.0': """
    Accurate ball prediction for Hoops and Dropshot modes!
    - Kipje13, Marvin, NeverCast, et. al.
    """,
    '1.2.6': """
    Fixed a bug where field info was not extracted properly during dropshot mode.
    It was reporting 2 goals rather than the expected 140.
    """,
    '1.2.5': """
    ***************************************************
    *  Fix for dodge cancels / half flips! - ccman32  *
    ***************************************************

    Plus:
    - Changing the rendering strategy for 3D lines that go past the camera. Formerly it was
      "draw it, even though it's crazy sometimes", now it will be "don't draw it".
    - Showing the rate that inputs are received for each player index when you press the
      [home] key. Toggle back off with the [end] key.
    - Fixed a bug where party_member_bot could get influenced by real controller input.
    - Creating new presets in the GUI works better now.
    - Got rid of the libpng warning seen when using the GUI.
    - Giving specific error messages when cfg files are messed up.
    """,
    '1.2.2': """
    - Rearranged the GUI a bit, and made it load and track appearance configs more effectively.
    - Fixed bug where RUN button behavior in the GUI would not work after killing bots.
    """,
    '1.2.0': """
    - We now offer a 'RigidBodyTick' thanks to whatisaphone! It's a lower-level representation of
      physics data which updates at 120Hz and is not subject to interpolation. You can still make a
      great bot without it, but this feature is quite nice for the scientists among us.
      See https://github.com/RLBot/RLBotPythonExample/wiki/Rigid-Body-Tick for more details!
    - Faster way to access ball prediction data in python. - Skyborg
    """,
    '1.1.3': """
    - Faster way to access ball prediction data in python. - Skyborg
    - Java bots will now shut down when the python framework quits. This has been necessary recently
      to avoid buggy situations.
    - Shutting down the python framework will no longer attempt to kill bots twice in a row.
    - Clicking on the "Run" button twice in a row in the GUI will no longer spawn duplicate processes.
    """,
    '1.1.2': """
    Faster way to access ball prediction data in python. - Skyborg
    """,
    '1.1.1': """
    You can now get information about the ball's status in Dropshot mode thanks to hallo_doei!
    Read all about it at https://github.com/RLBot/RLBot/wiki/Dropshot

    Other changes:
    - The loadout config for orange team is now respected again. - ccman32
    - Fixed a bug where the GUI would crash with a "KeyError". - hallo_doei
    - Avoiding and suppressing some game crashes, and also restoring the
      ability to get game tick data during replays and the postgame. - tarehart
    - Fixed a bug where bots would dodge when they intended to double jump. -tarehart
    """,
    '1.0.6': """
    The latest Rocket League patch broke dodges for our bots; this update fixes it.
    """,
    '1.0.5': """
    Maximum size for a render message has been decreased again because many people experienced
    errors related to memory access. The limit is now only double the original.
    """,
    '1.0.4': """
    - Maximum size for a render message has been increased by a factor of 100. This means you can
      draw a lot of lines at once without getting errors.
    - Boost amount for cars will now round up to the nearest integer, so 0.3% boost will now appear
      as 1 instead of 0.
    - Fixed a crash that would commonly happen after a match ends. As a side effect, you can no longer
      see up-to-date player data during instant replays.
    """,
    '1.0.3': """
    Time for the big 1.0 release! We actually left "beta" a long time ago so this isn't as big
    a milestone as the number implies, but we DO have two great new features!

    1. Setting game state. You can manipulate the position, velocity, etc of the ball and the cars!
    This can be a great help during bot development, and you can also get creative with it. Visit
    the wiki for details and documentation - https://github.com/RLBot/RLBot/wiki/Manipulating-Game-State
    Code written by hallo_doei, ccman32, and tarehart

    2. Ball prediction. We now provide a list of future ball positions based on chip's excellent
    physics modeling. Take advantage of this to do next-level wall reads, catches, and dribbles! You can
    read about the math involved here: https://samuelpmish.github.io/notes/RocketLeague/ball_bouncing/
    Note: currently the wall bounces are only accurate on the standard arena, not hoops or dropshot.
    Documentation and examples can be found here: https://github.com/RLBot/RLBot/wiki/Ball-Path-Prediction
    Code written by chip and tarehart

    Bonus:
    - You can now play on Salty Shores thanks to hallo_doei
    - Bug fix for people with spaces in their file path by Zaptive
    - Subprocess agent for future Rust support by whatisaphone
    """,
    '0.0.32': """
    More comprehensive fix for Rocket League patch 1.50. Compared to previous version:
    - Dropshot tile data is fixed
    - Boost pad data is fixed
    - Loadout configuration is fixed

    Thanks to ccman32 and dtracers for delivering this fix quickly!
    """,
    '0.0.31': """
    Rapid response to Rocket League patch 1.50 with the following known issues:
    - Dropshot tile data is missing
    - Boost pad data is missing
    - Loadout configuration is broken

    Thanks to ccman32 and dtracers for delivering this short-term fix quickly.

    We will follow this up with a proper fix as soon as possible. You may also choose to stay on
    Rocket League 1.49 and RLBot 0.0.30, ask for instructions on discord.
    """,
    '0.0.30': """
    - New core dll that is less likely to break when Rocket League is patched - ccman32 and hallo-doei
    - Fixed bug resulting in incorrect quickchat - dtracers
    - Added more built-in colors to the python rendering manager - Eastvillage
    - Fix for items with a ':' not showing up in the GUI - hallo-doei
    - Fix for GUI not saving correct path - hallo-doei
    - Fix for GUI crash when saving preset then canceling - hallo-doei
    - Adding file checking before injection (Resolves #167) - Redox
    - Fixed typo in rlbot.cfg - Redox
    - Fancy release notes - tarehart and Skyborg
    """
}

release_banner = """

           ______ _ ______       _
  10100   | ___ \ |  ___ \     | |                 00101
 110011   | |_/ / | | |_/ /___ | |_               110011
00110110  |    /| | | ___ \/ _ \| __|            01101100
  010010  | |\ \| |____| |_/ / (_) | |_            010010
   10010  \_| \_\_____/\____/ \___/ \__|            01001

"""


def get_current_release_notes():
    if __version__ in release_notes:
        return release_notes[__version__]
    return ''


def get_help_text():
    return "Trouble? Ask on Discord at https://discord.gg/5cNbXgG " \
           "or report an issue at https://github.com/RLBot/RLBot/issues"


def print_current_release_notes():
    print(release_banner)
    print("Version {}".format(__version__))
    print(get_current_release_notes())
    print(get_help_text())
    print("")
| 45.768817 | 118 | 0.677787 | 1,269 | 8,513 | 4.467297 | 0.394011 | 0.008467 | 0.014817 | 0.020109 | 0.095608 | 0.07444 | 0.054683 | 0.054683 | 0.027165 | 0.027165 | 0 | 0.028786 | 0.23282 | 8,513 | 185 | 119 | 46.016216 | 0.839228 | 0.032186 | 0 | 0.158228 | 0 | 0.101266 | 0.909632 | 0.012389 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018987 | false | 0 | 0 | 0.006329 | 0.037975 | 0.037975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
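Usage sketch for the helpers above:

```python
from rlbot.version import __version__, get_current_release_notes, print_current_release_notes

print(__version__)                  # '1.6.1'
print(get_current_release_notes())  # notes for the current version, or '' if absent
print_current_release_notes()       # banner, version, notes, then the help text
```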
hexsha: d9c387f6c561372e064bfe33f0566d9f2a1cdd50 | size: 399 | ext: py | lang: Python
max_stars: path Task2C.py | repo StanleyHou117/group66_LentTermProject | head 0255310cb202f21cada8cf7c0f45a045a9b72c1f | licenses ["MIT"] | count null | events null to null
max_issues: path Task2C.py | repo StanleyHou117/group66_LentTermProject | head 0255310cb202f21cada8cf7c0f45a045a9b72c1f | licenses ["MIT"] | count null | events null to null
max_forks: path Task2C.py | repo StanleyHou117/group66_LentTermProject | head 0255310cb202f21cada8cf7c0f45a045a9b72c1f | licenses ["MIT"] | count null | events null to null
content:
from floodsystem.stationdata import build_station_list
from floodsystem.flood import stations_highest_rel_level


def run():
    stations = build_station_list()
    warning_stations = stations_highest_rel_level(stations, 10)
    for entry in warning_stations:
        print(entry[0].name, entry[1])


if __name__ == "__main__":
    print("*** Task 2C: CUED Part IA Flood Warning System ***")
    run()
| 28.5 | 63 | 0.734336 | 53 | 399 | 5.150943 | 0.584906 | 0.10989 | 0.117216 | 0.168498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01506 | 0.16792 | 399 | 14 | 64 | 28.5 | 0.807229 | 0 | 0 | 0 | 0 | 0 | 0.145 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 |
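`stations_highest_rel_level(stations, N)` evidently yields (station, relative level) pairs, which is why `run` indexes each entry. The same check for the five most at-risk stations (a sketch, assuming the same floodsystem package is importable):

```python
stations = build_station_list()
for station, rel_level in stations_highest_rel_level(stations, 5):
    print(station.name, rel_level)
```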
hexsha: d9c7946fa7c34a185ec10fc47b862efa2f519a9d | size: 19,770 | ext: py | lang: Python
max_stars: path uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py | repo OpenIxia/ixnetwork_restpy | head f628db450573a104f327cf3c737ca25586e067ae | licenses ["MIT"] | count 20 | events 2019-05-07T01:59:14.000Z to 2022-02-11T05:24:47.000Z
max_issues: path uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py | repo OpenIxia/ixnetwork_restpy | head f628db450573a104f327cf3c737ca25586e067ae | licenses ["MIT"] | count 60 | events 2019-04-03T18:59:35.000Z to 2022-02-22T12:05:05.000Z
max_forks: path uhd_restpy/testplatform/sessions/ixnetwork/quicktest/learnframes_58e01d83db5d99bcabff902f5cf6ec51.py | repo OpenIxia/ixnetwork_restpy | head f628db450573a104f327cf3c737ca25586e067ae | licenses ["MIT"] | count 13 | events 2019-05-20T10:48:31.000Z to 2021-10-06T07:45:44.000Z
content:
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class LearnFrames(Base):
"""The learning frames that IxNetwork sends during the test.
The LearnFrames class encapsulates a required learnFrames resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'learnFrames'
_SDM_ATT_MAP = {
'FastPathEnable': 'fastPathEnable',
'FastPathLearnFrameSize': 'fastPathLearnFrameSize',
'FastPathNumFrames': 'fastPathNumFrames',
'FastPathRate': 'fastPathRate',
'LearnFrameSize': 'learnFrameSize',
'LearnFrequency': 'learnFrequency',
'LearnNumFrames': 'learnNumFrames',
'LearnRate': 'learnRate',
'LearnSendMacOnly': 'learnSendMacOnly',
'LearnSendRouterSolicitation': 'learnSendRouterSolicitation',
'LearnWaitTime': 'learnWaitTime',
'LearnWaitTimeBeforeTransmit': 'learnWaitTimeBeforeTransmit',
}
_SDM_ENUM_MAP = {
'learnFrequency': ['never', 'onBinaryIteration', 'oncePerFramesize', 'oncePerTest', 'onTrial'],
}
def __init__(self, parent, list_op=False):
super(LearnFrames, self).__init__(parent, list_op)
@property
def FastPathEnable(self):
# type: () -> bool
"""
Returns
-------
- bool: If true, enables fast path transmit.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathEnable'])
@FastPathEnable.setter
def FastPathEnable(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathEnable'], value)
@property
def FastPathLearnFrameSize(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the size of the learning frames in the fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'])
@FastPathLearnFrameSize.setter
def FastPathLearnFrameSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathLearnFrameSize'], value)
@property
def FastPathNumFrames(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the number of learn frames that IxNetwork sends through fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathNumFrames'])
@FastPathNumFrames.setter
def FastPathNumFrames(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathNumFrames'], value)
@property
def FastPathRate(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the rate at which IxNetwork sends learn frames through fast path.
"""
return self._get_attribute(self._SDM_ATT_MAP['FastPathRate'])
@FastPathRate.setter
def FastPathRate(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['FastPathRate'], value)
@property
def LearnFrameSize(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the size of the learning frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnFrameSize'])
@LearnFrameSize.setter
def LearnFrameSize(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnFrameSize'], value)
@property
def LearnFrequency(self):
# type: () -> str
"""
Returns
-------
        - str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial): Allows you to choose how frequently IxNetwork sends learning frames during the test.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnFrequency'])
@LearnFrequency.setter
def LearnFrequency(self, value):
# type: (str) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnFrequency'], value)
@property
def LearnNumFrames(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the number of learning frames that IxNetwork sends for each address.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnNumFrames'])
@LearnNumFrames.setter
def LearnNumFrames(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnNumFrames'], value)
@property
def LearnRate(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the rate at which IxNetwork sends learn frames to the DUT.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnRate'])
@LearnRate.setter
def LearnRate(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnRate'], value)
@property
def LearnSendMacOnly(self):
# type: () -> bool
"""
Returns
-------
- bool: Sends learning frames to MAC address only.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'])
@LearnSendMacOnly.setter
def LearnSendMacOnly(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnSendMacOnly'], value)
@property
def LearnSendRouterSolicitation(self):
# type: () -> bool
"""
Returns
-------
- bool: Sends router solicitation messages.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'])
@LearnSendRouterSolicitation.setter
def LearnSendRouterSolicitation(self, value):
# type: (bool) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnSendRouterSolicitation'], value)
@property
def LearnWaitTime(self):
# type: () -> int
"""
Returns
-------
- number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTime'])
@LearnWaitTime.setter
def LearnWaitTime(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnWaitTime'], value)
@property
def LearnWaitTimeBeforeTransmit(self):
# type: () -> int
"""
Returns
-------
        - number: Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames.
"""
return self._get_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'])
@LearnWaitTimeBeforeTransmit.setter
def LearnWaitTimeBeforeTransmit(self, value):
# type: (int) -> None
self._set_attribute(self._SDM_ATT_MAP['LearnWaitTimeBeforeTransmit'], value)
def update(self, FastPathEnable=None, FastPathLearnFrameSize=None, FastPathNumFrames=None, FastPathRate=None, LearnFrameSize=None, LearnFrequency=None, LearnNumFrames=None, LearnRate=None, LearnSendMacOnly=None, LearnSendRouterSolicitation=None, LearnWaitTime=None, LearnWaitTimeBeforeTransmit=None):
# type: (bool, int, int, int, int, str, int, int, bool, bool, int, int) -> LearnFrames
"""Updates learnFrames resource on the server.
Args
----
- FastPathEnable (bool): If true, enables fast path transmit.
- FastPathLearnFrameSize (number): Specifies the size of the learning frames in the fast path.
- FastPathNumFrames (number): Specifies the number of learn frames that IxNetwork sends through fast path.
- FastPathRate (number): Specifies the rate at which IxNetwork sends learn frames through fast path.
- LearnFrameSize (number): Specifies the size of the learning frames.
        - LearnFrequency (str(never | onBinaryIteration | oncePerFramesize | oncePerTest | onTrial)): Allows you to choose how frequently IxNetwork sends learning frames during the test.
- LearnNumFrames (number): Specifies the number of learning frames that IxNetwork sends for each address.
- LearnRate (number): Specifies the rate at which IxNetwork sends learn frames to the DUT.
- LearnSendMacOnly (bool): Sends learning frames to MAC address only.
- LearnSendRouterSolicitation (bool): Sends router solicitation messages.
- LearnWaitTime (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames from all the ports.
        - LearnWaitTimeBeforeTransmit (number): Specifies the length of time in ms that IxNetwork pauses before sending all the learning frames.
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def Apply(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the apply operation on the server.
Applies the specified Quick Test.
apply(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('apply', payload=payload, response_object=None)
def ApplyAsync(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyAsync operation on the server.
applyAsync(async_operation=bool)
--------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsync', payload=payload, response_object=None)
def ApplyAsyncResult(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[bool, None]
"""Executes the applyAsyncResult operation on the server.
applyAsyncResult(async_operation=bool)bool
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns bool:
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyAsyncResult', payload=payload, response_object=None)
def ApplyITWizardConfiguration(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the applyITWizardConfiguration operation on the server.
Applies the specified Quick Test.
applyITWizardConfiguration(async_operation=bool)
------------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('applyITWizardConfiguration', payload=payload, response_object=None)
def GenerateReport(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[str, None]
"""Executes the generateReport operation on the server.
        Generates a PDF report for the last successful test run.
generateReport(async_operation=bool)string
------------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns str: This method is asynchronous and has no return value.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('generateReport', payload=payload, response_object=None)
def Run(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the run operation on the server.
Starts the specified Quick Test and waits for its execution to finish.
        The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
run(async_operation=bool)list
-----------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
run(InputParameters=string, async_operation=bool)list
-----------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('run', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the start operation on the server.
Starts the specified Quick Test.
        The IxNetwork model allows for multiple method signatures with the same name, while Python does not.
start(async_operation=bool)
---------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
start(InputParameters=string, async_operation=bool)
---------------------------------------------------
- InputParameters (str): The input arguments of the test.
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Executes the stop operation on the server.
Stops the currently running Quick Test.
stop(async_operation=bool)
--------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WaitForTest(self, *args, **kwargs):
# type: (*Any, **Any) -> Union[List[str], None]
"""Executes the waitForTest operation on the server.
Waits for the execution of the specified Quick Test to be completed.
waitForTest(async_operation=bool)list
-------------------------------------
- async_operation (bool=False): True to execute the operation asynchronously. Any subsequent rest api calls made through the Connection class will block until the operation is complete.
- Returns list(str): This method is synchronous and returns the result of the test.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('waitForTest', payload=payload, response_object=None)
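# --- Hedged usage sketch (not part of the generated file above) ---
# Shows how the LearnFrames resource might be configured through the restpy
# session tree. The import path, session address, credentials, and the choice
# of the rfc2544throughput quick test are assumptions for illustration.
from uhd_restpy import SessionAssistant

session = SessionAssistant(IpAddress='127.0.0.1',
                           UserName='admin', Password='admin')
ixnetwork = session.Ixnetwork
quick_test = ixnetwork.QuickTest.Rfc2544throughput.add()  # assumed test type
learn_frames = quick_test.LearnFrames  # required resource, fetched on access
learn_frames.update(LearnFrameSize=128,
                    LearnRate=100,
                    LearnFrequency='oncePerTest')  # a value from _SDM_ENUM_MAP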
| 44.728507 | 304 | 0.644917 | 2,223 | 19,770 | 5.650022 | 0.127305 | 0.021497 | 0.018631 | 0.025876 | 0.68949 | 0.663296 | 0.605573 | 0.595701 | 0.558519 | 0.519984 | 0 | 0.002926 | 0.239454 | 19,770 | 441 | 305 | 44.829932 | 0.832402 | 0.527112 | 0 | 0.270833 | 0 | 0 | 0.13391 | 0.042006 | 0 | 0 | 0 | 0 | 0 | 1 | 0.243056 | false | 0 | 0.020833 | 0 | 0.451389 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9caf13b41f36d2f1d5f56fee8cc8d3745513f23
| 18,402 |
py
|
Python
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | 1 |
2022-02-28T05:05:06.000Z
|
2022-02-28T05:05:06.000Z
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | 23 |
2020-03-04T22:17:32.000Z
|
2021-01-21T09:35:33.000Z
|
Yellow_Pages_Lithuania/unit_tests.py
|
Jay4C/Web-Scraping
|
187679bee035dad661d983b5a8382240f820c337
|
[
"MIT"
] | null | null | null |
import time
from bs4 import BeautifulSoup
import requests
import pymysql.cursors
import unittest
class UnitTestsDataMinerYellowPagesLithuania(unittest.TestCase):
def test_extract_one_email(self):
url = "https://www.visalietuva.lt/en/company/astorija-hotel-uab"
# Request the content of a page from the url
html = requests.get(url)
# Parse the content of html_doc
soup = BeautifulSoup(html.content, 'html.parser')
if soup.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup.find('a', {'itemprop': 'email'}).text.split("@")[1]
print('email : ' + email)
else:
print('no email business')
def test_extract_emails_from_all_page_of_results_for_one_activity_and_capital(self):
activity = "hotel"
city = "vilniuje"
url_search = "https://www.visalietuva.lt/en/search/" + activity + "/" + city
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, 'html.parser')
number_of_pages = 0
if soup_search.find('div', {'class': 'search_count f_left'}) is not None:
            # The result count divided by 20 results per page gives a float;
            # its first decimal digit below decides whether the page count is
            # rounded up or down.
            number_of_pages_with_coma = int(soup_search
                                            .find('div', {'class': 'search_count f_left'})
                                            .find('span').text
                                            )/20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print('number_of_pages : ' + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print('number_of_pages : ' + str(number_of_pages))
i_1 = 0
if soup_search.find('div', {'class': 'company_list'}) is not None:
print(url_search)
for result_item in soup_search \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
time.sleep(2)
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
print(str(i_1) + ' email : ' + email)
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
if number_of_pages > 1:
for i in range(2, number_of_pages+1):
url_of_one_page_of_results = url_search + "/" + str(i)
print(url_of_one_page_of_results)
time.sleep(2)
html_of_one_page_of_results = requests.get(url_of_one_page_of_results)
soup_of_one_page_of_results = BeautifulSoup(html_of_one_page_of_results.content, 'html.parser')
if soup_of_one_page_of_results.find('div', {'class': 'company_list'}) is not None:
for result_item in soup_of_one_page_of_results\
.find('div', {'class': 'company_list'})\
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
print(str(i_1) + ' email : ' + email)
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
def test_extract_emails_from_all_page_of_results_for_all_activities_and_capitals(self):
        activites = [  # uncomment entries below to choose which activities to scrape
# {'id': '1', 'url': 'labour'}
#{'id': '2', 'url': 'real+estate'},
#{'id': '3', 'url': 'recruitment'},
#{'id': '4', 'url': 'software'},
#{'id': '5', 'url': 'hotel'},
#{'id': '6', 'url': 'landlord'},
#{'id': '7', 'url': 'cleaning'},
#{'id': '8', 'url': 'association'},
#{'id': '9', 'url': 'financial'},
#{'id': '10', 'url': 'restaurant'},
#{'id': '11', 'url': 'building'},
#{'id': '12', 'url': 'hairdresser'},
#{'id': '13', 'url': 'florist'},
#{'id': '14', 'url': 'locksmith'},
#{'id': '15', 'url': 'bakery'},
#{'id': '16', 'url': 'insurance'},
#{'id': '17', 'url': 'pharmacy'},
#{'id': '18', 'url': 'moving'},
#{'id': '19', 'url': 'electricity'},
#{'id': '20', 'url': 'plumbing'},
#{'id': '21', 'url': 'security'},
#{'id': '22', 'url': 'lawyer'},
#{'id': '23', 'url': 'bank'},
#{'id': '24', 'url': 'garage'},
#{'id': '25', 'url': 'dentist'},
#{'id': '26', 'url': 'doctor'},
#{'id': '27', 'url': 'accounting'},
#{'id': '28', 'url': 'store'},
#{'id': '29', 'url': 'notary'},
#{'id': '30', 'url': 'jeweller'},
#{'id': '31', 'url': 'tailor'},
#{'id': '32', 'url': 'meat'},
#{'id': '33', 'url': 'library'},
#{'id': '34', 'url': 'architect'}
]
capitales_du_monde = [
{'id': '183', 'nom': 'akmeneje'},#Akmenė
{'id': '184', 'nom': 'alytuje'},#Alytus
{'id': '185', 'nom': 'anyksciuose'},#Anykščiai
{'id': '186', 'nom': 'birstone'},#Birštonas
{'id': '187', 'nom': 'birzuose'},#Biržai
{'id': '188', 'nom': 'druskininkuose'},#Druskininkai
{'id': '189', 'nom': 'elektrenuose'},#Elektrėnai
{'id': '190', 'nom': 'ignalinoje'},#Ignalina
{'id': '191', 'nom': 'jonavoje'},#Jonava
{'id': '192', 'nom': 'joniskyje'},#Joniškis
{'id': '193', 'nom': 'jurbarke'},#Jurbarkas
{'id': '194', 'nom': 'kaisiadoryse'},#Kaišiadorys
{'id': '195', 'nom': 'kalvarijoje'},#Kalvarija
{'id': '196', 'nom': 'kaune'},#Kaunas
{'id': '197', 'nom': 'kazlu-rudoje'},#Kazlų Rūda
{'id': '198', 'nom': 'kedainiuose'},#Kėdainiai
{'id': '199', 'nom': 'kelmeje'},#Kelmė
{'id': '200', 'nom': 'klaipedoje'},#Klaipėda
{'id': '201', 'nom': 'kretingoje'},#Kretinga
{'id': '202', 'nom': 'kupiskyje'},#Kupiškis
{'id': '203', 'nom': 'lazdijuose'},#Lazdijai
{'id': '204', 'nom': 'marijampoleje'},#Marijampolė
{'id': '205', 'nom': 'mazeikiuose'},#Mažeikiai
{'id': '206', 'nom': 'moletuose'},#Molėtai
{'id': '207', 'nom': 'neringoje'},#Neringa
{'id': '208', 'nom': 'pagegiuose'},#Pagėgiai
{'id': '209', 'nom': 'pakruojyje'},#Pakruojis
{'id': '210', 'nom': 'palangoje'},#Palanga
{'id': '211', 'nom': 'panevezyje'},#Panevėžys
{'id': '212', 'nom': 'pasvalyje'},#Pasvalys
{'id': '213', 'nom': 'plungeje'},#Plungė
{'id': '214', 'nom': 'prienuose'},#Prienai
{'id': '215', 'nom': 'radviliskyje'},#Radviliškis
{'id': '216', 'nom': 'raseiniuose'},#Raseiniai
{'id': '217', 'nom': 'rietave'},#Rietavas
{'id': '218', 'nom': 'rokiskyje'},#Rokiškis
{'id': '219', 'nom': 'sakiuose'},#Šakiai
{'id': '220', 'nom': 'salcininkuose'},#Šalčininkai
{'id': '221', 'nom': 'siauliuose'},#Šiauliai
{'id': '222', 'nom': 'silaleje'},#Šilalė
{'id': '223', 'nom': 'siluteje'},#Šilutė
{'id': '224', 'nom': 'sirvintose'},#Širvintos
{'id': '225', 'nom': 'skuode'},#Skuodas
{'id': '226', 'nom': 'svencionyse'},#Švenčionys
{'id': '227', 'nom': 'taurageje'},#Tauragė
{'id': '228', 'nom': 'telsiuose'},#Telšiai
{'id': '229', 'nom': 'trakuose'},#Trakai
{'id': '230', 'nom': 'ukmergeje'},#Ukmergė
{'id': '231', 'nom': 'utenoje'},#Utena
{'id': '232', 'nom': 'varenoje'},#Varėna
{'id': '233', 'nom': 'vilkaviskyje'},#Vilkaviškis
{'id': '234', 'nom': 'vilniuje'},#Vilnius
{'id': '235', 'nom': 'visagine'},#Visaginas
{'id': '236', 'nom': 'zarasuose'}#Zarasai
]
try:
for capitale in capitales_du_monde:
for activite in activites:
try:
activity = activite.get("url")
city = capitale.get("nom")
url_search = "https://www.visalietuva.lt/en/search/" + activity + "/" + city
html_search = requests.get(url_search)
soup_search = BeautifulSoup(html_search.content, 'html.parser')
number_of_pages = 0
if soup_search.find('div', {'class': 'search_count f_left'}) is not None:
number_of_pages_with_coma = int(soup_search
.find('div', {'class': 'search_count f_left'})
.find('span').text
) / 20
if int(str(number_of_pages_with_coma).split(".")[1][:1]) < 5:
number_of_pages += round(number_of_pages_with_coma) + 1
print('number_of_pages : ' + str(number_of_pages))
elif int(str(number_of_pages_with_coma).split(".")[1][:1]) >= 5:
number_of_pages += round(number_of_pages_with_coma)
print('number_of_pages : ' + str(number_of_pages))
i_1 = 0
if soup_search.find('div', {'class': 'company_list'}) is not None:
print(url_search)
for result_item in soup_search \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
time.sleep(2)
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
try:
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : " + email)
connection.close()
except:
print(str(i_1) + " The record already exists : " + email)
connection.close()
except Exception as e:
print(str(i_1) + " An error with the email : " + email + " " + str(e))
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
if number_of_pages > 1:
for i in range(2, number_of_pages + 1):
url_of_one_page_of_results = url_search + "/" + str(i)
print(url_of_one_page_of_results)
time.sleep(2)
html_of_one_page_of_results = requests.get(url_of_one_page_of_results)
soup_of_one_page_of_results = BeautifulSoup(html_of_one_page_of_results.content,
'html.parser')
if soup_of_one_page_of_results.find('div', {'class': 'company_list'}) is not None:
for result_item in soup_of_one_page_of_results \
.find('div', {'class': 'company_list'}) \
.find_all('div', {'class': 'item'}):
i_1 += 1
url_result = result_item.find('a', {'class': 'company-item-title'}).get('href')
# Request the content of a page from the url
html_result = requests.get(url_result)
# Parse the content of html_doc
soup_result = BeautifulSoup(html_result.content, 'html.parser')
if soup_result.find('a', {'itemprop': 'email'}) is not None:
email = "info@" + \
soup_result.find('a', {'itemprop': 'email'}).text.split("@")[1]
try:
connection = pymysql.connect(
host='localhost',
port=3306,
user='',
password='',
db='contacts_professionnels',
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
with connection.cursor() as cursor:
try:
sql = "INSERT INTO `emails` (" \
"`id_activite`, " \
"`id_capitale_du_monde`, " \
"`email`) VALUE (%s, %s, %s)"
cursor.execute(sql, (
activite.get('id'),
capitale.get('id'),
email))
connection.commit()
print(str(i_1) + " The record is stored : " + email)
connection.close()
except:
print(str(i_1) + " The record already exists : " + email)
connection.close()
except Exception as e:
print(str(i_1) + " An error with the email : " + email + " " + str(e))
else:
print(str(i_1) + ' no email business')
else:
print('sorry there is nothing')
except Exception as e:
print("There is an error connection at url : " + str(e))
finally:
print('done')
if __name__ == '__main__':
unittest.main()
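# Hedged note (not part of the original file): a single test can be run in
# isolation, assuming this file is saved as unit_tests.py:
#
#   python -m unittest unit_tests.UnitTestsDataMinerYellowPagesLithuania.test_extract_one_email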
| 53.0317 | 119 | 0.395392 | 1,592 | 18,402 | 4.378141 | 0.232412 | 0.032138 | 0.052224 | 0.025251 | 0.657389 | 0.647489 | 0.647489 | 0.643472 | 0.638737 | 0.638737 | 0 | 0.029659 | 0.463156 | 18,402 | 346 | 120 | 53.184971 | 0.675878 | 0.101185 | 0 | 0.639216 | 0 | 0 | 0.1551 | 0.005596 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011765 | false | 0.007843 | 0.019608 | 0 | 0.035294 | 0.109804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9cb561a08fd3aac17d5adf4c0665d1418e60e6a
| 3,262 |
py
|
Python
|
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 3 |
2020-04-28T16:27:33.000Z
|
2020-07-22T07:43:30.000Z
|
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster/dagster_tests/compat_tests/test_back_compat.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 1 |
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
# pylint: disable=protected-access
import os
import re
import pytest
from dagster import file_relative_path
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance, InstanceRef
from dagster.utils.test import restore_directory
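# restore_directory (imported above) snapshots the given directory on entry and
# restores it on exit, so the in-place upgrades below don't mutate the
# checked-in snapshot fixtures between test runs.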
# test that we can load runs and events from an old instance
def test_0_6_4():
test_dir = file_relative_path(__file__, 'snapshot_0_6_4')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'c7a6c4d7-6c88-46d0-8baa-d4937c3cefe5). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
for run in runs:
instance.all_logs(run.run_id)
def test_0_6_6_sqlite_exc():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
runs = instance.get_runs()
# Note that this is a deliberate choice -- old runs are simply invisible, and their
# presence won't raise DagsterInstanceMigrationRequired. This is a reasonable choice since
# the runs.db has moved and otherwise we would have to do a check for the existence of an
# old runs.db every time we accessed the runs. Instead, we'll do this only in the upgrade
# method.
assert len(runs) == 0
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
with pytest.raises(
DagsterInstanceMigrationRequired,
match=re.escape(
'Instance is out of date and must be migrated (SqliteEventLogStorage for run '
'89296095-892d-4a15-aa0d-9018d1580945). Database is at revision None, head is '
'567bc23fd1ac. Please run `dagster instance migrate`.'
),
):
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
def test_0_6_6_sqlite_migrate():
test_dir = file_relative_path(__file__, 'snapshot_0_6_6/sqlite')
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
with restore_directory(test_dir):
instance = DagsterInstance.from_ref(InstanceRef.from_dir(test_dir))
instance.upgrade()
runs = instance.get_runs()
assert len(runs) == 1
run_ids = instance._event_storage.get_all_run_ids()
assert run_ids == ['89296095-892d-4a15-aa0d-9018d1580945']
instance._event_storage.get_logs_for_run('89296095-892d-4a15-aa0d-9018d1580945')
assert not os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/runs.db'))
assert os.path.exists(file_relative_path(__file__, 'snapshot_0_6_6/sqlite/history/runs.db'))
| 42.363636 | 100 | 0.698038 | 432 | 3,262 | 4.979167 | 0.280093 | 0.009298 | 0.059507 | 0.033473 | 0.662483 | 0.662483 | 0.647606 | 0.632729 | 0.632729 | 0.615528 | 0 | 0.074961 | 0.218884 | 3,262 | 76 | 101 | 42.921053 | 0.769231 | 0.136726 | 0 | 0.584906 | 0 | 0 | 0.264245 | 0.155983 | 0 | 0 | 0 | 0 | 0.150943 | 1 | 0.056604 | false | 0 | 0.132075 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9d59aa9c4853e8590f823a63f53768b8aecbce1
| 6,899 |
py
|
Python
|
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | null | null | null |
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | 32 |
2021-09-04T07:08:45.000Z
|
2022-02-19T08:08:11.000Z
|
python/ray/experimental/workflow/execution.py
|
wgifford/ray
|
8acb469b047cd9b327c9477a13b030eb7357860e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import time
from typing import Set, List, Tuple, Optional, TYPE_CHECKING
import uuid
import ray
from ray.experimental.workflow import workflow_context
from ray.experimental.workflow import workflow_storage
from ray.experimental.workflow.common import (Workflow, WorkflowStatus,
WorkflowMetaData, StepType)
from ray.experimental.workflow.step_executor import commit_step
from ray.experimental.workflow.storage import get_global_storage
from ray.experimental.workflow.workflow_access import (
flatten_workflow_output, get_or_create_management_actor,
get_management_actor)
if TYPE_CHECKING:
from ray.experimental.workflow.step_executor import WorkflowExecutionResult
logger = logging.getLogger(__name__)
def run(entry_workflow: Workflow,
workflow_id: Optional[str] = None,
overwrite: bool = True) -> ray.ObjectRef:
"""Run a workflow asynchronously.
    # TODO(suquark): The current "run" always overwrites the existing workflow.
    # We need to fix this later.
"""
store = get_global_storage()
assert ray.is_initialized()
if workflow_id is None:
# Workflow ID format: {Entry workflow UUID}.{Unix time to nanoseconds}
workflow_id = f"{str(uuid.uuid4())}.{time.time():.9f}"
logger.info(f"Workflow job created. [id=\"{workflow_id}\", storage_url="
f"\"{store.storage_url}\"].")
with workflow_context.workflow_step_context(workflow_id,
store.storage_url):
# checkpoint the workflow
ws = workflow_storage.get_workflow_storage(workflow_id)
commit_step(ws, "", entry_workflow)
workflow_manager = get_or_create_management_actor()
ignore_existing = (entry_workflow.data.step_type != StepType.FUNCTION)
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
result: "WorkflowExecutionResult" = ray.get(
workflow_manager.run_or_resume.remote(workflow_id,
ignore_existing))
if entry_workflow.data.step_type == StepType.FUNCTION:
return flatten_workflow_output(workflow_id,
result.persisted_output)
else:
return flatten_workflow_output(workflow_id, result.volatile_output)
# TODO(suquark): support recovery with ObjectRef inputs.
def resume(workflow_id: str) -> ray.ObjectRef:
"""Resume a workflow asynchronously. See "api.resume()" for details.
"""
storage = get_global_storage()
logger.info(f"Resuming workflow [id=\"{workflow_id}\", storage_url="
f"\"{storage.storage_url}\"].")
workflow_manager = get_or_create_management_actor()
# NOTE: It is important to 'ray.get' the returned output. This
# ensures caller of 'run()' holds the reference to the workflow
# result. Otherwise if the actor removes the reference of the
# workflow output, the caller may fail to resolve the result.
result: "WorkflowExecutionResult" = ray.get(
workflow_manager.run_or_resume.remote(
workflow_id, ignore_existing=False))
logger.info(f"Workflow job {workflow_id} resumed.")
return flatten_workflow_output(workflow_id, result.persisted_output)
def get_output(workflow_id: str, name: Optional[str]) -> ray.ObjectRef:
"""Get the output of a running workflow.
See "api.get_output()" for details.
"""
assert ray.is_initialized()
try:
workflow_manager = get_management_actor()
except ValueError as e:
raise ValueError(
"Failed to connect to the workflow management "
"actor. The workflow could have already failed. You can use "
"workflow.resume() to resume the workflow.") from e
output = ray.get(workflow_manager.get_output.remote(workflow_id, name))
return flatten_workflow_output(workflow_id, output)
def cancel(workflow_id: str) -> None:
try:
workflow_manager = get_management_actor()
ray.get(workflow_manager.cancel_workflow.remote(workflow_id))
except ValueError:
wf_store = workflow_storage.get_workflow_storage(workflow_id)
wf_store.save_workflow_meta(WorkflowMetaData(WorkflowStatus.CANCELED))
def get_status(workflow_id: str) -> Optional[WorkflowStatus]:
try:
workflow_manager = get_management_actor()
running = ray.get(
workflow_manager.is_workflow_running.remote(workflow_id))
except Exception:
running = False
if running:
return WorkflowStatus.RUNNING
store = workflow_storage.get_workflow_storage(workflow_id)
meta = store.load_workflow_meta()
if meta is None:
raise ValueError(f"No such workflow_id {workflow_id}")
return meta.status
def list_all(status_filter: Set[WorkflowStatus]
) -> List[Tuple[str, WorkflowStatus]]:
try:
workflow_manager = get_management_actor()
except ValueError:
workflow_manager = None
if workflow_manager is None:
runnings = []
else:
runnings = ray.get(workflow_manager.list_running_workflow.remote())
if WorkflowStatus.RUNNING in status_filter and len(status_filter) == 1:
return [(r, WorkflowStatus.RUNNING) for r in runnings]
runnings = set(runnings)
    # Here we don't have a workflow id, so use an empty one instead
store = workflow_storage.get_workflow_storage("")
ret = []
for (k, s) in store.list_workflow():
if s == WorkflowStatus.RUNNING and k not in runnings:
s = WorkflowStatus.RESUMABLE
if s in status_filter:
ret.append((k, s))
return ret
def resume_all(with_failed: bool) -> List[Tuple[str, ray.ObjectRef]]:
filter_set = {WorkflowStatus.RESUMABLE}
if with_failed:
filter_set.add(WorkflowStatus.FAILED)
all_failed = list_all(filter_set)
try:
workflow_manager = get_management_actor()
except Exception as e:
raise RuntimeError("Failed to get management actor") from e
async def _resume_one(wid: str) -> Tuple[str, Optional[ray.ObjectRef]]:
try:
result: "WorkflowExecutionResult" = (
await workflow_manager.run_or_resume.remote(wid))
obj = flatten_workflow_output(wid, result.persisted_output)
return wid, obj
except Exception:
logger.error(f"Failed to resume workflow {wid}")
return (wid, None)
ret = workflow_storage.asyncio_run(
asyncio.gather(*[_resume_one(wid) for (wid, _) in all_failed]))
return [(wid, obj) for (wid, obj) in ret if obj is not None]
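# --- Hedged usage sketch (not part of execution.py) ---
# Driving the helpers above directly; in practice they are reached through the
# public workflow API. 'my_workflow' stands in for a Workflow object built
# elsewhere (e.g. by a @workflow.step function) and is an assumption here.
import ray
from ray.experimental.workflow.execution import run, get_status

ray.init()
output_ref = run(my_workflow, workflow_id="demo-run")  # returns an ObjectRef
print(ray.get(output_ref))
print(get_status("demo-run"))  # a WorkflowStatus value once the run settles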
| 40.345029 | 79 | 0.681258 | 838 | 6,899 | 5.400955 | 0.201671 | 0.061865 | 0.031816 | 0.041759 | 0.383341 | 0.35285 | 0.292974 | 0.194432 | 0.150243 | 0.124613 | 0 | 0.000566 | 0.232208 | 6,899 | 170 | 80 | 40.582353 | 0.85388 | 0.14031 | 0 | 0.184 | 0 | 0 | 0.078885 | 0.018021 | 0 | 0 | 0 | 0.011765 | 0.016 | 1 | 0.056 | false | 0 | 0.104 | 0 | 0.248 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9d66c8e24ecdddf4d2ecdc3b422d09645a2f485
| 3,021 |
py
|
Python
|
mro/stages/analyzer/run_differential_expression/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | 1 |
2019-03-29T04:05:58.000Z
|
2019-03-29T04:05:58.000Z
|
mro/stages/analyzer/run_differential_expression/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | null | null | null |
mro/stages/analyzer/run_differential_expression/__init__.py
|
qiangli/cellranger
|
046e24c3275cfbd4516a6ebc064594513a5c45b7
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (c) 2017 10X Genomics, Inc. All rights reserved.
#
import cellranger.analysis.diffexp as cr_diffexp
import cellranger.analysis.io as analysis_io
from cellranger.analysis.singlegenome import SingleGenomeAnalysis
import cellranger.h5_constants as h5_constants
import cellranger.analysis.constants as analysis_constants
import cellranger.matrix as cr_matrix
import cellranger.io as cr_io
import cellranger.library_constants as lib_constants
NUM_THREADS_MIN = 4
# TODO: Not clear why this stage takes > 1 thread. Martian thinks it does and kills it on long jobs.
__MRO__ = """
stage RUN_DIFFERENTIAL_EXPRESSION(
in h5 matrix_h5,
in h5 clustering_h5,
in bool skip,
in int random_seed,
in int max_clusters,
out h5 diffexp_h5,
out path diffexp_csv,
src py "stages/analyzer/run_differential_expression",
) split using (
in string clustering_key,
)
"""
def split(args):
if args.skip:
return {'chunks': [{'__mem_gb': h5_constants.MIN_MEM_GB}]}
chunks = []
# FIXME: Add one for reasons unknown
matrix_mem_gb = 1.8 * cr_matrix.CountMatrix.get_mem_gb_from_matrix_h5(args.matrix_h5)
chunk_mem_gb = int(max(matrix_mem_gb, h5_constants.MIN_MEM_GB))
# HACK - give big jobs more threads in order to avoid overloading a node
threads = min(cr_io.get_thread_request_from_mem_gb(chunk_mem_gb), NUM_THREADS_MIN)
    threads = 4  # NOTE: hard-coded override; the computed value above is currently unused
for key in SingleGenomeAnalysis.load_clustering_keys_from_h5(args.clustering_h5):
chunks.append({
'clustering_key': key,
'__mem_gb': chunk_mem_gb,
'__threads': threads,
})
return {'chunks': chunks, 'join': {'__mem_gb' : 1}}
def main(args, outs):
if args.skip:
return
matrix = cr_matrix.CountMatrix.load_h5_file(args.matrix_h5)
# For now, only compute for gene expression features
matrix = matrix.select_features_by_type(lib_constants.GENE_EXPRESSION_LIBRARY_TYPE)
clustering = SingleGenomeAnalysis.load_clustering_from_h5(args.clustering_h5, args.clustering_key)
diffexp = cr_diffexp.run_differential_expression(matrix, clustering.clusters)
with analysis_io.open_h5_for_writing(outs.diffexp_h5) as f:
cr_diffexp.save_differential_expression_h5(f, args.clustering_key, diffexp)
cr_diffexp.save_differential_expression_csv(args.clustering_key, diffexp, matrix, outs.diffexp_csv)
def join(args, outs, chunk_defs, chunk_outs):
if args.skip:
return
chunk_h5s = [chunk_out.diffexp_h5 for chunk_out in chunk_outs]
chunk_csv_dirs = [chunk_out.diffexp_csv for chunk_out in chunk_outs]
analysis_io.combine_h5_files(chunk_h5s, outs.diffexp_h5, [analysis_constants.ANALYSIS_H5_DIFFERENTIAL_EXPRESSION_GROUP,
analysis_constants.ANALYSIS_H5_KMEANS_DIFFERENTIAL_EXPRESSION_GROUP])
for csv_dir in chunk_csv_dirs:
cr_io.copytree(csv_dir, outs.diffexp_csv, allow_existing=True)
| 35.541176 | 125 | 0.735849 | 424 | 3,021 | 4.900943 | 0.316038 | 0.028874 | 0.034649 | 0.023099 | 0.16025 | 0.076035 | 0.023099 | 0 | 0 | 0 | 0 | 0.016353 | 0.190334 | 3,021 | 84 | 126 | 35.964286 | 0.833197 | 0.109897 | 0 | 0.087719 | 0 | 0 | 0.148825 | 0.027602 | 0 | 0 | 0 | 0.011905 | 0 | 1 | 0.052632 | false | 0 | 0.140351 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9d95781d1bacab44253ba285649d7b99ee1e33d
| 542 |
py
|
Python
|
src/vatic_checker/config.py
|
jonkeane/vatic-checker
|
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
|
[
"MIT"
] | null | null | null |
src/vatic_checker/config.py
|
jonkeane/vatic-checker
|
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
|
[
"MIT"
] | null | null | null |
src/vatic_checker/config.py
|
jonkeane/vatic-checker
|
fa8aec6946dcfd3f466b62f9c00d81bc43514b22
|
[
"MIT"
] | null | null | null |
localhost = "http://localhost/" # your local host
database = "mysql://root@localhost/vaticChecker" # server://user:pass@localhost/dbname
min_training = 2 # the minimum number of training videos to be considered
recaptcha_secret = "" # recaptcha secret for verification
duplicate_annotations = False # Should the server allow for duplicate annotations?
import os.path
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# TODO: remove on server
import os
os.environ['PYTHON_EGG_CACHE'] = '/tmp/apache'
| 38.714286 | 94 | 0.745387 | 72 | 542 | 5.486111 | 0.694444 | 0.04557 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002169 | 0.149446 | 542 | 13 | 95 | 41.692308 | 0.854664 | 0.394834 | 0 | 0 | 0 | 0 | 0.246106 | 0.109034 | 0 | 0 | 0 | 0.076923 | 0 | 1 | 0 | false | 0 | 0.3 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9d96360237e53141cd11d1271cee29b6140650f
| 8,233 |
py
|
Python
|
django/utils/timezone.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | 1 |
2015-11-11T12:20:45.000Z
|
2015-11-11T12:20:45.000Z
|
django/utils/timezone.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | null | null | null |
django/utils/timezone.py
|
graingert/django
|
784d0c261c76535dc760bc8d76793d92f35c1513
|
[
"BSD-3-Clause"
] | null | null | null |
"""Timezone helper functions.
This module uses pytz when it's available and falls back to local
implementations when it isn't.
"""
from datetime import datetime, timedelta, tzinfo
from threading import local
import time as _time
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
__all__ = [
'utc', 'get_default_timezone', 'get_current_timezone',
'activate', 'deactivate', 'override',
'is_naive', 'is_aware', 'make_aware', 'make_naive',
]
# UTC and local time zones
ZERO = timedelta(0)
class UTC(tzinfo):
"""
UTC implementation taken from Python's docs.
Used only when pytz isn't available.
"""
def __repr__(self):
return "<UTC>"
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
class LocalTimezone(tzinfo):
"""
Local time implementation taken from Python's docs.
Used only when pytz isn't available, and most likely inaccurate. If you're
having trouble with this class, don't waste your time, just install pytz.
"""
def __init__(self):
# This code is moved in __init__ to execute it as late as possible
# See get_default_timezone().
self.STDOFFSET = timedelta(seconds=-_time.timezone)
if _time.daylight:
self.DSTOFFSET = timedelta(seconds=-_time.altzone)
else:
self.DSTOFFSET = self.STDOFFSET
self.DSTDIFF = self.DSTOFFSET - self.STDOFFSET
tzinfo.__init__(self)
def __repr__(self):
return "<LocalTimezone>"
def utcoffset(self, dt):
if self._isdst(dt):
return self.DSTOFFSET
else:
return self.STDOFFSET
def dst(self, dt):
if self._isdst(dt):
return self.DSTDIFF
else:
return ZERO
def tzname(self, dt):
return _time.tzname[self._isdst(dt)]
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
stamp = _time.mktime(tt)
tt = _time.localtime(stamp)
return tt.tm_isdst > 0
utc = pytz.utc if pytz else UTC()
"""UTC time zone as a tzinfo instance."""
# In order to avoid accessing the settings at compile time,
# wrap the expression in a function and cache the result.
_localtime = None
def get_default_timezone():
"""
Returns the default time zone as a tzinfo instance.
This is the time zone defined by settings.TIME_ZONE.
See also :func:`get_current_timezone`.
"""
global _localtime
if _localtime is None:
if isinstance(settings.TIME_ZONE, basestring) and pytz is not None:
_localtime = pytz.timezone(settings.TIME_ZONE)
else:
_localtime = LocalTimezone()
return _localtime
# This function exists for consistency with get_current_timezone_name
def get_default_timezone_name():
"""
Returns the name of the default time zone.
"""
return _get_timezone_name(get_default_timezone())
_active = local()
def get_current_timezone():
"""
Returns the currently active time zone as a tzinfo instance.
"""
return getattr(_active, "value", get_default_timezone())
def get_current_timezone_name():
"""
Returns the name of the currently active time zone.
"""
return _get_timezone_name(get_current_timezone())
def _get_timezone_name(timezone):
"""
Returns the name of ``timezone``.
"""
try:
# for pytz timezones
return timezone.zone
except AttributeError:
# for regular tzinfo objects
local_now = datetime.now(timezone)
return timezone.tzname(local_now)
# Timezone selection functions.
# These functions don't change os.environ['TZ'] and call time.tzset()
# because it isn't thread safe.
def activate(timezone):
"""
Sets the time zone for the current thread.
The ``timezone`` argument must be an instance of a tzinfo subclass or a
time zone name. If it is a time zone name, pytz is required.
"""
if isinstance(timezone, tzinfo):
_active.value = timezone
elif isinstance(timezone, basestring) and pytz is not None:
_active.value = pytz.timezone(timezone)
else:
raise ValueError("Invalid timezone: %r" % timezone)
def deactivate():
"""
Unsets the time zone for the current thread.
Django will then use the time zone defined by settings.TIME_ZONE.
"""
if hasattr(_active, "value"):
del _active.value
class override(object):
"""
Temporarily set the time zone for the current thread.
This is a context manager that uses ``~django.utils.timezone.activate()``
to set the timezone on entry, and restores the previously active timezone
on exit.
The ``timezone`` argument must be an instance of a ``tzinfo`` subclass, a
    time zone name, or ``None``. If it is a time zone name, pytz is required.
If it is ``None``, Django enables the default time zone.
"""
def __init__(self, timezone):
self.timezone = timezone
self.old_timezone = getattr(_active, 'value', None)
def __enter__(self):
if self.timezone is None:
deactivate()
else:
activate(self.timezone)
def __exit__(self, exc_type, exc_value, traceback):
if self.old_timezone is not None:
_active.value = self.old_timezone
else:
del _active.value
# Templates
def template_localtime(value, use_tz=None):
"""
Checks if value is a datetime and converts it to local time if necessary.
If use_tz is provided and is not None, that will force the value to
be converted (or not), overriding the value of settings.USE_TZ.
This function is designed for use by the template engine.
"""
should_convert = (isinstance(value, datetime)
and (settings.USE_TZ if use_tz is None else use_tz)
and not is_naive(value)
and getattr(value, 'convert_to_local_time', True))
return localtime(value) if should_convert else value
# Utilities
def localtime(value, timezone=None):
"""
Converts an aware datetime.datetime to local time.
Local time is defined by the current time zone, unless another time zone
is specified.
"""
if timezone is None:
timezone = get_current_timezone()
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value
def now():
"""
Returns an aware or naive datetime.datetime, depending on settings.USE_TZ.
"""
if settings.USE_TZ:
# timeit shows that datetime.now(tz=utc) is 24% slower
return datetime.utcnow().replace(tzinfo=utc)
else:
return datetime.now()
# By design, these four functions don't perform any checks on their arguments.
# The caller should ensure that they don't receive an invalid value like None.
def is_aware(value):
"""
Determines if a given datetime.datetime is aware.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is not None and value.tzinfo.utcoffset(value) is not None
def is_naive(value):
"""
Determines if a given datetime.datetime is naive.
The logic is described in Python's docs:
http://docs.python.org/library/datetime.html#datetime.tzinfo
"""
return value.tzinfo is None or value.tzinfo.utcoffset(value) is None
def make_aware(value, timezone):
"""
Makes a naive datetime.datetime in a given time zone aware.
"""
if hasattr(timezone, 'localize'):
# available for pytz time zones
return timezone.localize(value, is_dst=None)
else:
# may be wrong around DST changes
return value.replace(tzinfo=timezone)
def make_naive(value, timezone):
"""
Makes an aware datetime.datetime naive in a given time zone.
"""
value = value.astimezone(timezone)
if hasattr(timezone, 'normalize'):
# available for pytz time zones
value = timezone.normalize(value)
return value.replace(tzinfo=None)
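# --- Hedged usage sketch (not part of django/utils/timezone.py) ---
# Exercising the helpers above; assumes Django settings are configured with
# USE_TZ = True so that now() returns an aware datetime.
from django.utils import timezone

aware_now = timezone.now()
with timezone.override(timezone.utc):            # temporary thread-local zone
    print(timezone.get_current_timezone_name())  # -> "UTC"
naive = timezone.make_naive(aware_now, timezone.get_default_timezone())
print(timezone.is_naive(naive))                  # -> True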
| 28.195205 | 81 | 0.66197 | 1,098 | 8,233 | 4.839709 | 0.220401 | 0.034626 | 0.023711 | 0.009785 | 0.285284 | 0.252352 | 0.228453 | 0.176139 | 0.125329 | 0.125329 | 0 | 0.000972 | 0.24997 | 8,233 | 291 | 82 | 28.292096 | 0.859595 | 0.383457 | 0 | 0.242424 | 0 | 0 | 0.045045 | 0.004505 | 0 | 0 | 0 | 0 | 0 | 1 | 0.204545 | false | 0 | 0.045455 | 0.045455 | 0.469697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9e551f94d290cc9b470d1fddfc0e91666dab7ba
| 444 |
py
|
Python
|
setup.py
|
zhanghang1989/notedown
|
b0fa1eac88d1cd7fa2261d6c454f82669e6f552b
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
zhanghang1989/notedown
|
b0fa1eac88d1cd7fa2261d6c454f82669e6f552b
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
zhanghang1989/notedown
|
b0fa1eac88d1cd7fa2261d6c454f82669e6f552b
|
[
"BSD-2-Clause"
] | null | null | null |
from setuptools import setup
# create __version__
exec(open('./_version.py').read())
setup(
name="notedown",
version=__version__,
description="Convert markdown to IPython notebook.",
author="Aaron O'Leary",
author_email='dev@aaren.me',
url='http://github.com/aaren/notedown',
install_requires=['ipython', ],
entry_points={
'console_scripts': [
'notedown = notedown:cli',
],
}
)
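# Hedged usage note (assumption): after `pip install .`, the console script
# declared in entry_points is available on PATH, e.g.:
#
#   notedown input.md > output.ipynb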
| 22.2 | 56 | 0.628378 | 47 | 444 | 5.659574 | 0.787234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.218468 | 444 | 19 | 57 | 23.368421 | 0.766571 | 0.040541 | 0 | 0 | 0 | 0 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9e8867f9d8fa5dbea3f62a0b298eac5f535d37a
| 9,499 |
py
|
Python
|
src/bots/test/test_inputs.py
|
drewbitt/lightnovel-crawler
|
fa9546ad9dcff49c75296b0b8772f6578689adcc
|
[
"Apache-2.0"
] | 1 |
2019-03-10T13:02:23.000Z
|
2019-03-10T13:02:23.000Z
|
src/bots/test/test_inputs.py
|
drewbitt/lightnovel-crawler
|
fa9546ad9dcff49c75296b0b8772f6578689adcc
|
[
"Apache-2.0"
] | null | null | null |
src/bots/test/test_inputs.py
|
drewbitt/lightnovel-crawler
|
fa9546ad9dcff49c75296b0b8772f6578689adcc
|
[
"Apache-2.0"
] | null | null | null |
from base64 import decodebytes as b64decode  # decodestring was removed in Python 3.9
allowed_failures = [
'https://ranobelib.me/',
'https://www.aixdzs.com/',
'https://webnovelindonesia.com/',
b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode()
]
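# Maps each supported source's base URL to sample user inputs: full novel URLs
# and/or bare search queries used to exercise that crawler end-to-end.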
test_user_inputs = {
b64decode("aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS8=".encode()).decode(): [
b64decode(
"aHR0cHM6Ly9jb21yYWRlbWFvLmNvbS9ub3ZlbC90c3VydWdpLW5vLWpvb3UtdG8tcmFrdWluLW5vLWtvLw==".encode()).decode()
],
'https://novelsrock.com/': [
'https://novelsrock.com/novel/the-returner/',
'kuro'
],
'http://gravitytales.com/': [
'http://gravitytales.com/posts/novel/a-dragons-curiosity'
],
'http://novelfull.com/': [
'http://novelfull.com/dungeon-defense.html',
'Sinister Ex Girlfriend',
],
'http://www.machinenoveltranslation.com/': [
'http://www.machinenoveltranslation.com/a-thought-through-eternity',
],
'http://zenithnovels.com/': [
'http://zenithnovels.com/infinity-armament/',
],
'https://anythingnovel.com/': [
'https://anythingnovel.com/novel/king-of-gods/',
],
'https://boxnovel.com/': [
'https://boxnovel.com/novel/the-rest-of-my-life-is-for-you/',
'cultivation chat',
],
'https://crescentmoon.blog/': [
'https://crescentmoon.blog/dark-blue-and-moonlight/',
],
'https://litnet.com/': [
'https://litnet.com/en/book/candy-lips-1-b106232',
'candy lips',
],
'https://lnmtl.com/': [
'https://lnmtl.com/novel/the-strongest-dan-god',
],
'https://m.chinesefantasynovels.com/': [
'https://m.chinesefantasynovels.com/3838/',
],
'https://m.novelspread.com/': [
'https://m.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan',
],
'https://m.romanticlovebooks.com/': [
'https://m.romanticlovebooks.com/xuanhuan/207.html',
],
'http://www.tiknovel.com/': [
'http://www.tiknovel.com/book/index?id=717',
],
'https://m.wuxiaworld.co/': [
'https://m.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/',
],
'https://meionovel.id/': [
'https://meionovel.id/novel/the-legendary-mechanic/',
],
'https://mtled-novels.com/': [
'https://mtled-novels.com/novels/great-ruler/',
'great ruler'
],
'https://bestlightnovel.com/': [
'https://bestlightnovel.com/novel_888103800',
'martial'
],
'https://novelplanet.com/': [
'https://novelplanet.com/Novel/Returning-from-the-Immortal-World',
'immortal'
],
'https://www.volarenovels.com/': [
'https://www.volarenovels.com/novel/adorable-creature-attacks',
],
'https://webnovel.online/': [
'https://webnovel.online/full-marks-hidden-marriage-pick-up-a-son-get-a-free-husband',
],
'https://www.idqidian.us/': [
'https://www.idqidian.us/novel/peerless-martial-god/'
],
'https://www.novelall.com/': [
'https://www.novelall.com/novel/Virtual-World-Close-Combat-Mage.html',
'combat'
],
'https://www.novelspread.com/': [
'https://www.novelspread.com/novel/the-legend-of-the-concubine-s-daughter-minglan'
],
'https://www.readlightnovel.org/': [
'https://www.readlightnovel.org/top-furious-doctor-soldier'
],
'https://www.romanticlovebooks.com/': [
'https://www.romanticlovebooks.com/xianxia/251.html'
],
'https://www.royalroad.com/': [
'https://www.royalroad.com/fiction/21220/mother-of-learning',
'mother'
],
'https://www.scribblehub.com/': [
'https://www.scribblehub.com/series/73550/modern-life-of-the-exalted-immortal/',
'cultivation'
],
'https://www.webnovel.com/': [
'https://www.webnovel.com/book/8212987205006305/Trial-Marriage-Husband%3A-Need-to-Work-Hard',
'martial',
],
'https://www.worldnovel.online/': [
'https://www.worldnovel.online/novel/solo-leveling/',
],
'https://www.wuxiaworld.co/': [
'https://www.wuxiaworld.co/Reincarnation-Of-The-Strongest-Sword-God/',
'sword'
],
'https://rewayat.club/': [
'https://rewayat.club/novel/almighty-sword-domain/'
],
'https://www.wuxiaworld.com/': [
'https://www.wuxiaworld.com/novel/martial-god-asura',
'martial',
],
'https://creativenovels.com/': [
'https://creativenovels.com/novel/eternal-reverence/',
],
'https://www.tapread.com/': [
'https://www.tapread.com/book/detail/80',
],
'http://www.tapread.com/': [
'http://www.tapread.com/book/detail/80',
],
'https://readnovelfull.com/': [
'https://readnovelfull.com/lord-of-all-realms.html',
'cultivation'
],
'https://myoniyonitranslations.com/': [
'https://myoniyonitranslations.com/top-management/',
'https://myoniyonitranslations.com/category/god-of-tennis',
],
'https://babelnovel.com/': [
'https://babelnovel.com/books/ceo-let-me-go',
'dazzle Good'
],
'https://wuxiaworld.online/': [
'https://wuxiaworld.online/trial-marriage-husband-need-to-work-hard',
'cultivation',
],
'https://www.novelv.com/': [
'https://www.novelv.com/0/349/'
],
'http://fullnovel.live/': [
'http://fullnovel.live/novel-a-will-eternal',
'will eternal',
],
'https://www.noveluniverse.com/': [
'https://www.noveluniverse.com/index/novel/info/id/15.html'
],
'https://novelraw.blogspot.com/': [
'https://novelraw.blogspot.com/2019/03/dragon-king-son-in-law-mtl.html'
],
'https://light-novel.online/': [
'https://light-novel.online/great-tyrannical-deity',
'tyrannical'
],
'https://www.rebirth.online/': [
'https://www.rebirth.online/novel/upside-down'
],
'https://www.jieruihao.cn/': [
'https://www.jieruihao.cn/novel/against-the-gods/',
],
'https://www.wattpad.com/': [
'https://www.wattpad.com/story/87505567-loving-mr-jerkface-%E2%9C%94%EF%B8%8F'
],
'https://novelgo.id/': [
'https://novelgo.id/novel/the-mightiest-leveling-system/'
],
'https://yukinovel.me/': [
'https://yukinovel.me/novel/the-second-coming-of-avarice/',
],
'https://www.asianhobbyist.com/': [
'https://www.asianhobbyist.com/series/that-time-i-got-reincarnated-as-a-slime/'
],
'https://kisslightnovels.info/': [
'https://kisslightnovels.info/novel/solo-leveling/'
],
'https://novelonlinefull.com/': [
'https://novelonlinefull.com/novel/abo1520855001564322110'
],
    'https://www.machine-translation.org/': [
        'https://www.machine-translation.org/novel/bace21c9b10d34e9/world-of-cultivation.html',
        'https://www.machine-translation.org/novel/a5eee127d75da0d2/long-live-summons.html'
    ],
'https://www.fanfiction.net/': [
'https://www.fanfiction.net/s/7268451/1/Facebook-For-wizards'
],
'https://www.mtlnovel.com/': [
'https://www.mtlnovel.com/trapped-in-a-typical-idol-drama/'
],
'https://wordexcerpt.com/': [
'https://wordexcerpt.com/series/transmigration-raising-the-child-of-the-male-lead-boss/'
],
'https://www.translateindo.com/': [
'https://www.translateindo.com/demon-wang-golden-status-favoured-fei/'
],
'https://ranobelib.me/': [
'https://ranobelib.me/sozvezdie-klinka'
],
'https://novelringan.com/': [
'https://novelringan.com/series/the-most-loving-marriage-in-history-master-mus-pampered-wife/'
],
'https://wuxiaworld.site/': [
'https://wuxiaworld.site/novel/only-i-level-up/'
],
'https://id.mtlnovel.com/': [
'https://id.mtlnovel.com/the-strongest-plane-becomes-god/'
],
'https://www.shinsori.com/': [
'https://www.shinsori.com/akuyaku-reijou-ni-nanka-narimasen/'
],
'https://www.flying-lines.com/': [
'https://www.flying-lines.com/novel/one-useless-rebirth'
],
'https://book.qidian.com/': [
'https://book.qidian.com/info/1016597088'
],
'https://kiss-novel.com/': [
'https://kiss-novel.com/the-first-order'
],
'https://www.aixdzs.com/': [
'https://www.aixdzs.com/d/66/66746/'
],
'https://webnovelonline.com/': [
'https://webnovelonline.com/novel/the_anarchic_consort'
],
'https://4scanlation.com/': [
'https://4scanlation.com/tensei-shitara-slime-datta-ken-wn/'
],
'https://listnovel.com/': [
'https://listnovel.com/novel/my-sassy-crown-princess/'
],
'https://tomotranslations.com/': [
'https://tomotranslations.com/this-hero-is-invincible-but-too-cautious/'
],
'https://www.wuxialeague.com/': [
'https://www.wuxialeague.com/novel/245/'
],
'http://liberspark.com/': [
'http://liberspark.com/novel/black-irons-glory'
],
'https://webnovelindonesia.com/': [
'https://webnovelindonesia.com/nv/almighty-student'
],
'http://tiknovel.com/': [
'http://tiknovel.com/book/index?id=717'
],
'http://boxnovel.org/': [
'http://boxnovel.org/novel/martial-god-asura'
]
}
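# Added sketch (not part of the original file): the b64decode calls above keep
# one site's URLs out of plain-text search, presumably deliberately; the
# round-trip works like this (example.com is a stand-in URL):
#
#     from base64 import encodebytes
#     encoded = encodebytes("https://example.com/".encode()).decode()
#     assert b64decode(encoded.encode()).decode() == "https://example.com/"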
| 34.922794 | 117 | 0.596694 | 1,009 | 9,499 | 5.611497 | 0.331021 | 0.08195 | 0.036913 | 0.018368 | 0.11798 | 0.102437 | 0.084776 | 0.084776 | 0.068174 | 0.044507 | 0 | 0.024765 | 0.192336 | 9,499 | 271 | 118 | 35.051661 | 0.713243 | 0 | 0 | 0.33829 | 0 | 0.052045 | 0.713759 | 0.015581 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.003717 | 0 | 0.003717 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9ec7fb034397cf9a445f613d02c81768a1461eb
| 3,410 |
py
|
Python
|
bokeh/client/util.py
|
areaweb/bokeh
|
9d131e45d626a912e85aee5b2647139c194dc893
|
[
"BSD-3-Clause"
] | 1 |
2021-01-31T22:13:13.000Z
|
2021-01-31T22:13:13.000Z
|
bokeh/client/util.py
|
adsbxchange/bokeh
|
47aa8f8420944c47e876c1c36be182d257c14b87
|
[
"BSD-3-Clause"
] | 1 |
2017-01-12T00:37:38.000Z
|
2017-01-12T00:37:38.000Z
|
bokeh/client/util.py
|
adsbxchange/bokeh
|
47aa8f8420944c47e876c1c36be182d257c14b87
|
[
"BSD-3-Clause"
] | null | null | null |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Internal utility functions used by ``bokeh.client``
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
from bokeh.util.api import public, internal ; public, internal  # re-reference to silence unused-import warnings
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Public API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Internal API
#-----------------------------------------------------------------------------
@internal((1,0,0))
def server_url_for_websocket_url(url):
    ''' Convert a ``ws(s)`` URL for a Bokeh server websocket endpoint into the
    appropriate ``http(s)`` URL for the server.
Args:
url (str):
            A ``ws(s)`` URL ending in ``/ws``
Returns:
str:
The corresponding ``http(s)`` URL.
Raises:
ValueError:
If the input URL is not of the proper form.
'''
if url.startswith("ws:"):
reprotocoled = "http" + url[2:]
elif url.startswith("wss:"):
reprotocoled = "https" + url[3:]
else:
raise ValueError("URL has non-websocket protocol " + url)
if not reprotocoled.endswith("/ws"):
raise ValueError("websocket URL does not end in /ws")
return reprotocoled[:-2]
@internal((1,0,0))
def websocket_url_for_server_url(url):
    ''' Convert an ``http(s)`` URL for a Bokeh server into the appropriate
    ``ws(s)`` URL for its websocket endpoint
Args:
url (str):
An ``http(s)`` URL
Returns:
str:
The corresponding ``ws(s)`` URL ending in ``/ws``
Raises:
ValueError:
If the input URL is not of the proper form.
'''
if url.startswith("http:"):
reprotocoled = "ws" + url[4:]
elif url.startswith("https:"):
reprotocoled = "wss" + url[5:]
else:
raise ValueError("URL has unknown protocol " + url)
if reprotocoled.endswith("/"):
return reprotocoled + "ws"
else:
return reprotocoled + "/ws"
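# Usage sketch (added illustration, not part of the original module); the two
# helpers are inverses at the protocol level:
#
#     >>> server_url_for_websocket_url("ws://localhost:5006/ws")
#     'http://localhost:5006/'
#     >>> websocket_url_for_server_url("http://localhost:5006/")
#     'ws://localhost:5006/ws'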
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 31.574074 | 82 | 0.389443 | 271 | 3,410 | 4.830258 | 0.383764 | 0.024446 | 0.018335 | 0.016807 | 0.210848 | 0.151261 | 0.097785 | 0.097785 | 0.097785 | 0.097785 | 0 | 0.006702 | 0.168622 | 3,410 | 107 | 83 | 31.869159 | 0.455026 | 0.65044 | 0 | 0.185185 | 0 | 0 | 0.123928 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.111111 | 0 | 0.296296 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d9f04eac1f39d4c14950ae0caf3dff21f18defd4
| 84,990 |
py
|
Python
|
source/browseMode.py
|
neal-hub/nvda-test
|
4c3a67b2eafa9721c5de3f671d10e60ab2d43865
|
[
"bzip2-1.0.6"
] | 1 |
2022-02-20T23:10:39.000Z
|
2022-02-20T23:10:39.000Z
|
source/browseMode.py
|
neal-hub/nvda-test
|
4c3a67b2eafa9721c5de3f671d10e60ab2d43865
|
[
"bzip2-1.0.6"
] | null | null | null |
source/browseMode.py
|
neal-hub/nvda-test
|
4c3a67b2eafa9721c5de3f671d10e60ab2d43865
|
[
"bzip2-1.0.6"
] | null | null | null |
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2007-2021 NV Access Limited, Babbage B.V., James Teh, Leonard de Ruijter,
# Thomas Stivers, Accessolutions, Julien Cochuyt
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
from typing import Any, Callable, Union
import os
import itertools
import collections
import winsound
import time
import weakref
import wx
import core
from logHandler import log
import documentBase
import review
import scriptHandler
import eventHandler
import nvwave
import queueHandler
import gui
import ui
import cursorManager
from scriptHandler import script, isScriptWaiting, willSayAllResume
import aria
import controlTypes
from controlTypes import OutputReason
import config
import textInfos
import braille
import vision
import speech
from speech import sayAll
import treeInterceptorHandler
import inputCore
import api
import gui.guiHelper
from gui.dpiScalingHelper import DpiScalingHelperMixinWithoutInit
from NVDAObjects import NVDAObject
import gui.contextHelp
from abc import ABCMeta, abstractmethod
import globalVars
from typing import Optional
def reportPassThrough(treeInterceptor,onlyIfChanged=True):
"""Reports the pass through mode if it has changed.
@param treeInterceptor: The current Browse Mode treeInterceptor.
@type treeInterceptor: L{BrowseModeTreeInterceptor}
	@param onlyIfChanged: if true, reporting will not happen if the last reportPassThrough call reported the same thing.
@type onlyIfChanged: bool
"""
if not onlyIfChanged or treeInterceptor.passThrough != reportPassThrough.last:
if config.conf["virtualBuffers"]["passThroughAudioIndication"]:
sound = "focusMode.wav" if treeInterceptor.passThrough else "browseMode.wav"
nvwave.playWaveFile(os.path.join(globalVars.appDir, "waves", sound))
else:
if treeInterceptor.passThrough:
# Translators: The mode to interact with controls in documents
ui.message(_("Focus mode"))
else:
# Translators: The mode that presents text in a flat representation
# that can be navigated with the cursor keys like in a text document
ui.message(_("Browse mode"))
reportPassThrough.last = treeInterceptor.passThrough
reportPassThrough.last = False
def mergeQuickNavItemIterators(iterators,direction="next"):
"""
Merges multiple iterators that emit L{QuickNavItem} objects, yielding them from first to last.
They are sorted using min or max (__lt__ should be implemented on the L{QuickNavItem} objects).
	@param iterators: the iterators you want to merge.
	@type iterators: sequence of iterators that emit L{QuickNavItem} objects.
@param direction: the direction these iterators are searching (e.g. next, previous)
@type direction: string
"""
finder=min if direction=="next" else max
curValues=[]
	# Populate a list with all iterators and their corresponding first value
for it in iterators:
try:
val=next(it)
except StopIteration:
continue
curValues.append((it,val))
# Until all iterators have been used up,
# Find the first (minimum or maximum) of all the values,
# emit that, and update the list with the next available value for the iterator whose value was emitted.
while len(curValues)>0:
first=finder(curValues,key=lambda x: x[1])
curValues.remove(first)
it,val=first
yield val
try:
newVal=next(it)
except StopIteration:
continue
curValues.append((it,newVal))
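# Added illustration (not part of the original module): the merge relies only
# on "<" comparisons, so pre-sorted iterators of plain integers demonstrate it:
#
#     >>> list(mergeQuickNavItemIterators([iter([1, 4, 9]), iter([2, 3])]))
#     [1, 2, 3, 4, 9]
#     >>> list(mergeQuickNavItemIterators([iter([9, 4, 1]), iter([3, 2])], "previous"))
#     [9, 4, 3, 2, 1]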
class QuickNavItem(object, metaclass=ABCMeta):
""" Emitted by L{BrowseModeTreeInterceptor._iterNodesByType}, this represents one of many positions in a browse mode document, based on the type of item being searched for (e.g. link, heading, table etc)."""
itemType=None #: The type of items searched for (e.g. link, heading, table etc)
label=None #: The label that should represent this item in the Elements list.
isAfterSelection=False #: Is this item positioned after the caret in the document? Used by the elements list to place its own selection.
def __init__(self,itemType,document):
"""
@param itemType: the type that was searched for (e.g. link, heading, table etc)
@type itemType: string
@param document: the browse mode document this item is a part of.
@type document: L{BrowseModeTreeInterceptor}
"""
self.itemType=itemType
self.document=document
@abstractmethod
def isChild(self,parent):
"""
Is this item a child of the given parent?
This is used when representing items in a hierarchical tree structure, such as the Elements List.
		@param parent: the item of which this item may be a child.
@type parent: L{QuickNavItem}
@return: True if this item is a child, false otherwise.
@rtype: bool
"""
raise NotImplementedError
@abstractmethod
def report(self,readUnit=None):
"""
Reports the contents of this item.
		@param readUnit: the optional unit (e.g. line, paragraph) that should be used to announce the item position when moved to. If not given, then the full size of the item is used.
@type readUnit: a L{textInfos}.UNIT_* constant.
"""
raise NotImplementedError
@abstractmethod
def moveTo(self):
"""
Moves the browse mode caret or focus to this item.
"""
raise NotImplementedError
def activate(self):
"""
Activates this item's position. E.g. follows a link, presses a button etc.
"""
raise NotImplementedError
def rename(self,newName):
"""
Renames this item with the new name.
"""
raise NotImplementedError
@property
def isRenameAllowed(self):
return False
class TextInfoQuickNavItem(QuickNavItem):
""" Represents a quick nav item in a browse mode document who's positions are represented by a L{textInfos.TextInfo}. """
def __init__(self,itemType,document,textInfo):
"""
See L{QuickNavItem.__init__} for itemType and document argument definitions.
@param textInfo: the textInfo position this item represents.
@type textInfo: L{textInfos.TextInfo}
"""
self.textInfo=textInfo
super(TextInfoQuickNavItem,self).__init__(itemType,document)
def __lt__(self,other):
return self.textInfo.compareEndPoints(other.textInfo,"startToStart")<0
@property
def obj(self):
return self.textInfo.basePosition if isinstance(self.textInfo.basePosition,NVDAObject) else None
@property
def label(self):
return self.textInfo.text.strip()
def isChild(self,parent):
if parent.textInfo.isOverlapping(self.textInfo):
return True
return False
def report(self,readUnit=None):
info=self.textInfo
# If we are dealing with a form field, ensure we don't read the whole content if it's an editable text.
if self.itemType == "formField":
if self.obj.role == controlTypes.Role.EDITABLETEXT:
readUnit = textInfos.UNIT_LINE
if readUnit:
fieldInfo = info.copy()
info.collapse()
info.move(readUnit, 1, endPoint="end")
if info.compareEndPoints(fieldInfo, "endToEnd") > 0:
# We've expanded past the end of the field, so limit to the end of the field.
info.setEndPoint(fieldInfo, "endToEnd")
speech.speakTextInfo(info, reason=OutputReason.QUICKNAV)
def activate(self):
self.textInfo.obj._activatePosition(info=self.textInfo)
def moveTo(self):
if self.document.passThrough and getattr(self, "obj", False):
if controlTypes.State.FOCUSABLE in self.obj.states:
self.obj.setFocus()
return
self.document.passThrough = False
reportPassThrough(self.document)
info = self.textInfo.copy()
info.collapse()
self.document._set_selection(info, reason=OutputReason.QUICKNAV)
@property
def isAfterSelection(self):
caret=self.document.makeTextInfo(textInfos.POSITION_CARET)
return self.textInfo.compareEndPoints(caret, "startToStart") > 0
def _getLabelForProperties(self, labelPropertyGetter: Callable[[str], Optional[Any]]):
"""
Fetches required properties for this L{TextInfoQuickNavItem} and constructs a label to be shown in an elements list.
This can be used by subclasses to implement the L{label} property.
		@param labelPropertyGetter: A callable taking 1 argument, specifying the property to fetch.
For example, if L{itemType} is landmark, the callable must return the landmark type when "landmark" is passed as the property argument.
Alternative property names might be name or value.
The callable must return None if the property doesn't exist.
		An expected callable might be the get method on a L{Dict},
or "lambda property: getattr(self.obj, property, None)" for an L{NVDAObject}.
"""
content = self.textInfo.text.strip()
if self.itemType == "heading":
# Output: displayed text of the heading.
return content
labelParts = None
name = labelPropertyGetter("name")
if self.itemType == "landmark":
landmark = aria.landmarkRoles.get(labelPropertyGetter("landmark"))
# Example output: main menu; navigation
labelParts = (name, landmark)
else:
role: Union[controlTypes.Role, int] = labelPropertyGetter("role")
role = controlTypes.Role(role)
roleText = role.displayString
			# Translators: Reported label in the elements list for an element which has no name and value
unlabeled = _("Unlabeled")
realStates = labelPropertyGetter("states")
labeledStates = " ".join(controlTypes.processAndLabelStates(role, realStates, OutputReason.FOCUS))
if self.itemType == "formField":
if role in (
controlTypes.Role.BUTTON,
controlTypes.Role.DROPDOWNBUTTON,
controlTypes.Role.TOGGLEBUTTON,
controlTypes.Role.SPLITBUTTON,
controlTypes.Role.MENUBUTTON,
controlTypes.Role.DROPDOWNBUTTONGRID,
controlTypes.Role.TREEVIEWBUTTON
):
# Example output: Mute; toggle button; pressed
labelParts = (content or name or unlabeled, roleText, labeledStates)
else:
# Example output: Find a repository...; edit; has auto complete; NVDA
labelParts = (name or unlabeled, roleText, labeledStates, content)
elif self.itemType in ("link", "button"):
# Example output: You have unread notifications; visited
labelParts = (content or name or unlabeled, labeledStates)
if labelParts:
label = "; ".join(lp for lp in labelParts if lp)
else:
label = content
return label
class BrowseModeTreeInterceptor(treeInterceptorHandler.TreeInterceptor):
scriptCategory = inputCore.SCRCAT_BROWSEMODE
_disableAutoPassThrough = False
APPLICATION_ROLES = (controlTypes.Role.APPLICATION, controlTypes.Role.DIALOG)
def _get_currentNVDAObject(self):
raise NotImplementedError
def _get_currentFocusableNVDAObject(self):
return self.makeTextInfo(textInfos.POSITION_CARET).focusableNVDAObjectAtStart
def event_treeInterceptor_gainFocus(self):
"""Triggered when this browse mode interceptor gains focus.
This event is only fired upon entering this treeInterceptor when it was not the current treeInterceptor before.
This is different to L{event_gainFocus}, which is fired when an object inside this treeInterceptor gains focus, even if that object is in the same treeInterceptor.
"""
reportPassThrough(self)
ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES = frozenset({
controlTypes.Role.COMBOBOX,
controlTypes.Role.EDITABLETEXT,
controlTypes.Role.LIST,
controlTypes.Role.LISTITEM,
controlTypes.Role.SLIDER,
controlTypes.Role.TABCONTROL,
controlTypes.Role.MENUBAR,
controlTypes.Role.POPUPMENU,
controlTypes.Role.TREEVIEW,
controlTypes.Role.TREEVIEWITEM,
controlTypes.Role.SPINBUTTON,
controlTypes.Role.TABLEROW,
controlTypes.Role.TABLECELL,
controlTypes.Role.TABLEROWHEADER,
controlTypes.Role.TABLECOLUMNHEADER,
})
SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES = frozenset({
controlTypes.Role.LISTITEM,
controlTypes.Role.RADIOBUTTON,
controlTypes.Role.TAB,
controlTypes.Role.MENUITEM,
controlTypes.Role.RADIOMENUITEM,
controlTypes.Role.CHECKMENUITEM,
})
IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES = frozenset({
controlTypes.Role.MENUITEM,
controlTypes.Role.RADIOMENUITEM,
controlTypes.Role.CHECKMENUITEM,
controlTypes.Role.TABLECELL,
})
def shouldPassThrough(self, obj, reason: Optional[OutputReason] = None):
"""Determine whether pass through mode should be enabled (focus mode) or disabled (browse mode) for a given object.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@param reason: The reason for this query;
one of the output reasons, or C{None} for manual pass through mode activation by the user.
@return: C{True} if pass through mode (focus mode) should be enabled, C{False} if it should be disabled (browse mode).
"""
if reason and (
self.disableAutoPassThrough
or (reason == OutputReason.FOCUS and not config.conf["virtualBuffers"]["autoPassThroughOnFocusChange"])
or (reason == OutputReason.CARET and not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"])
):
# This check relates to auto pass through and auto pass through is disabled, so don't change the pass through state.
return self.passThrough
if reason == OutputReason.QUICKNAV:
return False
states = obj.states
role = obj.role
if controlTypes.State.EDITABLE in states and controlTypes.State.UNAVAILABLE not in states:
return True
# Menus sometimes get focus due to menuStart events even though they don't report as focused/focusable.
if not obj.isFocusable and controlTypes.State.FOCUSED not in states and role != controlTypes.Role.POPUPMENU:
return False
# many controls that are read-only should not switch to passThrough.
# However, there are exceptions.
if controlTypes.State.READONLY in states:
# #13221: For Slack message lists, and the MS Edge downloads window, switch to passthrough
# even though the list item and list are read-only, but focusable.
if (
role == controlTypes.Role.LISTITEM and controlTypes.State.FOCUSED in states
and obj.parent.role == controlTypes.Role.LIST and controlTypes.State.FOCUSABLE in obj.parent.states
):
return True
# Certain controls such as combo boxes and readonly edits are read-only but still interactive.
# #5118: read-only ARIA grids should also be allowed (focusable table cells, rows and headers).
if role not in (
controlTypes.Role.EDITABLETEXT, controlTypes.Role.COMBOBOX, controlTypes.Role.TABLEROW,
controlTypes.Role.TABLECELL, controlTypes.Role.TABLEROWHEADER, controlTypes.Role.TABLECOLUMNHEADER
):
return False
# Any roles or states for which we always switch to passThrough
if role in self.ALWAYS_SWITCH_TO_PASS_THROUGH_ROLES or controlTypes.State.EDITABLE in states:
return True
# focus is moving to this control. Perhaps after pressing tab or clicking a button that brings up a menu (via javascript)
if reason == OutputReason.FOCUS:
if role in self.SWITCH_TO_PASS_THROUGH_ON_FOCUS_ROLES:
return True
# If this is a focus change, pass through should be enabled for certain ancestor containers.
			# this is done last for performance considerations. Walking up through the parents could be costly
while obj and obj != self.rootNVDAObject:
if obj.role == controlTypes.Role.TOOLBAR:
return True
obj = obj.parent
return False
def _get_shouldTrapNonCommandGestures(self):
return config.conf['virtualBuffers']['trapNonCommandGestures']
def script_trapNonCommandGesture(self,gesture):
winsound.PlaySound("default",1)
	singleLetterNavEnabled=True #: Whether single letter navigation scripts should be active (true) or whether these letters should fall through to the application.
def getAlternativeScript(self,gesture,script):
if self.passThrough or not gesture.isCharacter:
return script
if not self.singleLetterNavEnabled:
return None
if not script and self.shouldTrapNonCommandGestures:
script=self.script_trapNonCommandGesture
return script
def script_toggleSingleLetterNav(self,gesture):
if self.singleLetterNavEnabled:
self.singleLetterNavEnabled=False
# Translators: Reported when single letter navigation in browse mode is turned off.
ui.message(_("Single letter navigation off"))
else:
self.singleLetterNavEnabled=True
# Translators: Reported when single letter navigation in browse mode is turned on.
ui.message(_("Single letter navigation on"))
# Translators: the description for the toggleSingleLetterNavigation command in browse mode.
script_toggleSingleLetterNav.__doc__=_("Toggles single letter navigation on and off. When on, single letter keys in browse mode jump to various kinds of elements on the page. When off, these keys are passed to the application")
def _get_ElementsListDialog(self):
return ElementsListDialog
def _iterNodesByType(self,itemType,direction="next",pos=None):
"""
Yields L{QuickNavItem} objects representing the ordered positions in this document according to the type being searched for (e.g. link, heading, table etc).
@param itemType: the type being searched for (e.g. link, heading, table etc)
@type itemType: string
@param direction: the direction in which to search (next, previous, up)
@type direction: string
@param pos: the position in the document from where to start the search.
@type pos: Usually an L{textInfos.TextInfo}
@raise NotImplementedError: This type is not supported by this BrowseMode implementation
"""
raise NotImplementedError
def _iterNotLinkBlock(self, direction="next", pos=None):
raise NotImplementedError
def _quickNavScript(self,gesture, itemType, direction, errorMessage, readUnit):
if itemType=="notLinkBlock":
iterFactory=self._iterNotLinkBlock
else:
iterFactory=lambda direction,info: self._iterNodesByType(itemType,direction,info)
info=self.selection
try:
item = next(iterFactory(direction, info))
except NotImplementedError:
# Translators: a message when a particular quick nav command is not supported in the current document.
ui.message(_("Not supported in this document"))
return
except StopIteration:
ui.message(errorMessage)
return
# #8831: Report before moving because moving might change the focus, which
# might mutate the document, potentially invalidating info if it is
# offset-based.
if not gesture or not willSayAllResume(gesture):
item.report(readUnit=readUnit)
item.moveTo()
@classmethod
def addQuickNav(
cls,
itemType: str,
key: Optional[str],
nextDoc: str,
nextError: str,
prevDoc: str,
prevError: str,
readUnit: Optional[str] = None
):
"""Adds a script for the given quick nav item.
		@param itemType: The type of item, e.g. "heading" or "link".
		@param key: The quick navigation key to bind to the script.
		Shift is automatically added for the previous item gesture, e.g. h for heading.
If C{None} is provided, the script is unbound by default.
@param nextDoc: The command description to bind to the script that yields the next quick nav item.
@param nextError: The error message if there are no more quick nav items of type itemType in this direction.
@param prevDoc: The command description to bind to the script that yields the previous quick nav item.
@param prevError: The error message if there are no more quick nav items of type itemType in this direction.
@param readUnit: The unit (one of the textInfos.UNIT_* constants) to announce when moving to this type of item.
For example, only the line is read when moving to tables to avoid reading a potentially massive table.
If None, the entire item will be announced.
"""
scriptSuffix = itemType[0].upper() + itemType[1:]
scriptName = "next%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "next", nextError, readUnit)
script.__doc__ = nextDoc
script.__name__ = funcName
script.resumeSayAllMode = sayAll.CURSOR.CARET
setattr(cls, funcName, script)
if key is not None:
cls.__gestures["kb:%s" % key] = scriptName
scriptName = "previous%s" % scriptSuffix
funcName = "script_%s" % scriptName
script = lambda self,gesture: self._quickNavScript(gesture, itemType, "previous", prevError, readUnit)
script.__doc__ = prevDoc
script.__name__ = funcName
script.resumeSayAllMode = sayAll.CURSOR.CARET
setattr(cls, funcName, script)
if key is not None:
cls.__gestures["kb:shift+%s" % key] = scriptName
def script_elementsList(self, gesture):
# We need this to be a modal dialog, but it mustn't block this script.
def run():
gui.mainFrame.prePopup()
d = self.ElementsListDialog(self)
d.ShowModal()
d.Destroy()
gui.mainFrame.postPopup()
wx.CallAfter(run)
# Translators: the description for the Elements List command in browse mode.
script_elementsList.__doc__ = _("Lists various types of elements in this document")
script_elementsList.ignoreTreeInterceptorPassThrough = True
def _activateNVDAObject(self, obj):
"""Activate an object in response to a user request.
This should generally perform the default action or click on the object.
@param obj: The object to activate.
@type obj: L{NVDAObjects.NVDAObject}
"""
try:
obj.doAction()
except NotImplementedError:
log.debugWarning("doAction not implemented")
def _activatePosition(self, obj=None):
if not obj:
obj=self.currentNVDAObject
if not obj:
return
if obj.role == controlTypes.Role.MATH:
import mathPres
try:
return mathPres.interactWithMathMl(obj.mathMl)
except (NotImplementedError, LookupError):
pass
return
if self.shouldPassThrough(obj):
obj.setFocus()
self.passThrough = True
reportPassThrough(self)
elif obj.role == controlTypes.Role.EMBEDDEDOBJECT or obj.role in self.APPLICATION_ROLES:
obj.setFocus()
speech.speakObject(obj, reason=OutputReason.FOCUS)
else:
self._activateNVDAObject(obj)
def script_activatePosition(self,gesture):
if config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._activatePosition()
else:
self._focusLastFocusableObject(activatePosition=True)
# Translators: the description for the activatePosition script on browseMode documents.
script_activatePosition.__doc__ = _("Activates the current object in the document")
def _focusLastFocusableObject(self, activatePosition=False):
"""Used when auto focus focusable elements is disabled to sync the focus
to the browse mode cursor.
When auto focus focusable elements is disabled, NVDA doesn't focus elements
as the user moves the browse mode cursor. However, there are some cases
where the user always wants to interact with the focus; e.g. if they press
the applications key to open the context menu. In these cases, this method
is called first to sync the focus to the browse mode cursor.
"""
obj = self.currentFocusableNVDAObject
if obj!=self.rootNVDAObject and self._shouldSetFocusToObj(obj) and obj!= api.getFocusObject():
obj.setFocus()
# We might be about to activate or pass through a key which will cause
# this object to change (e.g. checking a check box). However, we won't
# actually get the focus event until after the change has occurred.
# Therefore, we must cache properties for speech before the change occurs.
speech.speakObject(obj, OutputReason.ONLYCACHE)
self._objPendingFocusBeforeActivate = obj
if activatePosition:
# Make sure we activate the object at the caret, which is not necessarily focusable.
self._activatePosition()
def script_passThrough(self,gesture):
if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._focusLastFocusableObject()
gesture.send()
# Translators: the description for the passThrough script on browseMode documents.
script_passThrough.__doc__ = _("Passes gesture through to the application")
def script_disablePassThrough(self, gesture):
if not self.passThrough or self.disableAutoPassThrough:
return gesture.send()
# #3215 ARIA menus should get the Escape key unconditionally so they can handle it without invoking browse mode first
obj = api.getFocusObject()
if obj and obj.role in self.IGNORE_DISABLE_PASS_THROUGH_WHEN_FOCUSED_ROLES:
return gesture.send()
self.passThrough = False
self.disableAutoPassThrough = False
reportPassThrough(self)
script_disablePassThrough.ignoreTreeInterceptorPassThrough = True
def _set_disableAutoPassThrough(self, state):
# If the user manually switches to focus mode with NVDA+space, that enables
# pass-through and disables auto pass-through. If auto focusing of focusable
# elements is disabled, NVDA won't have synced the focus to the browse mode
# cursor. However, since the user is switching to focus mode, they probably
# want to interact with the focus, so sync the focus here.
if (
state
and not config.conf["virtualBuffers"]["autoFocusFocusableElements"]
and self.passThrough
):
self._focusLastFocusableObject()
self._disableAutoPassThrough = state
def _get_disableAutoPassThrough(self):
return self._disableAutoPassThrough
__gestures={
"kb:NVDA+f7": "elementsList",
"kb:enter": "activatePosition",
"kb:numpadEnter": "activatePosition",
"kb:space": "activatePosition",
"kb:NVDA+shift+space":"toggleSingleLetterNav",
"kb:escape": "disablePassThrough",
"kb:control+enter": "passThrough",
"kb:control+numpadEnter": "passThrough",
"kb:shift+enter": "passThrough",
"kb:shift+numpadEnter": "passThrough",
"kb:control+shift+enter": "passThrough",
"kb:control+shift+numpadEnter": "passThrough",
"kb:alt+enter": "passThrough",
"kb:alt+numpadEnter": "passThrough",
"kb:applications": "passThrough",
"kb:shift+applications": "passThrough",
"kb:shift+f10": "passThrough",
}
# Add quick navigation scripts.
qn = BrowseModeTreeInterceptor.addQuickNav
qn("heading", key="h",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading"))
qn("heading1", key="1",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 1"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 1"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 1"))
qn("heading2", key="2",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 2"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 2"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 2"))
qn("heading3", key="3",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 3"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 3"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 3"))
qn("heading4", key="4",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 4"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 4"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 4"))
qn("heading5", key="5",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 5"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 5"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 5"))
qn("heading6", key="6",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next heading at level 6"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous heading at level 6"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous heading at level 6"))
qn("table", key="t",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next table"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next table"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous table"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous table"),
readUnit=textInfos.UNIT_LINE)
qn("link", key="k",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous link"))
qn("visitedLink", key="v",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next visited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next visited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous visited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous visited link"))
qn("unvisitedLink", key="u",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next unvisited link"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next unvisited link"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous unvisited link"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous unvisited link"))
qn("formField", key="f",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next form field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next form field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous form field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous form field"))
qn("list", key="l",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list"),
readUnit=textInfos.UNIT_LINE)
qn("listItem", key="i",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next list item"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next list item"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous list item"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous list item"))
qn("button", key="b",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous button"))
qn("edit", key="e",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next edit field"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next edit field"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous edit field"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous edit field"),
readUnit=textInfos.UNIT_LINE)
qn("frame", key="m",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next frame"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next frame"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous frame"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous frame"),
readUnit=textInfos.UNIT_LINE)
qn("separator", key="s",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next separator"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next separator"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous separator"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous separator"))
qn("radioButton", key="r",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next radio button"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next radio button"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous radio button"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous radio button"))
qn("comboBox", key="c",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next combo box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next combo box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous combo box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous combo box"))
qn("checkBox", key="x",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next check box"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next check box"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous check box"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous check box"))
qn("graphic", key="g",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next graphic"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next graphic"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous graphic"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous graphic"))
qn("blockQuote", key="q",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next block quote"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next block quote"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous block quote"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous block quote"))
qn("notLinkBlock", key="n",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("skips forward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no more text after a block of links"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("skips backward past a block of links"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no more text before a block of links"),
readUnit=textInfos.UNIT_LINE)
qn("landmark", key="d",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next landmark"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next landmark"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous landmark"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous landmark"),
readUnit=textInfos.UNIT_LINE)
qn("embeddedObject", key="o",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next embedded object"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next embedded object"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous embedded object"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous embedded object"))
qn("annotation", key="a",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next annotation"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next annotation"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous annotation"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous annotation"))
qn("error", key="w",
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next error"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next error"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous error"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous error"))
qn(
"article", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next article"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next article"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous article"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous article")
)
qn(
"grouping", key=None,
# Translators: Input help message for a quick navigation command in browse mode.
nextDoc=_("moves to the next grouping"),
# Translators: Message presented when the browse mode element is not found.
nextError=_("no next grouping"),
# Translators: Input help message for a quick navigation command in browse mode.
prevDoc=_("moves to the previous grouping"),
# Translators: Message presented when the browse mode element is not found.
prevError=_("no previous grouping")
)
del qn
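# Added note (sketch, not original code): each qn(...) call above generates a
# pair of scripts on BrowseModeTreeInterceptor via addQuickNav; for example
# qn("heading", key="h", ...) creates script_nextHeading and
# script_previousHeading and binds them to kb:h and kb:shift+h, while entries
# with key=None (article, grouping) are generated but left unbound by default.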
class ElementsListDialog(
DpiScalingHelperMixinWithoutInit,
gui.contextHelp.ContextHelpMixin,
wx.Dialog # wxPython does not seem to call base class initializer, put last in MRO
):
helpId = "ElementsList"
ELEMENT_TYPES = (
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("link", _("Lin&ks")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("heading", _("&Headings")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("formField", _("&Form fields")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("button", _("&Buttons")),
# Translators: The label of a radio button to select the type of element
# in the browse mode Elements List dialog.
("landmark", _("Lan&dmarks")),
)
Element = collections.namedtuple("Element", ("item", "parent"))
lastSelectedElementType=0
def __init__(self, document):
super().__init__(
parent=gui.mainFrame,
# Translators: The title of the browse mode Elements List dialog.
title=_("Elements List")
)
self.document = document
mainSizer = wx.BoxSizer(wx.VERTICAL)
contentsSizer = wx.BoxSizer(wx.VERTICAL)
# Translators: The label of a group of radio buttons to select the type of element
# in the browse mode Elements List dialog.
child = wx.RadioBox(self, wx.ID_ANY, label=_("Type:"), choices=tuple(et[1] for et in self.ELEMENT_TYPES))
child.SetSelection(self.lastSelectedElementType)
child.Bind(wx.EVT_RADIOBOX, self.onElementTypeChange)
contentsSizer.Add(child, flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
self.tree = wx.TreeCtrl(
self,
size=self.scaleSize((500, 300)), # height is chosen to ensure the dialog will fit on an 800x600 screen
style=wx.TR_HAS_BUTTONS | wx.TR_HIDE_ROOT | wx.TR_LINES_AT_ROOT | wx.TR_SINGLE | wx.TR_EDIT_LABELS
)
self.tree.Bind(wx.EVT_SET_FOCUS, self.onTreeSetFocus)
self.tree.Bind(wx.EVT_CHAR, self.onTreeChar)
self.tree.Bind(wx.EVT_TREE_BEGIN_LABEL_EDIT, self.onTreeLabelEditBegin)
self.tree.Bind(wx.EVT_TREE_END_LABEL_EDIT, self.onTreeLabelEditEnd)
self.treeRoot = self.tree.AddRoot("root")
contentsSizer.Add(self.tree,flag=wx.EXPAND)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
# Translators: The label of an editable text field to filter the elements
# in the browse mode Elements List dialog.
filterText = _("Filter b&y:")
labeledCtrl = gui.guiHelper.LabeledControlHelper(self, filterText, wx.TextCtrl)
self.filterEdit = labeledCtrl.control
self.filterEdit.Bind(wx.EVT_TEXT, self.onFilterEditTextChange)
contentsSizer.Add(labeledCtrl.sizer)
contentsSizer.AddSpacer(gui.guiHelper.SPACE_BETWEEN_VERTICAL_DIALOG_ITEMS)
bHelper = gui.guiHelper.ButtonHelper(wx.HORIZONTAL)
# Translators: The label of a button to activate an element in the browse mode Elements List dialog.
# Beware not to set an accelerator that would collide with other controls in this dialog, such as an
# element type radio label.
self.activateButton = bHelper.addButton(self, label=_("Activate"))
self.activateButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(True))
# Translators: The label of a button to move to an element
# in the browse mode Elements List dialog.
self.moveButton = bHelper.addButton(self, label=_("&Move to"))
self.moveButton.Bind(wx.EVT_BUTTON, lambda evt: self.onAction(False))
bHelper.addButton(self, id=wx.ID_CANCEL)
contentsSizer.Add(bHelper.sizer, flag=wx.ALIGN_RIGHT)
mainSizer.Add(contentsSizer, border=gui.guiHelper.BORDER_FOR_DIALOGS, flag=wx.ALL)
mainSizer.Fit(self)
self.SetSizer(mainSizer)
self.tree.SetFocus()
self.initElementType(self.ELEMENT_TYPES[self.lastSelectedElementType][0])
self.CentreOnScreen()
def onElementTypeChange(self, evt):
elementType=evt.GetInt()
# We need to make sure this gets executed after the focus event.
# Otherwise, NVDA doesn't seem to get the event.
queueHandler.queueFunction(queueHandler.eventQueue, self.initElementType, self.ELEMENT_TYPES[elementType][0])
self.lastSelectedElementType=elementType
def initElementType(self, elType):
if elType in ("link","button"):
# Links and buttons can be activated.
self.activateButton.Enable()
self.SetAffirmativeId(self.activateButton.GetId())
else:
# No other element type can be activated.
self.activateButton.Disable()
self.SetAffirmativeId(self.moveButton.GetId())
# Gather the elements of this type.
self._elements = []
self._initialElement = None
parentElements = []
isAfterSelection=False
for item in self.document._iterNodesByType(elType):
# Find the parent element, if any.
for parent in reversed(parentElements):
if item.isChild(parent.item):
break
else:
# We're not a child of this parent, so this parent has no more children and can be removed from the stack.
parentElements.pop()
else:
# No parent found, so we're at the root.
# Note that parentElements will be empty at this point, as all parents are no longer relevant and have thus been removed from the stack.
parent = None
element=self.Element(item,parent)
self._elements.append(element)
if not isAfterSelection:
isAfterSelection=item.isAfterSelection
if not isAfterSelection:
# The element immediately preceding or overlapping the caret should be the initially selected element.
# Since we have not yet passed the selection, use this as the initial element.
try:
self._initialElement = self._elements[-1]
except IndexError:
# No previous element.
pass
# This could be the parent of a subsequent element, so add it to the parents stack.
parentElements.append(element)
# Start with no filtering.
self.filterEdit.ChangeValue("")
self.filter("", newElementType=True)
def filter(self, filterText, newElementType=False):
# If this is a new element type, use the element nearest the cursor.
# Otherwise, use the currently selected element.
# #8753: wxPython 4 returns "invalid tree item" when the tree view is empty, so use initial element if appropriate.
try:
defaultElement = self._initialElement if newElementType else self.tree.GetItemData(self.tree.GetSelection())
except:
defaultElement = self._initialElement
# Clear the tree.
self.tree.DeleteChildren(self.treeRoot)
# Populate the tree with elements matching the filter text.
elementsToTreeItems = {}
defaultItem = None
matched = False
# Do case-insensitive matching by lowering both filterText and each element's text.
filterText=filterText.lower()
for element in self._elements:
label=element.item.label
if filterText and filterText not in label.lower():
continue
matched = True
parent = element.parent
if parent:
parent = elementsToTreeItems.get(parent)
item = self.tree.AppendItem(parent or self.treeRoot, label)
self.tree.SetItemData(item, element)
elementsToTreeItems[element] = item
if element == defaultElement:
defaultItem = item
self.tree.ExpandAll()
if not matched:
# No items, so disable the buttons.
self.activateButton.Disable()
self.moveButton.Disable()
return
# If there's no default item, use the first item in the tree.
self.tree.SelectItem(defaultItem or self.tree.GetFirstChild(self.treeRoot)[0])
# Enable the button(s).
# If the activate button isn't the default button, it is disabled for this element type and shouldn't be enabled here.
if self.AffirmativeId == self.activateButton.Id:
self.activateButton.Enable()
self.moveButton.Enable()
def onTreeSetFocus(self, evt):
# Start with no search.
self._searchText = ""
self._searchCallLater = None
evt.Skip()
def onTreeChar(self, evt):
key = evt.KeyCode
if key == wx.WXK_RETURN:
# The enter key should be propagated to the dialog and thus activate the default button,
# but this is broken (wx ticket #3725).
# Therefore, we must catch the enter key here.
# Activate the current default button.
evt = wx.CommandEvent(wx.wxEVT_COMMAND_BUTTON_CLICKED, wx.ID_ANY)
button = self.FindWindowById(self.AffirmativeId)
if button.Enabled:
button.ProcessEvent(evt)
else:
wx.Bell()
elif key == wx.WXK_F2:
item=self.tree.GetSelection()
if item:
selectedItemType=self.tree.GetItemData(item).item
self.tree.EditLabel(item)
evt.Skip()
elif key >= wx.WXK_START or key == wx.WXK_BACK:
# Non-printable character.
self._searchText = ""
evt.Skip()
else:
# Search the list.
# We have to implement this ourselves, as tree views don't accept space as a search character.
char = chr(evt.UnicodeKey).lower()
# If the same character is typed twice, do the same single-character search again
# rather than appending it, so repeated presses cycle through matching items.
if self._searchText != char:
self._searchText += char
if self._searchCallLater:
self._searchCallLater.Restart()
else:
self._searchCallLater = wx.CallLater(1000, self._clearSearchText)
self.search(self._searchText)
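# --- Illustrative sketch (not part of NVDA): a minimal type-ahead buffer like
# the one above, assuming a hypothetical call_later(ms, fn) timer that supports
# Restart():
#
#     searchText = ""
#     timer = None
#     def on_char(char):
#         global searchText, timer
#         if searchText != char:   # typing the same key twice keeps a
#             searchText += char   # one-character search and cycles matches
#         if timer:
#             timer.Restart()
#         else:
#             timer = call_later(1000, clear)  # buffer resets after 1s idle
#         do_search(searchText)
#     def clear():
#         global searchText
#         searchText = ""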
def onTreeLabelEditBegin(self,evt):
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
if not selectedItemType.isRenameAllowed:
evt.Veto()
def onTreeLabelEditEnd(self,evt):
selectedItemNewName=evt.GetLabel()
item=self.tree.GetSelection()
selectedItemType = self.tree.GetItemData(item).item
selectedItemType.rename(selectedItemNewName)
def _clearSearchText(self):
self._searchText = ""
def search(self, searchText):
item = self.tree.GetSelection()
if not item:
# No items.
return
# First try searching from the current item.
# Failing that, search from the first item.
items = itertools.chain(self._iterReachableTreeItemsFromItem(item), self._iterReachableTreeItemsFromItem(self.tree.GetFirstChild(self.treeRoot)[0]))
if len(searchText) == 1:
# If only a single character has been entered, skip (search after) the current item.
next(items)
for item in items:
if self.tree.GetItemText(item).lower().startswith(searchText):
self.tree.SelectItem(item)
return
# Not found.
wx.Bell()
def _iterReachableTreeItemsFromItem(self, item):
while item:
yield item
childItem = self.tree.GetFirstChild(item)[0]
if childItem and self.tree.IsExpanded(item):
# Has children and is reachable, so recurse.
for childItem in self._iterReachableTreeItemsFromItem(childItem):
yield childItem
item = self.tree.GetNextSibling(item)
def onFilterEditTextChange(self, evt):
self.filter(self.filterEdit.GetValue())
evt.Skip()
def onAction(self, activate):
prevFocus = gui.mainFrame.prevFocus
self.Close()
# Save the last selected element type onto the class so it's used to initialize the dialog next time.
self.__class__.lastSelectedElementType=self.lastSelectedElementType
item = self.tree.GetSelection()
item = self.tree.GetItemData(item).item
if activate:
item.activate()
else:
def move():
speech.cancelSpeech()
# Avoid double announce if item.obj is about to gain focus.
if not (
self.document.passThrough
and getattr(item, "obj", False)
and item.obj != prevFocus
and controlTypes.State.FOCUSABLE in item.obj.states
):
# #8831: Report before moving because moving might change the focus, which
# might mutate the document, potentially invalidating info if it is
# offset-based.
item.report()
item.moveTo()
# We must use core.callLater rather than wx.CallLater to ensure that the callback runs within NVDA's core pump.
# If it didn't, and it directly or indirectly called wx.Yield, it could start executing NVDA's core pump from within the yield, causing recursion.
core.callLater(100, move)
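# --- Illustrative sketch (not part of NVDA): why deferring work to the
# application's own pump (as core.callLater does above) avoids re-entrancy.
# The names below are hypothetical stand-ins for NVDA's core pump and
# core.callLater; collections is already imported at the top of this module.
_sketchPendingCallbacks = collections.deque()

def _sketchCallLater(func, *args):
    # Queue the callback rather than invoking it from whatever loop happens
    # to be running; the owning pump executes it at a known-safe point.
    _sketchPendingCallbacks.append((func, args))

def _sketchPumpOnce():
    # Only the top-level loop drains the queue, so a callback that yields
    # control (directly or indirectly) can never start a second, nested pump.
    while _sketchPendingCallbacks:
        func, args = _sketchPendingCallbacks.popleft()
        func(*args)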
class BrowseModeDocumentTextInfo(textInfos.TextInfo):
def _get_focusableNVDAObjectAtStart(self):
try:
item = next(self.obj._iterNodesByType("focusable", "up", self))
except StopIteration:
return self.obj.rootNVDAObject
if not item:
return self.obj.rootNVDAObject
return item.obj
class BrowseModeDocumentTreeInterceptor(documentBase.DocumentWithTableNavigation,cursorManager.CursorManager,BrowseModeTreeInterceptor,treeInterceptorHandler.DocumentTreeInterceptor):
programmaticScrollMayFireEvent = False
def __init__(self,obj):
super(BrowseModeDocumentTreeInterceptor,self).__init__(obj)
self._lastProgrammaticScrollTime = None
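# Assigning the property to itself below caches its current computed value as an
# instance attribute, since the property can no longer be computed once the document dies.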
self.documentConstantIdentifier = self.documentConstantIdentifier
self._lastFocusObj = None
self._objPendingFocusBeforeActivate = None
self._hadFirstGainFocus = False
self._enteringFromOutside = True
# We need to cache this because it will be unavailable once the document dies.
if not hasattr(self.rootNVDAObject.appModule, "_browseModeRememberedCaretPositions"):
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions = {}
self._lastCaretPosition = None
#: True if the last caret move was due to a focus change.
self._lastCaretMoveWasFocus = False
def terminate(self):
if self.shouldRememberCaretPositionAcrossLoads and self._lastCaretPosition:
try:
self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier] = self._lastCaretPosition
except AttributeError:
# The app module died.
pass
def _get_currentNVDAObject(self):
return self.makeTextInfo(textInfos.POSITION_CARET).NVDAObjectAtStart
def event_treeInterceptor_gainFocus(self):
doSayAll=False
hadFirstGainFocus=self._hadFirstGainFocus
if not hadFirstGainFocus:
# This treeInterceptor is gaining focus for the first time.
# Fake a focus event on the focus object, as the treeInterceptor may have missed the actual focus event.
focus = api.getFocusObject()
self.event_gainFocus(focus, lambda: focus.event_gainFocus())
if not self.passThrough:
# We only set the caret position if in browse mode.
# If in focus mode, the document must have forced the focus somewhere,
# so we don't want to override it.
initialPos = self._getInitialCaretPos()
if initialPos:
self.selection = self.makeTextInfo(initialPos)
reportPassThrough(self)
doSayAll=config.conf['virtualBuffers']['autoSayAllOnPageLoad']
self._hadFirstGainFocus = True
if not self.passThrough:
if doSayAll:
speech.speakObjectProperties(self.rootNVDAObject, name=True, states=True, reason=OutputReason.FOCUS)
sayAll.SayAllHandler.readText(sayAll.CURSOR.CARET)
else:
# Speak it like we would speak focus on any other document object.
# This includes when entering the treeInterceptor for the first time:
if not hadFirstGainFocus:
speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS)
else:
# And when coming in from an outside object
# #4069 But not when coming up from a non-rendered descendant.
ancestors=api.getFocusAncestors()
fdl=api.getFocusDifferenceLevel()
try:
tl=ancestors.index(self.rootNVDAObject)
except ValueError:
tl=len(ancestors)
if fdl<=tl:
speech.speakObject(self.rootNVDAObject, reason=OutputReason.FOCUS)
info = self.selection
if not info.isCollapsed:
speech.speakPreselectedText(info.text)
else:
info.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(info, reason=OutputReason.CARET, unit=textInfos.UNIT_LINE)
reportPassThrough(self)
braille.handler.handleGainFocus(self)
def event_caret(self, obj, nextHandler):
if self.passThrough:
nextHandler()
def _activateLongDesc(self,controlField):
"""
Activates (presents) the long description for a particular field (usually a graphic).
@param controlField: the field whose long description should be activated. This field is guaranteed to have the HASLONGDESC state.
@type controlField: dict
"""
raise NotImplementedError
def _activatePosition(self, obj=None, info=None):
if info:
obj=info.NVDAObjectAtStart
if not obj:
return
super(BrowseModeDocumentTreeInterceptor,self)._activatePosition(obj=obj)
def _set_selection(self, info, reason=OutputReason.CARET):
super(BrowseModeDocumentTreeInterceptor, self)._set_selection(info)
if isScriptWaiting() or not info.isCollapsed:
return
# Save the last caret position for use in terminate().
# This must be done here because the buffer might be cleared just before terminate() is called,
# causing the last caret position to be lost.
caret = info.copy()
caret.collapse()
self._lastCaretPosition = caret.bookmark
review.handleCaretMove(caret)
if reason == OutputReason.FOCUS:
self._lastCaretMoveWasFocus = True
focusObj = api.getFocusObject()
if focusObj==self.rootNVDAObject:
return
else:
self._lastCaretMoveWasFocus = False
focusObj=info.focusableNVDAObjectAtStart
obj=info.NVDAObjectAtStart
if not obj:
log.debugWarning("Invalid NVDAObjectAtStart")
return
if obj==self.rootNVDAObject:
return
obj.scrollIntoView()
if self.programmaticScrollMayFireEvent:
self._lastProgrammaticScrollTime = time.time()
if focusObj:
self.passThrough = self.shouldPassThrough(focusObj, reason=reason)
if (
not eventHandler.isPendingEvents("gainFocus")
and focusObj != self.rootNVDAObject
and focusObj != api.getFocusObject()
and self._shouldSetFocusToObj(focusObj)
):
followBrowseModeFocus = config.conf["virtualBuffers"]["autoFocusFocusableElements"]
if followBrowseModeFocus or self.passThrough:
focusObj.setFocus()
# Queue the reporting of pass through mode so that it will be spoken after the actual content.
queueHandler.queueFunction(queueHandler.eventQueue, reportPassThrough, self)
def _shouldSetFocusToObj(self, obj):
"""Determine whether an object should receive focus.
Subclasses may extend or override this method.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
"""
return obj.role not in self.APPLICATION_ROLES and obj.isFocusable and obj.role!=controlTypes.Role.EMBEDDEDOBJECT
def script_activateLongDesc(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand("character")
for field in reversed(info.getTextWithFields()):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlStart":
states=field.field.get('states')
if states and controlTypes.State.HASLONGDESC in states:
self._activateLongDesc(field.field)
break
else:
# Translators: the message presented when the activateLongDescription script cannot locate a long description to activate.
ui.message(_("No long description"))
# Translators: the description for the activateLongDescription script on browseMode documents.
script_activateLongDesc.__doc__=_("Shows the long description at this position if one is found.")
def event_caretMovementFailed(self, obj, nextHandler, gesture=None):
if not self.passThrough or not gesture or not config.conf["virtualBuffers"]["autoPassThroughOnCaretMove"]:
return nextHandler()
if gesture.mainKeyName in ("home", "end"):
# Home, end, control+home and control+end should not disable pass through.
return nextHandler()
script = self.getScript(gesture)
if not script:
return nextHandler()
# We've hit the edge of the focused control.
# Therefore, move the virtual caret to the same edge of the field.
info = self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CONTROLFIELD)
if gesture.mainKeyName in ("leftArrow", "upArrow", "pageUp"):
info.collapse()
else:
info.collapse(end=True)
info.move(textInfos.UNIT_CHARACTER, -1)
info.updateCaret()
scriptHandler.queueScript(script, gesture)
currentExpandedControl=None #: an NVDAObject representing the control that has just been expanded with the collapseOrExpandControl script.
def script_collapseOrExpandControl(self, gesture):
if not config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
self._focusLastFocusableObject()
oldFocus = api.getFocusObject()
oldFocusStates = oldFocus.states
gesture.send()
if controlTypes.State.COLLAPSED in oldFocusStates:
self.passThrough = True
# When a control (such as a combo box) is expanded, we expect that its descendants will be classed as being outside the browseMode document.
# We save off the expanded control so that the next focus event within the browseMode document can see if it is for the control,
# and if so, it disables passthrough, as the control has obviously been collapsed again.
self.currentExpandedControl=oldFocus
elif not self.disableAutoPassThrough:
self.passThrough = False
reportPassThrough(self)
def _tabOverride(self, direction):
"""Override the tab order if the virtual caret is not within the currently focused node.
This is done because many nodes are not focusable and it is thus possible for the virtual caret to be unsynchronised with the focus.
In this case, we want tab/shift+tab to move to the next/previous focusable node relative to the virtual caret.
If the virtual caret is within the focused node, the tab/shift+tab key should be passed through to allow normal tab order navigation.
Note that this method does not pass the key through itself if it is not overridden. This should be done by the calling script if C{False} is returned.
@param direction: The direction in which to move.
@type direction: str
@return: C{True} if the tab order was overridden, C{False} if not.
@rtype: bool
"""
if self._lastCaretMoveWasFocus:
# #5227: If the caret was last moved due to a focus change, don't override tab.
# This ensures that tabbing behaves as expected after tabbing hits an iframe document.
return False
focus = api.getFocusObject()
try:
focusInfo = self.makeTextInfo(focus)
except:
return False
# We only want to override the tab order if the caret is not within the focused node.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Only check that the caret is within the focus for things that are not documents,
# as for documents we should always override.
if focus.role!=controlTypes.Role.DOCUMENT or controlTypes.State.EDITABLE in focus.states:
# Expand to one character, as isOverlapping() doesn't yield the desired results with collapsed ranges.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if focusInfo.isOverlapping(caretInfo):
return False
# If we reach here, we do want to override tab/shift+tab if possible.
# Find the next/previous focusable node.
try:
item = next(self._iterNodesByType("focusable", direction, caretInfo))
except StopIteration:
return False
obj=item.obj
newInfo=item.textInfo
if obj == api.getFocusObject():
# This node is already focused, so we need to move to and speak this node here.
newCaret = newInfo.copy()
newCaret.collapse()
self._set_selection(newCaret, reason=OutputReason.FOCUS)
if self.passThrough:
obj.event_gainFocus()
else:
speech.speakTextInfo(newInfo, reason=OutputReason.FOCUS)
else:
# This node doesn't have the focus, so just set focus to it. The gainFocus event will handle the rest.
obj.setFocus()
return True
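# --- Illustrative sketch (not part of NVDA; names are hypothetical): the
# decision above in miniature. Tab is only overridden when the virtual caret
# has wandered away from the real focus; otherwise the key is passed through
# for normal tab-order navigation:
#
#     def on_tab(direction):
#         if caret_overlaps(focused_node()):
#             return False               # let the application handle Tab
#         node = next_focusable(from_=caret(), direction=direction)
#         focus_or_speak(node)           # the gainFocus event does the rest
#         return True                    # Tab was consumed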
def script_tab(self, gesture):
if not self._tabOverride("next"):
gesture.send()
def script_shiftTab(self, gesture):
if not self._tabOverride("previous"):
gesture.send()
def event_focusEntered(self,obj,nextHandler):
if obj==self.rootNVDAObject:
self._enteringFromOutside = True
# Even if passThrough is enabled, we still completely drop focusEntered events here.
# In order to get them back when passThrough is enabled, we replay them with the _replayFocusEnteredEvents method in event_gainFocus.
# The reason for this is to ensure that focusEntered events are delayed
# until a focus event has had a chance to disable passthrough mode, since in that case we would not want them.
def _shouldIgnoreFocus(self, obj):
"""Determines whether focus on a given object should be ignored.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if focus on L{obj} should be ignored, C{False} otherwise.
@rtype: bool
"""
return False
def _postGainFocus(self, obj):
"""Executed after a gainFocus within the browseMode document.
This will not be executed if L{event_gainFocus} determined that it should abort and call nextHandler.
@param obj: The object that gained focus.
@type obj: L{NVDAObjects.NVDAObject}
"""
def _replayFocusEnteredEvents(self):
# We blocked the focusEntered events because we were in browse mode,
# but now that we've switched to focus mode, we need to fire them.
for parent in api.getFocusAncestors()[api.getFocusDifferenceLevel():]:
try:
parent.event_focusEntered()
except:
log.exception("Error executing focusEntered event: %s" % parent)
def event_gainFocus(self, obj, nextHandler):
enteringFromOutside=self._enteringFromOutside
self._enteringFromOutside=False
if not self.isReady:
if self.passThrough:
self._replayFocusEnteredEvents()
nextHandler()
return
# If a control has been expanded by the collapseOrExpandControl script, and this focus event is for it,
# disable passThrough and report the control, as the control has obviously been collapsed again.
# Note that whether or not this focus event was for that control, the last expanded control is forgotten, so that only the next focus event for the browseMode document can handle the collapsed control.
lastExpandedControl=self.currentExpandedControl
self.currentExpandedControl=None
if self.passThrough and obj==lastExpandedControl:
self.passThrough=False
reportPassThrough(self)
nextHandler()
return
if enteringFromOutside and not self.passThrough and self._lastFocusObj==obj:
# We're entering the document from outside (not returning from an inside object/application; #3145)
# and this was the last non-root node with focus, so ignore this focus event.
# Otherwise, if the user switches away and back to this document, the cursor will jump to this node.
# This is not ideal if the user was positioned over a node which cannot receive focus.
return
if obj==self.rootNVDAObject:
if self.passThrough:
self._replayFocusEnteredEvents()
return nextHandler()
return
if not self.passThrough and self._shouldIgnoreFocus(obj):
return
# If the previous focus object was removed, we might hit a false positive for overlap detection.
# Track the previous focus target so that we can account for this scenario.
previousFocusObjIsDefunct = False
if self._lastFocusObj:
try:
states = self._lastFocusObj.states
previousFocusObjIsDefunct = controlTypes.State.DEFUNCT in states
except Exception:
log.debugWarning(
"Error fetching states when checking for defunct object. Treating object as defunct anyway.",
exc_info=True
)
previousFocusObjIsDefunct = True
self._lastFocusObj=obj
try:
focusInfo = self.makeTextInfo(obj)
except:
# This object is not in the treeInterceptor, even though it resides beneath the document.
# Automatic pass through should be enabled in certain circumstances where this occurs.
if not self.passThrough and self.shouldPassThrough(obj, reason=OutputReason.FOCUS):
self.passThrough=True
reportPassThrough(self)
self._replayFocusEnteredEvents()
return nextHandler()
# We only want to update the caret and speak the field if we're not in the same one as before.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
isOverlapping = focusInfo.isOverlapping(caretInfo)
if not self._hadFirstGainFocus or not isOverlapping or (isOverlapping and previousFocusObjIsDefunct):
# The virtual caret is not within the focus node.
oldPassThrough=self.passThrough
passThrough = self.shouldPassThrough(obj, reason=OutputReason.FOCUS)
if not oldPassThrough and (passThrough or sayAll.SayAllHandler.isRunning()):
# If pass-through is disabled, cancel speech, as a focus change should cause page reading to stop.
# This must be done before auto-pass-through occurs, as we want to stop page reading even if pass-through will be automatically enabled by this focus change.
speech.cancelSpeech()
self.passThrough=passThrough
if not self.passThrough:
# We read the info from the browseMode document instead of the control itself.
speech.speakTextInfo(focusInfo, reason=OutputReason.FOCUS)
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj, controlTypes.OutputReason.ONLYCACHE)
# As we do not call nextHandler which would trigger the vision framework to handle gain focus,
# we need to call it manually here.
vision.handler.handleGainFocus(obj)
else:
# Although we are going to speak the object rather than textInfo content, we still need to silently speak the textInfo content so that the textInfo speech cache is updated correctly.
# Not doing this would cause later browseMode speaking to either not speak controlFields it had entered, or speak controlField exits after having already exited.
# See #7435 for a discussion on this.
speech.speakTextInfo(focusInfo, reason=OutputReason.ONLYCACHE)
self._replayFocusEnteredEvents()
nextHandler()
focusInfo.collapse()
self._set_selection(focusInfo, reason=OutputReason.FOCUS)
else:
# The virtual caret was already at the focused node.
if not self.passThrough:
# This focus change was caused by a virtual caret movement, so don't speak the focused node to avoid double speaking.
# However, we still want to update the speech property cache so that property changes will be spoken properly.
speech.speakObject(obj, OutputReason.ONLYCACHE)
if config.conf["virtualBuffers"]["autoFocusFocusableElements"]:
# As we do not call nextHandler which would trigger the vision framework to handle gain focus,
# we need to call it manually here.
# Note: this is usually called after the caret movement.
vision.handler.handleGainFocus(obj)
elif (
self._objPendingFocusBeforeActivate
and obj == self._objPendingFocusBeforeActivate
and obj is not self._objPendingFocusBeforeActivate
):
# With auto focus focusable elements disabled, when the user activates
# an element (e.g. by pressing enter) or presses a key which we pass
# through (e.g. control+enter), we call _focusLastFocusableObject.
# However, the activation/key press might cause a property change
# before we get the focus event, so NVDA's normal reporting of
# changes to the focus won't pick it up.
# The speech property cache on _objPendingFocusBeforeActivate reflects
# the properties before the activation/key, so use that to speak any
# changes.
speech.speakObject(
self._objPendingFocusBeforeActivate,
OutputReason.CHANGE
)
self._objPendingFocusBeforeActivate = None
else:
self._replayFocusEnteredEvents()
return nextHandler()
self._postGainFocus(obj)
event_gainFocus.ignoreIsReady=True
def _handleScrollTo(
self,
obj: Union[NVDAObject, textInfos.TextInfo],
) -> bool:
"""Handle scrolling the browseMode document to a given object in response to an event.
Subclasses should call this from an event which indicates that the document has scrolled.
@postcondition: The virtual caret is moved to L{obj} and the buffer content for L{obj} is reported.
@param obj: The object to which the document should scroll.
@return: C{True} if the document was scrolled, C{False} if not.
@note: If C{False} is returned, calling events should probably call their nextHandler.
"""
if self.programmaticScrollMayFireEvent and self._lastProgrammaticScrollTime and time.time() - self._lastProgrammaticScrollTime < 0.4:
# This event was probably caused by this browseMode document's call to scrollIntoView().
# Therefore, ignore it. Otherwise, the cursor may bounce back to the scroll point.
# However, pretend we handled it, as we don't want it to be passed on to the object either.
return True
if isinstance(obj, NVDAObject):
try:
scrollInfo = self.makeTextInfo(obj)
except (NotImplementedError, RuntimeError):
return False
elif isinstance(obj, textInfos.TextInfo):
scrollInfo = obj.copy()
else:
raise ValueError(f"{obj} is not a supported type")
# We only want to update the caret and speak the field if we're not in the same one as before.
caretInfo=self.makeTextInfo(textInfos.POSITION_CARET)
# Expand to one character, as isOverlapping() doesn't treat, for example, (4,4) and (4,5) as overlapping.
caretInfo.expand(textInfos.UNIT_CHARACTER)
if not scrollInfo.isOverlapping(caretInfo):
if scrollInfo.isCollapsed:
scrollInfo.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(scrollInfo, reason=OutputReason.CARET)
scrollInfo.collapse()
self.selection = scrollInfo
return True
return False
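# --- Illustrative sketch (not part of NVDA; names are hypothetical):
# suppressing the echo of one's own programmatic scroll, as _handleScrollTo
# does above with its 0.4 second window:
#
#     lastProgrammaticScroll = None
#     def scroll_to(obj):
#         global lastProgrammaticScroll
#         lastProgrammaticScroll = time.time()
#         obj.scrollIntoView()
#     def on_scroll_event(obj):
#         if lastProgrammaticScroll and time.time() - lastProgrammaticScroll < 0.4:
#             return  # our own scroll bouncing back; ignore it
#         move_caret_and_report(obj)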
def _isNVDAObjectInApplication_noWalk(self, obj):
"""Determine whether a given object is within an application without walking ancestors.
The base implementation simply checks whether the object has an application role.
Subclasses can override this if they can provide a definite answer without needing to walk.
For example, for virtual buffers, if the object is in the buffer,
it definitely isn't in an application.
L{_isNVDAObjectInApplication} calls this and walks to the next ancestor if C{None} is returned.
@return: C{True} if definitely in an application,
C{False} if definitely not in an application,
C{None} if this can't be determined without walking ancestors.
"""
if (
# roles such as application and dialog should be treated as being within an "application" and therefore outside of the browseMode document.
obj.role in self.APPLICATION_ROLES
# Anything other than an editable text box inside a combo box should be
# treated as being outside a browseMode document.
or (
obj.role != controlTypes.Role.EDITABLETEXT and obj.container
and obj.container.role == controlTypes.Role.COMBOBOX
)
):
return True
return None
def _isNVDAObjectInApplication(self, obj):
"""Determine whether a given object is within an application.
The object is considered to be within an application if it or one of its ancestors has an application role.
This should only be called on objects beneath the treeInterceptor's root NVDAObject.
@param obj: The object in question.
@type obj: L{NVDAObjects.NVDAObject}
@return: C{True} if L{obj} is within an application, C{False} otherwise.
@rtype: bool
"""
# We cache the result for each object we walk.
# There can be browse mode documents within other documents and the result might be different between these,
# so the cache must be maintained on the TreeInterceptor rather than the object itself.
try:
cache = self._isInAppCache
except AttributeError:
# Create this lazily, as this method isn't used by all browse mode implementations.
cache = self._isInAppCache = weakref.WeakKeyDictionary()
objs = []
def doResult(result):
# Cache this on descendants we've walked over.
for obj in objs:
cache[obj] = result
return result
while obj and obj != self.rootNVDAObject:
inApp = cache.get(obj)
if inApp is not None:
# We found a cached result.
return doResult(inApp)
objs.append(obj)
inApp = self._isNVDAObjectInApplication_noWalk(obj)
if inApp is not None:
return doResult(inApp)
# We must walk ancestors.
# Cache container.
container = obj.container
obj.container = container
obj = container
return doResult(False)
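# --- Illustrative sketch (not part of NVDA): memoising an ancestor walk with
# a WeakKeyDictionary, as the method above does. Every object visited on the
# way to an answer is cached with that answer, so later walks stop at the
# first already-visited ancestor, and dead objects drop out automatically:
#
#     cache = weakref.WeakKeyDictionary()
#     def walk(obj):
#         seen = []
#         while obj is not None:
#             if obj in cache:
#                 result = cache[obj]
#                 break
#             seen.append(obj)
#             obj = obj.container
#         else:
#             result = False
#         for o in seen:
#             cache[o] = result
#         return result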
def _get_documentConstantIdentifier(self):
"""Get the constant identifier for this document.
This identifier should uniquely identify all instances (not just one instance) of a document for at least the current session of the hosting application.
Generally, the document URL should be used.
@return: The constant identifier for this document, C{None} if there is none.
"""
return None
def _get_shouldRememberCaretPositionAcrossLoads(self):
"""Specifies whether the position of the caret should be remembered when this document is loaded again.
This is useful when the browser remembers the scroll position for the document,
but does not communicate this information via APIs.
The remembered caret position is associated with this document using L{documentConstantIdentifier}.
@return: C{True} if the caret position should be remembered, C{False} if not.
@rtype: bool
"""
docConstId = self.documentConstantIdentifier
# Return True if the URL indicates that this is probably a web browser document.
# We do this check because we don't want to remember caret positions for email messages, etc.
if isinstance(docConstId, str):
protocols=("http", "https", "ftp", "ftps", "file")
protocol=docConstId.split("://", 1)[0]
return protocol in protocols
return False
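# For example (assuming the scheme split above):
#     "https://example.com/a".split("://", 1)[0] -> "https", which is in the
#     list, so the caret position is remembered. "mailto:user@example.com"
#     contains no "://", so split returns the whole string, which is not in
#     the list, and the position is not remembered.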
def _getInitialCaretPos(self):
"""Retrieve the initial position of the caret after the buffer has been loaded.
This position, if any, will be passed to L{makeTextInfo}.
Subclasses should extend this method.
@return: The initial position of the caret, C{None} if there isn't one.
@rtype: TextInfo position
"""
if self.shouldRememberCaretPositionAcrossLoads:
try:
return self.rootNVDAObject.appModule._browseModeRememberedCaretPositions[self.documentConstantIdentifier]
except KeyError:
pass
return None
def getEnclosingContainerRange(self, textRange):
textRange = textRange.copy()
textRange.collapse()
try:
item = next(self._iterNodesByType("container", "up", textRange))
except (NotImplementedError,StopIteration):
try:
item = next(self._iterNodesByType("landmark", "up", textRange))
except (NotImplementedError,StopIteration):
return
return item.textInfo
def script_moveToStartOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container
# (list, table, etc.) but there is no container.
ui.message(_("Not in a container"))
return
container.collapse()
self._set_selection(container, reason=OutputReason.QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=OutputReason.FOCUS)
script_moveToStartOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET
# Translators: Description for the Move to start of container command in browse mode.
script_moveToStartOfContainer.__doc__=_("Moves to the start of the container element, such as a list or table")
def script_movePastEndOfContainer(self,gesture):
info=self.makeTextInfo(textInfos.POSITION_CARET)
info.expand(textInfos.UNIT_CHARACTER)
container=self.getEnclosingContainerRange(info)
if not container:
# Translators: Reported when the user attempts to move to the start or end of a container
# (list, table, etc.) but there is no container.
ui.message(_("Not in a container"))
return
container.collapse(end=True)
docEnd=container.obj.makeTextInfo(textInfos.POSITION_LAST)
if container.compareEndPoints(docEnd,"endToEnd")>=0:
container=docEnd
# Translators: a message reported when:
# Review cursor is at the bottom line of the current navigator object.
# Landing at the end of a browse mode document when trying to jump to the end of the current container.
ui.message(_("Bottom"))
self._set_selection(container, reason=OutputReason.QUICKNAV)
if not willSayAllResume(gesture):
container.expand(textInfos.UNIT_LINE)
speech.speakTextInfo(container, reason=OutputReason.FOCUS)
script_movePastEndOfContainer.resumeSayAllMode = sayAll.CURSOR.CARET
# Translators: Description for the Move past end of container command in browse mode.
script_movePastEndOfContainer.__doc__=_("Moves past the end of the container element, such as a list or table")
NOT_LINK_BLOCK_MIN_LEN = 30
def _isSuitableNotLinkBlock(self, textRange):
return len(textRange.text) >= self.NOT_LINK_BLOCK_MIN_LEN
def _iterNotLinkBlock(self, direction="next", pos=None):
links = self._iterNodesByType("link", direction=direction, pos=pos)
# We want to compare each link against the next link.
item1 = next(links, None)
if item1 is None:
return
for item2 in links:
# If the distance between the links is small, this is probably just a piece of non-link text within a block of links; e.g. an inactive link of a nav bar.
if direction=="previous":
textRange=item1.textInfo.copy()
textRange.collapse()
textRange.setEndPoint(item2.textInfo,"startToEnd")
else:
textRange=item2.textInfo.copy()
textRange.collapse()
textRange.setEndPoint(item1.textInfo,"startToEnd")
if self._isSuitableNotLinkBlock(textRange):
yield TextInfoQuickNavItem("notLinkBlock", self, textRange)
item1=item2
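# --- Illustrative sketch (not part of NVDA; text_between and MIN_LEN are
# hypothetical): the pairwise scan above in miniature, yielding only gaps
# between consecutive links long enough to be real text rather than spacing
# inside a block of links such as a nav bar:
#
#     prev = next(links, None)
#     for cur in links:
#         gap = text_between(prev, cur)
#         if len(gap) >= MIN_LEN:
#             yield gap
#         prev = cur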
__gestures={
"kb:NVDA+d": "activateLongDesc",
"kb:alt+upArrow": "collapseOrExpandControl",
"kb:alt+downArrow": "collapseOrExpandControl",
"kb:tab": "tab",
"kb:shift+tab": "shiftTab",
"kb:shift+,": "moveToStartOfContainer",
"kb:,": "movePastEndOfContainer",
}
@script(
description=_(
# Translators: the description for the toggleScreenLayout script.
"Toggles on and off if the screen layout is preserved while rendering the document content"
),
gesture="kb:NVDA+v",
)
def script_toggleScreenLayout(self, gesture):
# Translators: The message reported for not supported toggling of screen layout
ui.message(_("Not supported in this document."))
| 44.265625 | 229 | 0.740899 | 10,985 | 84,990 | 5.67929 | 0.117979 | 0.025005 | 0.015837 | 0.019491 | 0.365092 | 0.303765 | 0.279145 | 0.263516 | 0.258243 | 0.249427 | 0 | 0.002256 | 0.181245 | 84,990 | 1,919 | 230 | 44.288692 | 0.894289 | 0.426191 | 0 | 0.297735 | 0 | 0.000809 | 0.134236 | 0.011349 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072816 | false | 0.072006 | 0.032362 | 0.008091 | 0.193366 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
d9f9cd4e7a0b73e79eb71d2bdbfa755d69a9cc9d
| 597 |
py
|
Python
|
examples/first_char_last_column.py
|
clarkfitzg/sta141c
|
129704ba0952a4b80f9b093dcfa49f49f37b052d
|
[
"MIT"
] | 24 |
2019-01-08T20:10:11.000Z
|
2021-11-26T12:18:58.000Z
|
examples/first_char_last_column.py
|
timilchene/sta141c-winter19
|
129704ba0952a4b80f9b093dcfa49f49f37b052d
|
[
"MIT"
] | 1 |
2017-06-25T05:35:24.000Z
|
2017-06-25T05:35:24.000Z
|
examples/first_char_last_column.py
|
timilchene/sta141c-winter19
|
129704ba0952a4b80f9b093dcfa49f49f37b052d
|
[
"MIT"
] | 22 |
2019-01-08T20:02:15.000Z
|
2021-12-16T23:27:56.000Z
|
#!/usr/bin/env python3
"""
For the last column, print only the first character.
Usage:
$ printf "100,200\n0,\n" | python3 first_char_last_column.py
Should print "100,2\n0,"
"""
import csv
from sys import stdin, stdout
def main():
reader = csv.reader(stdin)
writer = csv.writer(stdout)
for row in reader:
try:
row[-1] = row[-1][0]
except IndexError:
# Python: Better to ask forgiveness than permission
# Alternative: Look before you leap
pass
writer.writerow(row)
if __name__ == "__main__":
main()
| 19.258065 | 64 | 0.606365 | 79 | 597 | 4.443038 | 0.696203 | 0.05698 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03972 | 0.283082 | 597 | 30 | 65 | 19.9 | 0.780374 | 0.425461 | 0 | 0 | 0 | 0 | 0.024242 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0.076923 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
d9fb744315858b3e553e097f0866c6de49262adf
| 1,996 |
py
|
Python
|
env_ci.py
|
reloadware/stickybeak
|
8ac52a80849a3098fb6b2f47115970a734a73c14
|
[
"Apache-2.0"
] | null | null | null |
env_ci.py
|
reloadware/stickybeak
|
8ac52a80849a3098fb6b2f47115970a734a73c14
|
[
"Apache-2.0"
] | null | null | null |
env_ci.py
|
reloadware/stickybeak
|
8ac52a80849a3098fb6b2f47115970a734a73c14
|
[
"Apache-2.0"
] | 1 |
2022-01-01T15:14:42.000Z
|
2022-01-01T15:14:42.000Z
|
from pathlib import Path
root = Path(__file__).parent.absolute()
import envo
envo.add_source_roots([root])
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from envo import Env, Namespace, env_var, logger, run
from env_comm import StickybeakCommEnv as ParentEnv
p = Namespace("p")
class StickybeakCiEnv(ParentEnv):
class Meta(ParentEnv.Meta):
stage: str = "ci"
emoji: str = "⚙"
load_env_vars = True
class Environ(ParentEnv.Environ):
pypi_username: Optional[str] = env_var(raw=True)
pypi_password: Optional[str] = env_var(raw=True)
e: Environ
def init(self) -> None:
super().init()
@p.command
def bootstrap(self, test_apps=True) -> None:
super().bootstrap(test_apps)
@p.command
def test(self) -> None:
run("pytest --reruns 2 -v tests")
@p.command
def build(self) -> None:
run("poetry build")
@p.command
def publish(self) -> None:
run(f'poetry publish --username "{self.e.pypi_username}" --password "{self.e.pypi_password}"', verbose=False)
@p.command
def rstcheck(self) -> None:
pass
# run("rstcheck README.rst | tee ./workspace/rstcheck.txt")
@p.command
def flake(self) -> None:
pass
# run("flake8 . | tee ./workspace/flake8.txt")
@p.command
def check_black(self) -> None:
run("black --check .")
@p.command
def check_isort(self) -> None:
run("black --check .")
@p.command
def mypy(self) -> None:
run("mypy .")
@p.command
def generate_version(self) -> None:
import toml
config = toml.load(str(self.meta.root / "pyproject.toml"))
version: str = config["tool"]["poetry"]["version"]
version_file = self.meta.root / "stickybeak/__version__.py"
Path(version_file).touch()
version_file.write_text(f'__version__ = "{version}"\n')
ThisEnv = StickybeakCiEnv
| 22.942529 | 117 | 0.613727 | 251 | 1,996 | 4.752988 | 0.36255 | 0.067058 | 0.092205 | 0.03772 | 0.093881 | 0.093881 | 0.053646 | 0.053646 | 0 | 0 | 0 | 0.002003 | 0.249499 | 1,996 | 86 | 118 | 23.209302 | 0.793725 | 0.051102 | 0 | 0.298246 | 0 | 0 | 0.130619 | 0.038604 | 0 | 0 | 0 | 0 | 0 | 1 | 0.192982 | false | 0.087719 | 0.122807 | 0 | 0.385965 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
d9fe5aa1f8632d451d56260ea6fb9079bd975a31
| 475 |
py
|
Python
|
bsp/nrf5x/tools/sdk_dist.py
|
BreederBai/rt-thread
|
53ed0314982556dfa9c5db75d4f3e02485d16ab5
|
[
"Apache-2.0"
] | 7,482 |
2015-01-01T09:23:08.000Z
|
2022-03-31T19:34:05.000Z
|
bsp/nrf5x/tools/sdk_dist.py
|
ArdaFu/rt-thread
|
eebb2561ec166e0016187c7b7998ada4f8212b3a
|
[
"Apache-2.0"
] | 2,543 |
2015-01-09T02:01:34.000Z
|
2022-03-31T23:10:14.000Z
|
bsp/nrf5x/tools/sdk_dist.py
|
ArdaFu/rt-thread
|
eebb2561ec166e0016187c7b7998ada4f8212b3a
|
[
"Apache-2.0"
] | 4,645 |
2015-01-06T07:05:31.000Z
|
2022-03-31T18:21:50.000Z
|
import os
import sys
import shutil
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(cwd_path), 'rt-thread', 'tools'))
# BSP dist function
def dist_do_building(BSP_ROOT, dist_dir):
from mkdist import bsp_copy_files
import rtconfig
library_dir = os.path.join(dist_dir, 'libraries')
print("=> copy nrf52 bsp libraries")
library_path = os.path.join(os.path.dirname(BSP_ROOT), 'libraries')
bsp_copy_files(library_path, library_dir)
| 26.388889 | 78 | 0.734737 | 74 | 475 | 4.5 | 0.405405 | 0.09009 | 0.09009 | 0.072072 | 0.138138 | 0.138138 | 0 | 0 | 0 | 0 | 0 | 0.004926 | 0.145263 | 475 | 17 | 79 | 27.941176 | 0.815271 | 0.035789 | 0 | 0 | 0 | 0 | 0.129386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.416667 | 0 | 0.5 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
8a047dbb3e81227c03ec206589ca325125601905
| 1,721 |
py
|
Python
|
app/blueprints/department_blueprint.py
|
Maxcutex/personal_ecommerce
|
be09fb20eae1b225523acde06f8e75effcc3676f
|
[
"MIT"
] | null | null | null |
app/blueprints/department_blueprint.py
|
Maxcutex/personal_ecommerce
|
be09fb20eae1b225523acde06f8e75effcc3676f
|
[
"MIT"
] | 2 |
2019-05-21T08:44:29.000Z
|
2021-04-30T20:46:08.000Z
|
app/blueprints/department_blueprint.py
|
Maxcutex/personal_ecommerce
|
be09fb20eae1b225523acde06f8e75effcc3676f
|
[
"MIT"
] | null | null | null |
from flasgger import swag_from
from app.blueprints.base_blueprint import Blueprint, BaseBlueprint, request, Security, Auth
from app.controllers.department_controller import DepartmentController
url_prefix = '{}/departments'.format(BaseBlueprint.base_url_prefix)
department_blueprint = Blueprint('department', __name__, url_prefix=url_prefix)
department_controller = DepartmentController(request)
@department_blueprint.route('/', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_all_departments.yml')
def list_departments():
return department_controller.list_departments()
@department_blueprint.route('/<int:department_id>', methods=['GET'])
@Auth.has_permission('view_department')
@swag_from('documentation/get_single_department.yml')
def get_department(department_id):
return department_controller.get_department(department_id)
@department_blueprint.route('/', methods=['POST'])
@Auth.has_role('admin')
@Security.validator(['name|required:ifExists_Department_name', 'description|required'])
@swag_from('documentation/create_department.yml')
def create_department():
return department_controller.create_department()
@department_blueprint.route('/<int:department_id>', methods=['DELETE'])
@Auth.has_role('admin')
@swag_from('documentation/delete_department.yml')
def delete_department(department_id):
return department_controller.delete_department(department_id)
@department_blueprint.route('/<int:department_id>', methods=['PATCH'])
@Auth.has_role('admin')
@Security.validator(['name|optional', 'description|optional'])
@swag_from('documentation/update_department.yml')
def update_department(department_id):
return department_controller.update_department(department_id)
| 41.97561 | 91 | 0.820453 | 199 | 1,721 | 6.773869 | 0.241206 | 0.080119 | 0.097923 | 0.060089 | 0.405786 | 0.405786 | 0.248516 | 0.096439 | 0.096439 | 0.096439 | 0 | 0 | 0.048809 | 1,721 | 40 | 92 | 43.025 | 0.823458 | 0 | 0 | 0.151515 | 0 | 0 | 0.246368 | 0.127252 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.090909 | 0.151515 | 0.393939 | 0.242424 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0
| 1 |
8a06d974512def3c400fb25769c0185d59195602
| 1,405 |
py
|
Python
|
baymax/api.py
|
dmrz/baymax
|
60cca5ae2e7cb42e093747f91b809e34e6782fcd
|
[
"MIT"
] | 34 |
2018-02-14T09:37:26.000Z
|
2021-02-13T10:06:54.000Z
|
baymax/api.py
|
Avishekbhattacharjee/baymax
|
487930c4f3021ff50504d371de09ff31e458c09f
|
[
"MIT"
] | 1 |
2018-03-03T02:55:38.000Z
|
2018-03-17T21:57:15.000Z
|
baymax/api.py
|
Avishekbhattacharjee/baymax
|
487930c4f3021ff50504d371de09ff31e458c09f
|
[
"MIT"
] | 7 |
2018-02-28T07:35:35.000Z
|
2022-01-26T11:54:40.000Z
|
import json
import aiohttp
async def request(url, payload=None, params=None, headers=None):
headers = {'content-type': 'application/json', **(headers or {})}
data = payload and json.dumps(payload)
async with aiohttp.ClientSession() as client:
async with client.post(
url, data=data, params=params, headers=headers) as resp:
# TODO: Check response status
json_response = await resp.json()
return json_response
async def get_updates(base_url, timeout, offset):
params = {
'timeout': timeout,
'offset': offset
}
return await request(f'{base_url}/getUpdates', params=params)
async def send_message(base_url, chat_id, text, reply_markup=None):
payload = {
'chat_id': chat_id,
'text': text
}
if reply_markup is not None:
payload['reply_markup'] = reply_markup
return await request(f'{base_url}/sendMessage', payload)
async def answer_callback_query(
base_url, callback_query_id, text, show_alert,
url=None, cache_time=None):
payload = {
'callback_query_id': callback_query_id,
'text': text,
'show_alert': show_alert
}
if url is not None:
payload['url'] = url
if cache_time is not None:
payload['cache_time'] = cache_time
return await request(f'{base_url}/answerCallbackQuery', payload)
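# --- Illustrative usage sketch (not part of the original module): a minimal
# long-polling loop built on the helpers above, assuming a Telegram-style API
# behind base_url. Defined only; never called at import time.
async def _example_poll(base_url):
    offset = None
    while True:
        response = await get_updates(base_url, timeout=30, offset=offset)
        for update in response.get('result', []):
            # Advance the offset so the same update is not delivered twice.
            offset = update['update_id'] + 1
            message = update.get('message')
            if message:
                await send_message(base_url, message['chat']['id'], 'pong')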
| 29.270833 | 72 | 0.641993 | 176 | 1,405 | 4.943182 | 0.318182 | 0.048276 | 0.062069 | 0.065517 | 0.089655 | 0.089655 | 0 | 0 | 0 | 0 | 0 | 0 | 0.251957 | 1,405 | 47 | 73 | 29.893617 | 0.827783 | 0.019217 | 0 | 0.054054 | 0 | 0 | 0.131541 | 0.053052 | 0 | 0 | 0 | 0.021277 | 0 | 1 | 0 | false | 0 | 0.054054 | 0 | 0.162162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a0988ba1c9ee5db70eabfa7b9b35ad041f9c1f7
| 2,238 |
py
|
Python
|
pymatgen/analysis/tests/test_piezo.py
|
exenGT/pymatgen
|
a8ffb820ab8fc3f60251099e38c8888f45eae618
|
[
"MIT"
] | 1 |
2021-11-02T21:10:11.000Z
|
2021-11-02T21:10:11.000Z
|
pymatgen/analysis/tests/test_piezo.py
|
exenGT/pymatgen
|
a8ffb820ab8fc3f60251099e38c8888f45eae618
|
[
"MIT"
] | 5 |
2018-08-07T23:00:23.000Z
|
2021-01-05T22:46:23.000Z
|
pymatgen/analysis/tests/test_piezo.py
|
exenGT/pymatgen
|
a8ffb820ab8fc3f60251099e38c8888f45eae618
|
[
"MIT"
] | 6 |
2019-04-26T18:50:41.000Z
|
2020-03-29T17:58:34.000Z
|
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Test for the piezo tensor class
"""
__author__ = "Shyam Dwaraknath"
__version__ = "0.1"
__maintainer__ = "Shyam Dwaraknath"
__email__ = "shyamd@lbl.gov"
__status__ = "Development"
__date__ = "4/1/16"
import os
import unittest
import numpy as np
from pymatgen.analysis.piezo import PiezoTensor
from pymatgen.util.testing import PymatgenTest
class PiezoTest(PymatgenTest):
def setUp(self):
self.piezo_struc = self.get_structure("BaNiO3")
self.voigt_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],
[0.0, 0.0, 0.0, 0.03839, 0.0, 0.0],
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.vasp_matrix = np.array(
[
[0.0, 0.0, 0.0, 0.0, 0.0, 0.03839],
[0.0, 0.0, 0.0, 0.0, 0.03839, 0.0],  # 6 Voigt components, matching the other rows
[6.89822, 6.89822, 27.46280, 0.0, 0.0, 0.0],
]
)
self.full_tensor_array = [
[[0.0, 0.0, 0.03839], [0.0, 0.0, 0.0], [0.03839, 0.0, 0.0]],
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.03839], [0.0, 0.03839, 0.0]],
[[6.89822, 0.0, 0.0], [0.0, 6.89822, 0.0], [0.0, 0.0, 27.4628]],
]
def test_new(self):
pt = PiezoTensor(self.full_tensor_array)
self.assertArrayAlmostEqual(pt, self.full_tensor_array)
bad_dim_array = np.zeros((3, 3))
self.assertRaises(ValueError, PiezoTensor, bad_dim_array)
def test_from_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_voigt(self.voigt_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
def test_from_vasp_voigt(self):
bad_voigt = np.zeros((3, 7))
pt = PiezoTensor.from_vasp_voigt(self.vasp_matrix)
self.assertArrayEqual(pt, self.full_tensor_array)
self.assertRaises(ValueError, PiezoTensor.from_voigt, bad_voigt)
self.assertArrayEqual(self.voigt_matrix, pt.voigt)
if __name__ == "__main__":
unittest.main()
| 31.521127 | 76 | 0.594281 | 331 | 2,238 | 3.809668 | 0.232628 | 0.137986 | 0.171293 | 0.183981 | 0.513085 | 0.478192 | 0.470262 | 0.470262 | 0.470262 | 0.470262 | 0 | 0.127108 | 0.258266 | 2,238 | 70 | 77 | 31.971429 | 0.63253 | 0.054066 | 0 | 0.192308 | 0 | 0 | 0.037951 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.096154 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a1f6ceee24cfa74cb693e71048a38117f2ad54b
| 907 |
py
|
Python
|
base/admin.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
base/admin.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
base/admin.py
|
ExpertOfNone/expert_of_none
|
9ff4e4279a570712766546122c014c754f753485
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from base.models import Topic, Photo
class EONBaseAdmin(admin.ModelAdmin):
def get_changeform_initial_data(self, request):
initial = super().get_changeform_initial_data(request)
if 'add' in request.META['PATH_INFO']:
initial['created_by'] = request.user
initial['modified_by'] = request.user
return initial
def save_model(self, request, obj, form, change):
if not obj.created_by:
obj.created_by = request.user
return super().save_model(request, obj, form, change)
class TopicAdmin(EONBaseAdmin):
list_display = [
'name', 'parent_topic', 'top_level', 'modified_by', 'modified', 'created_by', 'created',
]
class PhotoAdmin(EONBaseAdmin):
# TODO Add Proper List Display
pass
admin.site.register(Topic, TopicAdmin)
admin.site.register(Photo, PhotoAdmin)
| 21.093023 | 96 | 0.675854 | 109 | 907 | 5.458716 | 0.458716 | 0.060504 | 0.065546 | 0.080672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.216097 | 907 | 42 | 97 | 21.595238 | 0.83685 | 0.030871 | 0 | 0 | 0 | 0 | 0.107184 | 0 | 0 | 0 | 0 | 0.02381 | 0 | 1 | 0.095238 | false | 0.047619 | 0.095238 | 0 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a2014bc56418a4e4967160efe3f9656c573b77f
| 1,432 |
py
|
Python
|
glue/__init__.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 550 |
2015-01-08T13:51:06.000Z
|
2022-03-31T11:54:47.000Z
|
glue/__init__.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 1,362 |
2015-01-03T19:15:52.000Z
|
2022-03-30T13:23:11.000Z
|
glue/__init__.py
|
HPLegion/glue
|
1843787ccb4de852dfe103ff58473da13faccf5f
|
[
"BSD-3-Clause"
] | 142 |
2015-01-08T13:08:00.000Z
|
2022-03-18T13:25:57.000Z
|
# Set up configuration variables
__all__ = ['custom_viewer', 'qglue', 'test']
import os
import sys
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('glue-core').version
except DistributionNotFound:
__version__ = 'undefined'
from ._mpl_backend import MatplotlibBackendSetter
sys.meta_path.append(MatplotlibBackendSetter())
from glue.viewers.custom.helper import custom_viewer
# Load user's configuration file
from .config import load_configuration
env = load_configuration()
from .qglue import qglue
from .main import load_plugins # noqa
def test(no_optional_skip=False):
from pytest import main
root = os.path.abspath(os.path.dirname(__file__))
args = [root, '-x']
if no_optional_skip:
args.append('--no-optional-skip')
return main(args=args)
from glue._settings_helpers import load_settings
load_settings()
# In PyQt 5.5+, PyQt overrides the default exception catching and fatally
# crashes the Qt application without printing out any details about the error.
# Below we revert the exception hook to the original Python one. Note that we
# can't just do sys.excepthook = sys.__excepthook__ otherwise PyQt will detect
# the default excepthook is in place and override it.
def handle_exception(exc_type, exc_value, exc_traceback):
sys.__excepthook__(exc_type, exc_value, exc_traceback)
sys.excepthook = handle_exception
| 26.036364 | 78 | 0.775838 | 195 | 1,432 | 5.435897 | 0.528205 | 0.049057 | 0.039623 | 0.028302 | 0.075472 | 0.075472 | 0.075472 | 0.075472 | 0 | 0 | 0 | 0.001642 | 0.149441 | 1,432 | 54 | 79 | 26.518519 | 0.868637 | 0.293296 | 0 | 0 | 0 | 0 | 0.059821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.37037 | 0 | 0.481481 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
8a20b1d12635ada6c636b100e165021b86485320
| 2,854 |
py
|
Python
|
main.py
|
vkumarma/Complete-Interpreter
|
5ec15ea84b0e7e735328511cc504efa43638f720
|
[
"MIT"
] | null | null | null |
main.py
|
vkumarma/Complete-Interpreter
|
5ec15ea84b0e7e735328511cc504efa43638f720
|
[
"MIT"
] | null | null | null |
main.py
|
vkumarma/Complete-Interpreter
|
5ec15ea84b0e7e735328511cc504efa43638f720
|
[
"MIT"
] | null | null | null |
import re
import sys
class Lexer:
def __init__(self, inp_str):
self.index = 0
self.s = inp_str
def get_char(self):
if self.index < len(self.s):
var = self.s[self.index]
self.index += 1
return var
input_file = open(str(sys.argv[1]), 'r') # Open file for reading
line = input_file.read()
# "if z then while x * 4 - 2 do skip endwhile else x := 7 endif; y := 1"
input_string = line.strip("\n")
lexer = Lexer(input_string)
hashtable = {}
tokens_list = []
def token_check(input):
if re.fullmatch("if|then|else|endif|while|do|endwhile|skip", input):
hashtable[input] = "KEYWORD"
tokens_list.append(input)
elif re.search("([a-z]|[A-Z])([a-z]|[A-Z]|[0-9])*", input):
hashtable[input] = "IDENTIFIER"
tokens_list.append(input)
elif re.search("[0-9]+", input):
hashtable[input] = "NUMBER"
tokens_list.append(input)
elif re.fullmatch("\+|\-|\*|/|\(|\)|:=|;", input):
hashtable[input] = "SYMBOL"
tokens_list.append(input)
else:
hashtable[input] = "ERROR READING"
def digit(curr_char, lexer):
sub = ""
while (curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def longest_sub_string(curr_char, lexer):
sub = ""
while (curr_char.isalpha() or curr_char.isdigit()):
sub += curr_char
curr_char = lexer.get_char()
if curr_char == None:
break
new.append(curr_char)
return sub
def symbol(curr_char, lexer):
# print(curr_char)
sym = curr_char
curr_char = lexer.get_char()
new.append(curr_char)
return sym
def assignment(curr_char, lexer):
sub = curr_char
next_char = lexer.get_char()
if next_char == "=":
sub += next_char
new.append(next_char)
return sub
new.append(lexer.get_char())
return sub
new = [] # keeping track of current char.
curr_char = lexer.get_char()
while (curr_char != None):
while (curr_char == ' ' or curr_char == ''):
curr_char = lexer.get_char()
if (curr_char.isdigit()):
token_check(digit(curr_char, lexer))
curr_char = new.pop()
elif (curr_char.isalpha()):
token_check(longest_sub_string(curr_char, lexer))
curr_char = new.pop()
elif curr_char in "+-/*();":
token_check(symbol(curr_char, lexer))
curr_char = new.pop()
elif curr_char == ":":
token_check(assignment(curr_char, lexer))
curr_char = new.pop()
if curr_char == "=":
curr_char = lexer.get_char()
else:
token_check(curr_char)
curr_char = lexer.get_char()
def tokens():
return hashtable
# print(tokens_list)
# print(tokens())
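# --- Illustrative trace (hypothetical, not part of the assignment): for the
# input "x := 7" the loop above yields
#     hashtable == {'x': 'IDENTIFIER', ':=': 'SYMBOL', '7': 'NUMBER'}
# Note: if the input ends in a trailing space, get_char() returns None inside
# the whitespace-skipping loop and the isdigit() call would then raise
# AttributeError; stripping trailing blanks from the input avoids this.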
| 23.983193 | 72 | 0.590049 | 383 | 2,854 | 4.185379 | 0.211488 | 0.204616 | 0.121647 | 0.07985 | 0.46413 | 0.412352 | 0.355583 | 0.213974 | 0.213974 | 0.213974 | 0 | 0.005261 | 0.267344 | 2,854 | 118 | 73 | 24.186441 | 0.761358 | 0.061317 | 0 | 0.363636 | 0 | 0.011364 | 0.058757 | 0.035554 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.022727 | 0.011364 | 0.204545 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a20dee928bb3a353769ebc5d7c40156ab5eb131
| 306 |
py
|
Python
|
deduplicate.py
|
Ghostofapacket/NewsGrabber-Deduplicate
|
0b8152af2e1c6c87cf8540970f42084b96a99d9c
|
[
"Unlicense"
] | null | null | null |
deduplicate.py
|
Ghostofapacket/NewsGrabber-Deduplicate
|
0b8152af2e1c6c87cf8540970f42084b96a99d9c
|
[
"Unlicense"
] | null | null | null |
deduplicate.py
|
Ghostofapacket/NewsGrabber-Deduplicate
|
0b8152af2e1c6c87cf8540970f42084b96a99d9c
|
[
"Unlicense"
] | null | null | null |
import sys
sys.path.append('/usr/local/lib/python3.4/site-packages/')
from warc_dedup import deduplicate
def main():
if len(sys.argv) == 1:
raise Exception('Please provide the WARC file as argument.')
deduplicate.Warc(*sys.argv[1:]).deduplicate()
if __name__ == '__main__':
main()
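# Usage sketch: the script expects the WARC file as its only argument, e.g.
#   $ python deduplicate.py crawl-00001.warc.gz
# (the file name is illustrative; warc_dedup must be importable, which is what
# the hard-coded sys.path.append above arranges on the original host).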
| 20.4 | 68 | 0.679739 | 43 | 306 | 4.627907 | 0.697674 | 0.070352 | 0.080402 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015748 | 0.169935 | 306 | 14 | 69 | 21.857143 | 0.767717 | 0 | 0 | 0 | 0 | 0 | 0.288525 | 0.127869 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | true | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a29eefe067ae42942e4915562e64419af3d1cde
| 950 |
py
|
Python
|
scripts_python3/exchange/deleteExchange.py
|
bcvsolutions/winrm-ad-connector
|
9b45dae78d3ba24fe6b00e090f8763d3162e1570
|
[
"Apache-2.0"
] | null | null | null |
scripts_python3/exchange/deleteExchange.py
|
bcvsolutions/winrm-ad-connector
|
9b45dae78d3ba24fe6b00e090f8763d3162e1570
|
[
"Apache-2.0"
] | 2 |
2020-05-27T07:15:28.000Z
|
2020-12-17T05:22:54.000Z
|
scripts_python3/exchange/deleteExchange.py
|
bcvsolutions/winrm-ad-connector
|
9b45dae78d3ba24fe6b00e090f8763d3162e1570
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# All params from IdM are stored in the environment; read them with os.environ["paramName"]
import sys, os
# needed so that winrm_wrapper can be imported from the parent directory
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import winrm_wrapper
import codecs
uid = os.environ["__UID__"]
winrm_wrapper.writeLog("Delete start for " + uid)
# Load PS script from file and replace params
winrm_wrapper.writeLog("loading script")
with codecs.open(os.environ["script"], encoding='utf-8', mode='r') as f:
    command = f.read()
command = command.replace("$uid", uid)
# Call wrapper
winrm_wrapper.executeScript(os.environ["endpoint"], os.environ["authentication"], os.environ["user"],
os.environ["password"], os.environ["caTrustPath"], os.environ["ignoreCaValidation"], command, uid)
winrm_wrapper.writeLog("Delete end for " + uid)
print("__UID__=" + uid)
sys.exit()
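# Runtime sketch: the connector is expected to export (at least) these
# variables before invoking the script -- __UID__, script, endpoint,
# authentication, user, password, caTrustPath and ignoreCaValidation --
# exactly the os.environ keys read above; none of them has a default.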
| 35.185185 | 134 | 0.705263 | 133 | 950 | 4.902256 | 0.503759 | 0.124233 | 0.092025 | 0.070552 | 0.088957 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00369 | 0.144211 | 950 | 26 | 135 | 36.538462 | 0.798278 | 0.270526 | 0 | 0 | 0 | 0 | 0.206696 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.066667 | 0.2 | 0 | 0.2 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8a2ac410faa6645af8d41c21c8f5834684cf1a20
| 2,152 |
py
|
Python
|
tests/registry_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | 1 |
2022-03-11T13:36:34.000Z
|
2022-03-11T13:36:34.000Z
|
tests/registry_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | null | null | null |
tests/registry_test.py
|
Walon1998/dace
|
95ddfd3e9a5c654f0f0d66d026e0b64ec0f028a0
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import unittest
from aenum import Enum, auto
from dace import registry
@registry.make_registry
class ExtensibleClass(object):
pass
class Extension(ExtensibleClass):
pass
@registry.extensible_enum
class ExtensibleEnumeration(Enum):
a = auto()
b = auto()
class RegistryTests(unittest.TestCase):
def test_class_registry(self):
ExtensibleClass.register(Extension)
self.assertTrue(Extension in ExtensibleClass.extensions())
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister(self):
@registry.autoregister
class Extension2(ExtensibleClass):
pass
self.assertTrue(Extension2 in ExtensibleClass.extensions())
def test_class_registry_args(self):
ExtensibleClass.register(Extension, a=True, b=1, c=2)
self.assertTrue(Extension in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension], dict(a=True, b=1, c=2))
ExtensibleClass.unregister(Extension)
self.assertTrue(Extension not in ExtensibleClass.extensions())
def test_autoregister_args(self):
@registry.autoregister_params(a=False, b=0)
class Extension3(ExtensibleClass):
pass
self.assertTrue(Extension3 in ExtensibleClass.extensions())
self.assertEqual(ExtensibleClass.extensions()[Extension3], dict(a=False, b=0))
def test_autoregister_fail(self):
with self.assertRaises(TypeError):
@registry.autoregister
class Extension4(object):
pass
def test_enum_registry(self):
ExtensibleEnumeration.register('c')
self.assertTrue(ExtensibleEnumeration.c in ExtensibleEnumeration)
self.assertEqual(ExtensibleEnumeration.c.value, 3)
def test_enum_registry_fail(self):
with self.assertRaises(TypeError):
@registry.extensible_enum
class NotAnEnum(object):
pass
if __name__ == '__main__':
unittest.main()
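# Usage sketch outside the test class (the parameter name `priority` is made
# up; the mechanism -- arbitrary kwargs stored per extension -- is what the
# test_autoregister_args case above exercises):
#   @registry.autoregister_params(priority=5)
#   class Extension5(ExtensibleClass):
#       pass
#   assert ExtensibleClass.extensions()[Extension5] == dict(priority=5)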
| 29.479452 | 89 | 0.697955 | 220 | 2,152 | 6.704545 | 0.290909 | 0.135593 | 0.109831 | 0.065085 | 0.380339 | 0.357288 | 0.295593 | 0.143729 | 0.143729 | 0.143729 | 0 | 0.012419 | 0.214219 | 2,152 | 72 | 90 | 29.888889 | 0.859846 | 0.033922 | 0 | 0.352941 | 0 | 0 | 0.004333 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 1 | 0.137255 | false | 0.117647 | 0.058824 | 0 | 0.392157 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8a2f400a7655554fbc57b5f622cd3afad8069e45
| 427 |
py
|
Python
|
gcp-python-fn/main.py
|
FuriKuri/faas-playground
|
52618e21064e327d2874d2b73cfe5fb247d3dd6e
|
[
"MIT"
] | 1 |
2019-05-07T13:15:16.000Z
|
2019-05-07T13:15:16.000Z
|
gcp-python-fn/main.py
|
FuriKuri/faas-playground
|
52618e21064e327d2874d2b73cfe5fb247d3dd6e
|
[
"MIT"
] | null | null | null |
gcp-python-fn/main.py
|
FuriKuri/faas-playground
|
52618e21064e327d2874d2b73cfe5fb247d3dd6e
|
[
"MIT"
] | null | null | null |
def hello_world(request):
request_json = request.get_json()
name = 'World'
if request_json and 'name' in request_json:
name = request_json['name']
headers = {
'Access-Control-Allow-Origin': 'https://furikuri.net',
'Access-Control-Allow-Methods': 'GET, POST',
'Access-Control-Allow-Headers': 'Content-Type'
}
return ('Hello ' + name + '! From GCP + Python', 200, headers)
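# Invocation sketch (region and project in the URL are placeholders):
#   $ curl -X POST -H "Content-Type: application/json" \
#          -d '{"name": "GCP"}' https://REGION-PROJECT.cloudfunctions.net/hello_world
# expected body: "Hello GCP! From GCP + Python", returned with the CORS
# headers built above.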
| 35.583333 | 66 | 0.620609 | 51 | 427 | 5.078431 | 0.529412 | 0.169884 | 0.208494 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009091 | 0.227166 | 427 | 11 | 67 | 38.818182 | 0.775758 | 0 | 0 | 0 | 0 | 0 | 0.379391 | 0.194379 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a3245f4587a32c402e78f398ab94bc52ef0cf9a
| 780 |
py
|
Python
|
PaddleOCR/deploy/hubserving/ocr_det/params.py
|
TangJiamin/Ultra_light_OCR_No.23
|
594aa286dc2f88614141838ce45c164647226cdb
|
[
"Apache-2.0"
] | null | null | null |
PaddleOCR/deploy/hubserving/ocr_det/params.py
|
TangJiamin/Ultra_light_OCR_No.23
|
594aa286dc2f88614141838ce45c164647226cdb
|
[
"Apache-2.0"
] | null | null | null |
PaddleOCR/deploy/hubserving/ocr_det/params.py
|
TangJiamin/Ultra_light_OCR_No.23
|
594aa286dc2f88614141838ce45c164647226cdb
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(object):
pass
def read_params():
cfg = Config()
# params for the text detector
cfg.det_algorithm = "DB"
cfg.det_model_dir = "./inference/ch_ppocr_mobile_v2.0_det_infer/"
cfg.det_limit_side_len = 960
cfg.det_limit_type = 'max'
# DB params
cfg.det_db_thresh = 0.3
cfg.det_db_box_thresh = 0.5
cfg.det_db_unclip_ratio = 1.6
cfg.use_dilation = False
# # EAST params
# cfg.det_east_score_thresh = 0.8
# cfg.det_east_cover_thresh = 0.1
# cfg.det_east_nms_thresh = 0.2
cfg.use_pdserving = False
cfg.use_tensorrt = False
return cfg
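# Usage sketch: the hubserving module is expected to consume the object, e.g.
#   cfg = read_params()
#   print(cfg.det_algorithm, cfg.det_db_thresh)  # -> DB 0.3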
| 22.285714 | 70 | 0.661538 | 116 | 780 | 4.034483 | 0.508621 | 0.128205 | 0.102564 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030822 | 0.251282 | 780 | 34 | 71 | 22.941176 | 0.770548 | 0.203846 | 0 | 0 | 0 | 0 | 0.083045 | 0.074394 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.055556 | 0.166667 | 0 | 0.333333 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8a3c0f03126e25cbd17946a5a7c81e22d30b3f4d
| 821 |
py
|
Python
|
palm_tree/coconut_1/models.py
|
m-hintz-42/a-palm-tree
|
57656874335f4dfae13cf720668f2c5391621618
|
[
"MIT"
] | null | null | null |
palm_tree/coconut_1/models.py
|
m-hintz-42/a-palm-tree
|
57656874335f4dfae13cf720668f2c5391621618
|
[
"MIT"
] | null | null | null |
palm_tree/coconut_1/models.py
|
m-hintz-42/a-palm-tree
|
57656874335f4dfae13cf720668f2c5391621618
|
[
"MIT"
] | null | null | null |
from palm_tree import db
class Data(db.Model):
id = db.Column(db.Integer, primary_key=True)
uuid = db.Column(db.Integer)
response = db.Column(db.Text)
datetime = db.Column(db.DateTime)
def __init__(self, uuid, response, datetime):
self.uuid = uuid
self.response = response
self.datetime = datetime
def __repr__(self):
return '<Data %r>' % self.response
#
# class Logs(db.Model):
# id = db.Column(db.Integer, primary_key=True)
# uuid = db.Column(db.Integer)
# payload = db.Column(db.Text)
# datetime = db.Column(db.DateTime)
#
# def __init__(self, uuid, payload, datetime):
# self.uuid = uuid
# self.payload = payload
# self.datetime = datetime
#
# def __repr__(self):
# return '<Data %r>' % self.payload
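# Usage sketch (assumes an application context and the configured
# Flask-SQLAlchemy `db` imported above; `datetime` here is the stdlib module):
#   row = Data(uuid=42, response="yes", datetime=datetime.datetime.utcnow())
#   db.session.add(row)
#   db.session.commit()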
| 25.65625 | 50 | 0.613886 | 105 | 821 | 4.619048 | 0.247619 | 0.131959 | 0.164948 | 0.140206 | 0.76701 | 0.668041 | 0.668041 | 0.668041 | 0.668041 | 0.668041 | 0 | 0 | 0.252132 | 821 | 31 | 51 | 26.483871 | 0.789902 | 0.460414 | 0 | 0 | 0 | 0 | 0.021028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.083333 | 0.083333 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a3cfa083e8e8e57b0bc63b2f6a4954146234e99
| 6,995 |
py
|
Python
|
Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py
|
farzanaaswin0708/CNN-for-Visual-recognition
|
db65db0a0b60e1ed2a4a418069de61936aaa9e85
|
[
"MIT"
] | null | null | null |
Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py
|
farzanaaswin0708/CNN-for-Visual-recognition
|
db65db0a0b60e1ed2a4a418069de61936aaa9e85
|
[
"MIT"
] | null | null | null |
Chest X-Ray Multilabel Image classification using CNN - Pytorch/Arch2.py
|
farzanaaswin0708/CNN-for-Visual-recognition
|
db65db0a0b60e1ed2a4a418069de61936aaa9e85
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
################################################################################
# CSE 253: Programming Assignment 3
# Winter 2019
# Code author: Jenny Hamer (+ modifications by Tejash Desai)
#
# Filename: baseline_cnn.py
#
# Description:
#
# This file contains the starter code for the baseline architecture you will use
# to get a little practice with PyTorch and compare the results of with your
# improved architecture.
#
# Be sure to fill in the code in the areas marked #TODO.
################################################################################
# PyTorch and neural network imports
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as func
import torch.nn.init as torch_init
import torch.optim as optim
# Data utils and dataloader
import torchvision
from torchvision import transforms, utils
from xray_dataloader_zscored import ChestXrayDataset, create_split_loaders
import matplotlib.pyplot as plt
import numpy as np
import os
class Arch2CNN(nn.Module):
"""
<<<<<<< HEAD
conv1 -> maxpool -> conv2 -> maxpool -> conv3 -> conv4 ->maxpool -> conv5 -> conv6 -> maxpool -> conv7 -> conv8 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
=======
conv1 -> conv2 -> maxpool -> conv3 -> conv4 -> conv5 -> maxpool -> fc1 -> fc2 -> fc3 (outputs)
>>>>>>> 6652e3cfb72835ac4a7c802c9a703b59d5f63ae6
"""
def __init__(self):
super(Arch2CNN, self).__init__()
# conv1: 1 input channel, 4 output channels, [3x3] kernel size
self.conv1 = nn.Conv2d(in_channels=1, out_channels=4, kernel_size=3)
# Add batch-normalization to the outputs of conv1
self.conv1_normed = nn.BatchNorm2d(4)
# Initialized weights using the Xavier-Normal method
torch_init.xavier_normal_(self.conv1.weight)
self.pool1 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Fill in the remaining initializations replacing each '_' with
# the necessary value based on the provided specs for each layer
#TODO: conv2: 4 input channels, 8 output channels, [3x3] kernel, initialization: xavier
self.conv2 = nn.Conv2d(in_channels=4, out_channels=8, kernel_size=3)
self.conv2_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv2.weight)
#Maxpool
self.pool2 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv3: X input channels, 12 output channels, [8x8] kernel, initialization: xavier
self.conv3 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3)
self.conv3_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv3.weight)
#TODO: conv4: X input channels, 10 output channels, [6x6] kernel, initialization: xavier
self.conv4 = nn.Conv2d(in_channels=16, out_channels=16, kernel_size=3)
self.conv4_normed = nn.BatchNorm2d(16)
torch_init.xavier_normal_(self.conv4.weight)
self.pool3 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: conv5: X input channels, 8 output channels, [5x5] kernel, initialization: xavier
self.conv5 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3)
self.conv5_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv5.weight)
self.conv6 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv6_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv6.weight)
self.pool4 = nn.MaxPool2d(kernel_size=3, stride=1)
#TODO: Apply max-pooling with a [3x3] kernel using tiling (*NO SLIDING WINDOW*)
self.conv7 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv7_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv7.weight)
self.conv8 = nn.Conv2d(in_channels=8, out_channels=8, kernel_size=3)
self.conv8_normed = nn.BatchNorm2d(8)
torch_init.xavier_normal_(self.conv8.weight)
self.pool5 = nn.MaxPool2d(kernel_size=4, stride=4)
# Define 2 fully connected layers:
#TODO: fc1
self.fc1 = nn.Linear(in_features=122*122*8, out_features=512)
self.fc1_normed = nn.BatchNorm1d(512)
torch_init.xavier_normal_(self.fc1.weight)
#TODO: fc2
self.fc2 = nn.Linear(in_features=512, out_features=128)
self.fc2_normed = nn.BatchNorm1d(128)
torch_init.xavier_normal_(self.fc2.weight)
#TODO: fc3
self.fc3 = nn.Linear(in_features=128, out_features=14)
torch_init.xavier_normal_(self.fc3.weight)
#TODO: Output layer: what should out_features be?
self.out_features = 14
def forward(self, batch):
"""Pass the batch of images through each layer of the network, applying
non-linearities after each layer.
Note that this function *needs* to be called "forward" for PyTorch to
automagically perform the forward pass.
Params:
-------
- batch: (Tensor) An input batch of images
Returns:
--------
- logits: (Variable) The output of the network
"""
# Apply first convolution, followed by ReLU non-linearity;
# use batch-normalization on its outputs
batch = func.rrelu(self.conv1_normed(self.conv1(batch)))
batch = self.pool1(batch)
# Apply conv2 and conv3 similarly
batch = func.rrelu(self.conv2_normed(self.conv2(batch)))
batch = self.pool2(batch)
batch = func.rrelu(self.conv3_normed(self.conv3(batch)))
batch = func.rrelu(self.conv4_normed(self.conv4(batch)))
batch = self.pool3(batch)
batch = func.rrelu(self.conv5_normed(self.conv5(batch)))
batch = func.rrelu(self.conv6_normed(self.conv6(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool4(batch)
batch = func.rrelu(self.conv7_normed(self.conv7(batch)))
batch = func.rrelu(self.conv8_normed(self.conv8(batch)))
# Pass the output of conv3 to the pooling layer
batch = self.pool5(batch)
# Reshape the output of the conv3 to pass to fully-connected layer
batch = batch.view(-1, self.num_flat_features(batch))
# Connect the reshaped features of the pooled conv3 to fc1
batch = func.rrelu(self.fc1_normed(self.fc1(batch)))
batch = func.rrelu(self.fc2_normed(self.fc2(batch)))
# Connect fc1 to fc2 - this layer is slightly different than the rest (why?)
batch = self.fc3(batch)
# Return the class predictions
#TODO: apply an activition function to 'batch'
#batch = func.sigmoid(batch)
return batch
def num_flat_features(self, inputs):
# Get the dimensions of the layers excluding the inputs
size = inputs.size()[1:]
# Track the number of features
num_features = 1
for s in size:
num_features *= s
return num_features
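# Quick shape check (a sketch; the 512x512 input size is our inference from
# fc1's 122*122*8 flattening under the kernel/stride choices above, not a
# value stated in this file):
if __name__ == "__main__":
    model = Arch2CNN()
    model.eval()  # eval mode: batch-norm uses running stats, rrelu is deterministic
    with torch.no_grad():
        dummy = torch.randn(2, 1, 512, 512)  # batch of 2 grayscale X-rays
        out = model(dummy)
    print(out.shape)  # expected: torch.Size([2, 14])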
| 36.623037 | 157 | 0.653324 | 927 | 6,995 | 4.809061 | 0.248112 | 0.031404 | 0.02961 | 0.051817 | 0.27389 | 0.192463 | 0.192463 | 0.154554 | 0.124944 | 0.053836 | 0 | 0.048191 | 0.225733 | 6,995 | 190 | 158 | 36.815789 | 0.774926 | 0.368692 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015789 | 0 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a3f2203d02e338bbadd1c557a7d415e6e39dbbc
| 379 |
py
|
Python
|
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
src/temp2.py
|
FabBrolMons/frbayart
|
c2b9dde730cf6d21f1c1492d0da0351c12a4dce9
|
[
"MIT"
] | null | null | null |
from w1thermsensor import W1ThermSensor
sensor = W1ThermSensor()
temperature_in_celsius = sensor.get_temperature()
temperature_in_fahrenheit = sensor.get_temperature(W1ThermSensor.DEGREES_F)
temperature_in_all_units = sensor.get_temperatures([W1ThermSensor.DEGREES_C, W1ThermSensor.DEGREES_F, W1ThermSensor.KELVIN])
print("Sensor id:" + sensor.id)
print(temperature_in_celsius)
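# Sketch: enumerating every attached probe, assuming w1thermsensor's
# get_available_sensors() classmethod (present in recent releases):
#   for s in W1ThermSensor.get_available_sensors():
#       print(s.id, s.get_temperature())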
| 42.111111 | 124 | 0.852243 | 45 | 379 | 6.844444 | 0.4 | 0.168831 | 0.12987 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019718 | 0.063325 | 379 | 8 | 125 | 47.375 | 0.847887 | 0 | 0 | 0 | 0 | 0 | 0.026385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a406525f88287f3d13cd5aee631ef0cc809c7ec
| 247 |
py
|
Python
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 52 |
2016-09-30T05:53:45.000Z
|
2021-12-26T12:07:48.000Z
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 31 |
2017-01-05T06:07:28.000Z
|
2018-05-27T13:13:06.000Z
|
src/reportlab/graphics/charts/__init__.py
|
kokinomura/reportlab
|
18e39b85d7277c2b5e9218b30a7b7b0a644a3c02
|
[
"BSD-3-Clause"
] | 15 |
2016-11-03T08:50:15.000Z
|
2022-01-14T07:04:35.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/charts/__init__.py
__version__='3.3.0'
__doc__='''Business charts'''
| 41.166667 | 116 | 0.793522 | 37 | 247 | 4.972973 | 0.810811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047414 | 0.060729 | 247 | 5 | 117 | 49.4 | 0.74569 | 0.773279 | 0 | 0 | 0 | 0 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a44052cfce16663b8820adca1028bccdfa9a1aa
| 438 |
py
|
Python
|
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
CodeForces/A2OJ Ladder/softuni_problem.py
|
dimitrov-dimitar/competitive-programming
|
f2b022377baf6d4beff213fc513907b774c12352
|
[
"MIT"
] | null | null | null |
total_budget = 0
while True:
destination = input()
if destination == "End":
break
minimal_budget = float(input())
while True:
command = input()
if command == "End":
break
money = float(command)
total_budget += money
if total_budget >= minimal_budget:
print(f"Going to {destination}!")
total_budget = 0
break
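# Sample session (illustrative input/output):
#   Paris          <- destination
#   300            <- minimal budget
#   150
#   180            <- running total reaches 330 >= 300
#   Going to Paris!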
| 24.333333 | 46 | 0.513699 | 43 | 438 | 5.093023 | 0.418605 | 0.200913 | 0.109589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007547 | 0.394977 | 438 | 17 | 47 | 25.764706 | 0.818868 | 0 | 0 | 0.4375 | 0 | 0 | 0.068884 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.0625 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a44b11af8b2eb998e8acb85624cce72fd9e4d1c
| 303 |
py
|
Python
|
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
exercicios/ex 061 a 070/ex061.py
|
CarlosWillian/python
|
f863578245fbf402e5b46f844a247355afed0d62
|
[
"MIT"
] | null | null | null |
print('Create your A.P. with 10 terms')
n1 = int(input('Enter the first term of the A.P.: '))
r = int(input('Enter the common difference: '))
termo = n1
c = 1
print('The A.P. is (', end='')
while c <= 10:
    print('{}'.format(termo), end='')
    print(', ' if c < 10 else '', end='')
    termo += r
    c += 1
print(')')
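# Sample session: first term 5, common difference 3 prints
#   The A.P. is (5, 8, 11, 14, 17, 20, 23, 26, 29, 32)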
| 20.2 | 52 | 0.518152 | 52 | 303 | 3.019231 | 0.5 | 0.038217 | 0.178344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04329 | 0.237624 | 303 | 14 | 53 | 21.642857 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0.298013 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.416667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
8a44d6f6124cbf59eb9c835f08ecb56f0d9adf5a
| 737 |
py
|
Python
|
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
PythonBasics/ConditionalStatements/Exercise/toy_shop.py
|
achoraev/SoftUni
|
0cc7db470a096cc33bbe0ca6bd90060b79120573
|
[
"Apache-2.0"
] | null | null | null |
price = float(input())
puzzles = int(input())
dolls = int(input())
bears = int(input())
minions = int(input())
trucks = int(input())
total_toys = puzzles + dolls + bears + minions + trucks
price_puzzles = puzzles * 2.6
price_dolls = dolls * 3
price_bears = bears * 4.1
price_minions = minions * 8.2
price_trucks = trucks * 2
total_price = price_puzzles + price_dolls + price_bears + price_minions + price_trucks
if total_toys >= 50:
total_price = total_price - (total_price * 0.25)
rent = total_price * 0.1
total_price = total_price - rent
if total_price >= price:
print(f"Yes! {(total_price - price):.2f} lv left.")
else:
print(f"Not enough money! {(price - total_price):.2f} lv needed.")
| 25.413793 | 87 | 0.662144 | 107 | 737 | 4.35514 | 0.308411 | 0.214592 | 0.128755 | 0.128755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02906 | 0.206242 | 737 | 28 | 88 | 26.321429 | 0.767521 | 0 | 0 | 0 | 0 | 0 | 0.136812 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.095238 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a455ca53b609476797038c96b21d969bbdf51e3
| 2,234 |
py
|
Python
|
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
bookshelf/main/forms.py
|
thewordisbird/bookshelf
|
5166720bdc0dbffedc14b71b0f75ad78dc69b465
|
[
"MIT"
] | null | null | null |
import datetime
from flask_wtf import FlaskForm
from wtforms import (
StringField,
TextAreaField,
DateTimeField,
HiddenField,
PasswordField,
)
from wtforms.validators import DataRequired, ValidationError, Email, EqualTo
class NullableDateTimeField(DateTimeField):
"""Modify DateField to allow for Null values"""
def process_formdata(self, valuelist):
# Bypasses wtForms validation for blank datetime field.
if valuelist:
date_str = " ".join(valuelist).strip()
if date_str == "":
self.data = None
return
try:
self.data = datetime.datetime.strptime(date_str, self.format)
except ValueError:
self.data = None
raise ValueError(self.gettext("Not a valid date value"))
class SearchForm(FlaskForm):
search = StringField("Search", validators=[DataRequired()])
class ReviewForm(FlaskForm):
rating = HiddenField("Rating", validators=[DataRequired()])
review_title = StringField("Headline")
review_content = TextAreaField("Review")
date_started = NullableDateTimeField("Date Started", format="%m/%d/%Y")
date_finished = NullableDateTimeField("Date Finished", format="%m/%d/%Y")
def validate_date_finished(self, date_finished):
if self.date_started.data and date_finished.data:
if self.date_started.data > date_finished.data:
print("Date finished must be greater than or equal to date started")
raise ValidationError(
"Date finished must be greater than or equal to date started."
)
elif self.date_started.data or date_finished.data:
print("missing date")
raise ValidationError("If setting read dates, both dates are required.")
class EditProfileForm(FlaskForm):
display_name = StringField("Name", validators=[])
email = StringField("Email", validators=[Email(message="Invalid Email Address.")])
password = PasswordField(
"Password",
validators=[EqualTo("confirm_password", message="Passwords must match.")],
)
confirm_password = PasswordField("Confirm Password", validators=[])
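# Behaviour sketch for NullableDateTimeField (format "%m/%d/%Y" as above):
#   submitted "03/01/2021" -> field.data == datetime.datetime(2021, 3, 1, 0, 0)
#   submitted ""           -> field.data is None (no validation error)
#   submitted "garbage"    -> ValueError("Not a valid date value")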
| 36.032258 | 86 | 0.658013 | 230 | 2,234 | 6.3 | 0.408696 | 0.074534 | 0.031056 | 0.039337 | 0.096618 | 0.067633 | 0.067633 | 0.067633 | 0.067633 | 0.067633 | 0 | 0 | 0.243957 | 2,234 | 61 | 87 | 36.622951 | 0.857904 | 0.042972 | 0 | 0.041667 | 0 | 0 | 0.168856 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.104167 | 0.083333 | 0 | 0.4375 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8a490933d8b95e96a7ba4163aae03b0fe0c37be5
| 657 |
py
|
Python
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 40 |
2021-06-01T07:37:59.000Z
|
2022-03-25T01:42:09.000Z
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 14 |
2021-06-01T11:52:46.000Z
|
2022-03-25T02:13:08.000Z
|
pytorch-frontend/tools/code_coverage/oss_coverage.py
|
AndreasKaratzas/stonne
|
2915fcc46cc94196303d81abbd1d79a56d6dd4a9
|
[
"MIT"
] | 7 |
2021-07-20T19:34:26.000Z
|
2022-03-13T21:07:36.000Z
|
#!/usr/bin/env python
import time
from package.oss.cov_json import get_json_report
from package.oss.init import initialization
from package.tool.summarize_jsons import summarize_jsons
from package.util.setting import TestPlatform
def report_coverage() -> None:
start_time = time.time()
(options, test_list, interested_folders) = initialization()
# run cpp tests
get_json_report(test_list, options)
# collect coverage data from json profiles
if options.need_summary:
summarize_jsons(
test_list, interested_folders, [""], TestPlatform.OSS, start_time
)
if __name__ == "__main__":
report_coverage()
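# Run sketch: executed from the PyTorch checkout as
#   $ python tools/code_coverage/oss_coverage.py
# with any options consumed inside initialization(); the summary step only
# fires when those options set need_summary.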
| 27.375 | 77 | 0.73516 | 83 | 657 | 5.506024 | 0.493976 | 0.09628 | 0.061269 | 0.109409 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.182648 | 657 | 23 | 78 | 28.565217 | 0.851024 | 0.114155 | 0 | 0 | 0 | 0 | 0.013817 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.333333 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
8a497075ae36fc35a089004f84ef24d85e09ec1c
| 401 |
py
|
Python
|
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | 1 |
2017-01-09T17:27:05.000Z
|
2017-01-09T17:27:05.000Z
|
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | null | null | null |
groupthink/version.py
|
emanuelfeld/groupthink
|
d8a6f666080352d396b07096cbd6304391f7c38d
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of groupthink.
# https://github.com/emanuelfeld/groupthink
# This project is in the public domain within the United States.
# Additionally, the Government of the District of Columbia waives
# copyright and related rights in the work worldwide through the CC0 1.0
# Universal public domain dedication.
__version__ = '1.0.0' # NOQA
| 28.642857 | 72 | 0.743142 | 60 | 401 | 4.9 | 0.733333 | 0.034014 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020896 | 0.164589 | 401 | 13 | 73 | 30.846154 | 0.856716 | 0.887781 | 0 | 0 | 0 | 0 | 0.138889 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a5581cd0e7ff399dcb5faaf23430dc8e5e4058e
| 4,370 |
py
|
Python
|
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | null | null | null |
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | null | null | null |
figure_code/rate_of_change_tc.py
|
DavisWeaver/fears
|
857cb959a3a111a41df4cf62c4c6a19d3abd33c0
|
[
"MIT"
] | 1 |
2021-11-09T14:42:01.000Z
|
2021-11-09T14:42:01.000Z
|
import matplotlib.pyplot as plt
import numpy as np
from fears.utils import results_manager, plotter, dir_manager
import os
suffix = '07212021_0001'
data_folder = 'results_' + suffix
exp_info_file = 'experiment_info_' + suffix + '.p'
exp_folders,exp_info = results_manager.get_experiment_results(data_folder,
exp_info_file)
max_cells = exp_info.populations[0].max_cells
n_sims = exp_info.n_sims
k_abs = exp_info.slopes
exp_folders.reverse()
k_abs = np.flip(k_abs)
fig,ax = plt.subplots(nrows=2,ncols=2,figsize=(4,4))
pop = exp_info.populations[0]
ax = ax.reshape((len(k_abs),))
axnum = 0
tc_axes=[]
drug_axes=[]
for exp in exp_folders:
k_abs_t = exp[exp.find('=')+1:]
k_abs_t = float(k_abs_t)
num = np.argwhere(k_abs == k_abs_t)
num = num[0,0]
# generate timecourse axes
tcax = ax[axnum]
# da = tcax.twinx()
sim_files = os.listdir(path=exp)
sim_files = sorted(sim_files)
survive_count = 0
counts_total = None
k=0
while k < len(sim_files):
# for sim in sim_files:
sim = sim_files[k]
sim = exp + os.sep + sim
data = results_manager.get_data(sim)
dc = data[:,-1]
data = data[:,0:-1]
# data = data/np.max(data)
data_t = data[-1,:]
# check to see if any genotypes are at least 10% of the max cell count
if any(data_t >= 1):
survive_count += 1
if counts_total is None:
counts_total = data
else:
counts_total += data
# data = data/np.max(data)
# exp_info.populations[num].counts_log_scale = True
data = data/max_cells
if k==0:
drug_kwargs = {'alpha':0.7,
'color':'black',
'linewidth':2,
'label':'Drug Concentration ($\u03BC$M)'
}
tcax,drug_ax = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
drug_curve=dc,
drug_ax_sci_notation=True,
drug_kwargs=drug_kwargs,
legend_labels=False,
grayscale=True,
color='gray',
linewidth=1,
labelsize=12,
alpha=0.7
)
drug_ax.set_ylabel('')
drug_axes.append( drug_ax )
else:
tcax,da = plotter.plot_timecourse_to_axes(exp_info.populations[num],
data,
tcax,
grayscale=True,
color='gray',
legend_labels=False,
linewidth=2,
labelsize=12,
alpha=0.2
)
# drug_ax.set_ylim(0,10**4)
k+=1
if survive_count > 0:
counts_avg = counts_total/survive_count
# counts_avg = counts_avg/np.max(counts_avg)
# counts_avg = counts_total
counts_avg = counts_avg/np.max(counts_avg)
tcax,temp = plotter.plot_timecourse_to_axes(exp_info.populations[num],
counts_avg,
tcax,
labelsize=12)
# t = np.arange(len(dc))
# t = t*exp_info.populations[0].timestep_scale/24
# da.plot(t,dc)
tc_axes.append( tcax )
axnum+=1
| 37.350427 | 85 | 0.415103 | 429 | 4,370 | 3.981352 | 0.296037 | 0.04918 | 0.07377 | 0.04918 | 0.170375 | 0.131148 | 0.131148 | 0.131148 | 0.093677 | 0.065574 | 0 | 0.027752 | 0.505263 | 4,370 | 117 | 86 | 37.350427 | 0.762257 | 0.094737 | 0 | 0.172414 | 0 | 0 | 0.027137 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045977 | 0 | 0.045977 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a61523d34a63b6c1b5541a6127f60a7a5d5ec7e
| 4,684 |
py
|
Python
|
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
PyBank/.ipynb_checkpoints/Pymain-checkpoint.py
|
yash5OG/PythonChallengeW3-Y5
|
4a20ea5bae2d88af5a7d56f43ddc63ac64eaee67
|
[
"MIT"
] | null | null | null |
{
"cells": [
{
"cell_type": "code",
"execution_count": 64,
"metadata": {},
"outputs": [],
"source": [
"# Import libraries\n",
"import os, csv"
]
},
{
"cell_type": "code",
"execution_count": 65,
"metadata": {},
"outputs": [],
"source": [
"#variables for the script\n",
"months = [] #list of months\n",
"pl =[] #list of monthly PL\n",
"pl_changes = [] #list of P&L Changes\n",
"n_months = 0 #count of months\n",
"pl_total = 0 #total of P&L\n",
"plc = 0 #variable to track PL changes\n",
"avg_pl_change = 0 #average of changes in PL\n",
"maxpl = 0 #maximum increase in profits\n",
"minpl = 0 #maximum decrease in losses\n",
"max_i = 0 #index for max pl\n",
"min_i = 0 #index for min pl\n",
"\n",
"#read the resource file\n",
"bankcsv = os.path.join(\".\", \"Resources\", \"budget_data.csv\") #set path\n",
"\n",
"\n",
"#read file\n",
"with open(bankcsv, 'r') as csv_file:\n",
" csv_reader = csv.reader(csv_file,delimiter=\",\")\n",
" header = next(csv_reader)\n",
" \n",
" #for loop to update the counters and lists\n",
" for row in csv_reader:\n",
" n_months += 1\n",
" pl_total += int(row[1])\n",
" pl.append(row[1])\n",
" months.append(row[0])"
]
},
{
"cell_type": "code",
"execution_count": 66,
"metadata": {},
"outputs": [],
"source": [
"# loop to track the PL change values\n",
"pl_changes = [] \n",
"plc = int(pl[0])\n",
"for i in range(1, len(pl)):\n",
" pl_changes.append(int(pl[i]) - plc)\n",
" plc = int(pl[i])\n",
" i += 1\n",
"#print(pl_changes)"
]
},
{
"cell_type": "code",
"execution_count": 67,
"metadata": {},
"outputs": [],
"source": [
"#calculate the average PL Changes, max and min\n",
"avg_pl_change = sum(pl_changes) / len(pl_changes)\n",
"maxpl = max(pl_changes)\n",
"minpl = min(pl_changes)\n",
"#print(avg_pl_change, maxpl, minpl)\n",
"#print(pl_changes.index(maxpl))\n",
"#print(len(pl_changes))"
]
},
{
"cell_type": "code",
"execution_count": 68,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Financial Analysis\n",
"---------------------------------------------------------------------\n",
"Total Months: 86\n",
"Total: $38382578\n",
"Average Change: $-2315.12\n",
"Greatest Increase in Profits: Feb-2012 ($1926159)\n",
"Greatest Decrease in Profits: Sep-2013 ($-2196167)\n"
]
}
],
"source": [
"#find dates for max and min PL changes\n",
"max_i = pl_changes.index(maxpl) +1 #adding +1 since the changes are calculated one row above\n",
"min_i = pl_changes.index(minpl) +1\n",
"\n",
"maxmonth = months[max_i]\n",
"minmonth = months[min_i]\n",
"\n",
"#print output to the terminal\n",
"\n",
"print(\"Financial Analysis\")\n",
"print(\"-\"*69)\n",
"print(f\"Total Months: {n_months}\")\n",
"print(f\"Total: ${round(pl_total,2)}\")\n",
"print(f\"Average Change: ${round(avg_pl_change,2)}\")\n",
"print(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\")\n",
"print(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\")\n"
]
},
{
"cell_type": "code",
"execution_count": 69,
"metadata": {},
"outputs": [],
"source": [
"# write summary to txt file\n",
"output = os.path.join(\".\",\"Analysis\", \"summary.txt\")\n",
"\n",
"# use \"\\n\" to create a new line\n",
"with open(output, 'w') as output:\n",
" output.write(\"Financial Analysis\\n\")\n",
" output.write(\"-\"*69 + \"\\n\")\n",
" output.write(f\"Total Months: {n_months}\\n\")\n",
" output.write(f\"Total: ${round(pl_total,2)}\\n\")\n",
" output.write(f\"Average Change: ${round(avg_pl_change,2)}\\n\")\n",
" output.write(f\"Greatest Increase in Profits: {maxmonth} (${maxpl})\\n\")\n",
" output.write(f\"Greatest Decrease in Profits: {minmonth} (${minpl})\\n\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| 29.093168 | 104 | 0.51281 | 592 | 4,684 | 3.952703 | 0.256757 | 0.01453 | 0.035897 | 0.053846 | 0.230342 | 0.185897 | 0.142735 | 0.095727 | 0.02735 | 0 | 0 | 0.024327 | 0.254056 | 4,684 | 160 | 105 | 29.275 | 0.645392 | 0 | 0 | 0.20625 | 0 | 0.00625 | 0.599701 | 0.058497 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.0125 | 0 | 0.0125 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a64487109643353c0e84bbee6dfb1cf09044927
| 834 |
py
|
Python
|
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | null | null | null |
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | 1 |
2020-01-07T12:41:26.000Z
|
2020-01-07T12:50:40.000Z
|
beta_reconstruction/crystal_relations.py
|
LightForm-group/beta-reconstruction
|
67584f75ee08690226595c5f9dc75dfd164a11a0
|
[
"MIT"
] | null | null | null |
import numpy as np
from defdap.quat import Quat
hex_syms = Quat.symEqv("hexagonal")
# subset of hexagonal symmetries that give unique orientations when the
# Burgers transformation is applied
unq_hex_syms = [
hex_syms[0],
hex_syms[5],
hex_syms[4],
hex_syms[2],
hex_syms[10],
hex_syms[11]
]
cubic_syms = Quat.symEqv("cubic")
# subset of cubic symmetries that give unique orientations when the
# Burgers transformation is applied
unq_cub_syms = [
cubic_syms[0],
cubic_syms[7],
cubic_syms[9],
cubic_syms[1],
cubic_syms[22],
cubic_syms[16],
cubic_syms[12],
cubic_syms[15],
cubic_syms[4],
cubic_syms[8],
cubic_syms[21],
cubic_syms[20]
]
# HCP -> BCC
burg_eulers = np.array([135, 90, 354.74]) * np.pi / 180
burg_trans = Quat.fromEulerAngles(*burg_eulers).conjugate
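# Usage sketch (quaternion composition via `*` is assumed from defdap's Quat):
#   alpha_ori = Quat.fromEulerAngles(0.1, 0.2, 0.3)  # an HCP orientation
#   beta_ori = burg_trans * alpha_ori                # mapped into the BCC frame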
| 22.540541 | 71 | 0.689448 | 127 | 834 | 4.314961 | 0.448819 | 0.213504 | 0.051095 | 0.087591 | 0.277372 | 0.277372 | 0.277372 | 0.277372 | 0.277372 | 0.277372 | 0 | 0.058122 | 0.195444 | 834 | 36 | 72 | 23.166667 | 0.758569 | 0.256595 | 0 | 0 | 0 | 0 | 0.022801 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a66a4e65b6c15a92cb15d2436631fabac501551
| 4,314 |
py
|
Python
|
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
pint/testsuite/test_definitions.py
|
s-avni/pint
|
4e33d44437991bf7c5e30977643f42ebd6ed40da
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import division, unicode_literals, print_function, absolute_import
from pint.util import (UnitsContainer)
from pint.converters import (ScaleConverter, OffsetConverter)
from pint.definitions import (Definition, PrefixDefinition, UnitDefinition,
DimensionDefinition, AliasDefinition)
from pint.testsuite import BaseTestCase
class TestDefinition(BaseTestCase):
def test_invalid(self):
self.assertRaises(ValueError, Definition.from_string, 'x = [time] * meter')
self.assertRaises(ValueError, Definition.from_string, '[x] = [time] * meter')
def test_prefix_definition(self):
for definition in ('m- = 1e-3', 'm- = 10**-3', 'm- = 0.001'):
x = Definition.from_string(definition)
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'm')
self.assertEqual(x.aliases, ())
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(0.001), 1)
self.assertEqual(str(x), 'm')
x = Definition.from_string('kilo- = 1e-3 = k-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ())
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
x = Definition.from_string('kilo- = 1e-3 = k- = anotherk-')
self.assertIsInstance(x, PrefixDefinition)
self.assertEqual(x.name, 'kilo')
self.assertEqual(x.aliases, ('anotherk', ))
self.assertEqual(x.symbol, 'k')
self.assertEqual(x.converter.to_reference(1000), 1)
self.assertEqual(x.converter.from_reference(.001), 1)
def test_baseunit_definition(self):
x = Definition.from_string('meter = [length]')
self.assertIsInstance(x, UnitDefinition)
self.assertTrue(x.is_base)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1}))
def test_unit_definition(self):
x = Definition.from_string('coulomb = ampere * second')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 1)
self.assertEqual(x.reference, UnitsContainer(ampere=1, second=1))
x = Definition.from_string('faraday = 96485.3399 * coulomb')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 96485.3399)
self.assertEqual(x.reference, UnitsContainer(coulomb=1))
x = Definition.from_string('degF = 9 / 5 * kelvin; offset: 255.372222')
self.assertIsInstance(x, UnitDefinition)
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, OffsetConverter)
self.assertEqual(x.converter.scale, 9/5)
self.assertEqual(x.converter.offset, 255.372222)
self.assertEqual(x.reference, UnitsContainer(kelvin=1))
x = Definition.from_string('turn = 6.28 * radian = _ = revolution = = cycle = _')
self.assertIsInstance(x, UnitDefinition)
self.assertEqual(x.name, 'turn')
self.assertEqual(x.aliases, ('revolution', 'cycle'))
self.assertEqual(x.symbol, 'turn')
self.assertFalse(x.is_base)
self.assertIsInstance(x.converter, ScaleConverter)
self.assertEqual(x.converter.scale, 6.28)
self.assertEqual(x.reference, UnitsContainer(radian=1))
def test_dimension_definition(self):
x = DimensionDefinition('[time]', '', (), converter='')
self.assertTrue(x.is_base)
self.assertEqual(x.name, '[time]')
x = Definition.from_string('[speed] = [length]/[time]')
self.assertIsInstance(x, DimensionDefinition)
self.assertEqual(x.reference, UnitsContainer({'[length]': 1, '[time]': -1}))
def test_alias_definition(self):
x = Definition.from_string("@alias meter = metro = metr")
self.assertIsInstance(x, AliasDefinition)
self.assertEqual(x.name, "meter")
self.assertEqual(x.aliases, ("metro", "metr"))
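# Run sketch: collected with the rest of pint's suite, e.g.
#   $ python -m pytest pint/testsuite/test_definitions.py -q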
| 44.474227 | 89 | 0.660176 | 477 | 4,314 | 5.878407 | 0.194969 | 0.171184 | 0.17689 | 0.098074 | 0.618759 | 0.502496 | 0.46505 | 0.429743 | 0.36234 | 0.322397 | 0 | 0.028338 | 0.206537 | 4,314 | 96 | 90 | 44.9375 | 0.790827 | 0.004868 | 0 | 0.35443 | 0 | 0 | 0.098112 | 0 | 0 | 0 | 0 | 0 | 0.683544 | 1 | 0.075949 | false | 0 | 0.063291 | 0 | 0.151899 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a69c6a560d7f1d6a12a9bb69281971b56733693
| 1,637 |
py
|
Python
|
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
setup.py
|
xbabka01/filetype.py
|
faba42b86988bd21a50d5b20919ecff0c6a84957
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
from setuptools import find_packages, setup
setup(
name='filetype',
version='1.0.7',
description='Infer file type and MIME type of any file/buffer. '
'No external dependencies.',
long_description=codecs.open('README.rst', 'r',
encoding='utf-8', errors='ignore').read(),
keywords='file libmagic magic infer numbers magicnumbers discovery mime '
'type kind',
url='https://github.com/h2non/filetype.py',
download_url='https://github.com/h2non/filetype.py/tarball/master',
author='Tomas Aparicio',
author_email='tomas@aparicio.me',
license='MIT',
license_files=['LICENSE'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System',
'Topic :: System :: Filesystems',
'Topic :: Utilities'],
platforms=['any'],
packages=find_packages(exclude=['dist', 'build', 'docs', 'tests',
'examples']),
package_data={'filetype': ['LICENSE', '*.md']},
zip_safe=True)
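# Usage sketch: from the project root,
#   $ pip install .           # install the package
#   $ python setup.py sdist   # build a source distribution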
| 38.069767 | 77 | 0.588882 | 167 | 1,637 | 5.724551 | 0.622754 | 0.099372 | 0.130753 | 0.135983 | 0.066946 | 0.066946 | 0.066946 | 0 | 0 | 0 | 0 | 0.013992 | 0.257789 | 1,637 | 42 | 78 | 38.97619 | 0.77284 | 0.025657 | 0 | 0 | 0 | 0 | 0.52919 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.052632 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a6d51f8a422fff8bc79749ffb6d71189dc006bc
| 2,509 |
py
|
Python
|
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | 1 |
2021-05-15T11:06:39.000Z
|
2021-05-15T11:06:39.000Z
|
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | null | null | null |
vframe_cli/commands/templates/image-mp.py
|
julescarbon/vframe
|
0798841fa9eb7e1252e4cdf71d68d991c26acab8
|
[
"MIT"
] | null | null | null |
#############################################################################
#
# VFRAME
# MIT License
# Copyright (c) 2020 Adam Harvey and VFRAME
# https://vframe.io
#
#############################################################################
import click
@click.command('')
@click.option('-i', '--input', 'opt_dir_in', required=True)
@click.option('-r', '--recursive', 'opt_recursive', is_flag=True)
@click.option('-e', '--ext', 'opt_exts', default=['jpg', 'png'], multiple=True,
help='Glob extension')
@click.option('--slice', 'opt_slice', type=(int, int), default=(None, None),
help='Slice list of files')
@click.option('-t', '--threads', 'opt_threads', default=None)
@click.pass_context
def cli(ctx, opt_dir_in, opt_recursive, opt_exts, opt_slice, opt_threads):
"""Multiprocessor image template"""
# ------------------------------------------------
# imports
from os.path import join
from pathlib import Path
from dataclasses import asdict
import numpy as np
import cv2 as cv
from tqdm import tqdm
from pathos.multiprocessing import ProcessingPool as Pool
from pathos.multiprocessing import cpu_count
from vframe.settings import app_cfg
from vframe.settings.modelzoo_cfg import modelzoo
from vframe.models.dnn import DNN
from vframe.image.dnn_factory import DNNFactory
from vframe.utils import file_utils
from vframe.utils.video_utils import FileVideoStream, mediainfo
log = app_cfg.LOG
# set N threads
if not opt_threads:
opt_threads = cpu_count() # maximum
# glob items
fp_items = file_utils.glob_multi(opt_dir_in, opt_exts, recursive=opt_recursive)
if any(opt_slice):
fp_items = fp_items[opt_slice[0]:opt_slice[1]]
log.info(f'Processing: {len(fp_items):,} files')
# -----------------------------------------------------------
# start pool worker
def pool_worker(pool_item):
# init threaded video reader
fp = pool_item['fp']
result = {'fp': fp}
# add media metadata
im = cv.imread(fp)
for i in range(20):
im = cv.blur(im, (35,35))
return result
# end pool worker
# -----------------------------------------------------------
# convert file list into object with
pool_items = [{'fp': fp} for fp in fp_items]
# init processing pool iterator
# use imap instead of map via @hkyi Stack Overflow 41920124
desc = f'image-mp x{opt_threads}'
with Pool(opt_threads) as p:
pool_results = list(tqdm(p.imap(pool_worker, pool_items), total=len(fp_items), desc=desc))
| 28.83908 | 94 | 0.610602 | 326 | 2,509 | 4.552147 | 0.432515 | 0.040431 | 0.016173 | 0.014825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009915 | 0.155839 | 2,509 | 87 | 94 | 28.83908 | 0.690746 | 0.209247 | 0 | 0 | 0 | 0 | 0.111296 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046512 | false | 0.023256 | 0.348837 | 0 | 0.418605 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
8a6dd286ad198b0a16465871a4cd84854d419ad0
| 1,824 |
py
|
Python
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
sneumann/galaxy
|
f6011bab5b8adbabae4986a45849bb9158ffc8bb
|
[
"CC-BY-3.0"
] | 1 |
2019-07-27T19:30:55.000Z
|
2019-07-27T19:30:55.000Z
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 4 |
2021-02-08T20:28:34.000Z
|
2022-03-02T02:52:55.000Z
|
lib/galaxy/tool_util/deps/container_resolvers/__init__.py
|
userssss/galaxy
|
9662164ad68b39adf5a5606a7aa8e388f6a79f1e
|
[
"CC-BY-3.0"
] | 1 |
2018-05-30T07:38:54.000Z
|
2018-05-30T07:38:54.000Z
|
"""The module defines the abstract interface for resolving container images for tool execution."""
from abc import (
ABCMeta,
abstractmethod,
abstractproperty,
)
import six
from galaxy.util.dictifiable import Dictifiable
@six.python_2_unicode_compatible
@six.add_metaclass(ABCMeta)
class ContainerResolver(Dictifiable):
"""Description of a technique for resolving container images for tool execution."""
# Keys for dictification.
dict_collection_visible_keys = ['resolver_type', 'can_uninstall_dependencies']
can_uninstall_dependencies = False
def __init__(self, app_info=None, **kwds):
"""Default initializer for ``ContainerResolver`` subclasses."""
self.app_info = app_info
self.resolver_kwds = kwds
def _get_config_option(self, key, default=None):
"""Look in resolver-specific settings for option and then fallback to
global settings.
"""
if self.app_info and hasattr(self.app_info, key):
return getattr(self.app_info, key)
else:
return default
@abstractmethod
def resolve(self, enabled_container_types, tool_info, **kwds):
"""Find a container matching all supplied requirements for tool.
The supplied argument is a :class:`galaxy.tool_util.deps.containers.ToolInfo` description
of the tool and its requirements.
"""
@abstractproperty
def resolver_type(self):
"""Short label for the type of container resolution."""
def _container_type_enabled(self, container_description, enabled_container_types):
"""Return a boolean indicating if the specified container type is enabled."""
return container_description.type in enabled_container_types
def __str__(self):
return "%s[]" % self.__class__.__name__
| 33.777778 | 98 | 0.707237 | 215 | 1,824 | 5.75814 | 0.44186 | 0.033926 | 0.044426 | 0.043619 | 0.069467 | 0.069467 | 0.069467 | 0 | 0 | 0 | 0 | 0.000695 | 0.211075 | 1,824 | 53 | 99 | 34.415094 | 0.859625 | 0.354167 | 0 | 0 | 0 | 0 | 0.039091 | 0.023636 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.107143 | 0.035714 | 0.571429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a748a255fe78209cc5338aaab9ff134d24befab
| 1,134 |
py
|
Python
|
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
baopig/ressources/ressources.py
|
ChreSyr/baopig
|
6264ab9a851b1ed0a031292abe7f159a53b3fc5e
|
[
"MIT"
] | null | null | null |
from baopig.pybao.objectutilities import Object
from baopig.pybao.issomething import *
class RessourcePack:
def config(self, **kwargs):
for name, value in kwargs.items():
self.__setattr__('_'+name, value)
class FontsRessourcePack(RessourcePack):
def __init__(self,
file=None,
height=15,
color=(0, 0, 0),
):
assert is_color(color)
self._file = file
self._height = height
self._color = color
file = property(lambda self: self._file)
color = property(lambda self: self._color)
height = property(lambda self: self._height)
class ScenesRessourcePack(RessourcePack):
def __init__(self,
background_color=(170, 170, 170),
):
assert is_color(background_color)
self._background_color = background_color
background_color = property(lambda self: self._background_color)
# TODO : ButtonRessourcePack.style.create_surface(size)
class _RessourcePack:
def __init__(self):
self.font = FontsRessourcePack()
self.scene = ScenesRessourcePack()
ressources = _RessourcePack()
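# Usage sketch: config() stores each keyword under a leading underscore, which
# the read-only properties defined above then expose.
ressources.font.config(height=18)
assert ressources.font.height == 18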
| 19.894737 | 68 | 0.666667 | 120 | 1,134 | 6 | 0.358333 | 0.125 | 0.1 | 0.122222 | 0.075 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016222 | 0.238977 | 1,134 | 56 | 69 | 20.25 | 0.818076 | 0.046737 | 0 | 0.129032 | 0 | 0 | 0.000929 | 0 | 0 | 0 | 0 | 0.017857 | 0.064516 | 1 | 0.129032 | false | 0 | 0.064516 | 0 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a7922d582e70ee076c3374be8cdb74d33423c9b
| 1,038 |
py
|
Python
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 1,471 |
2017-12-25T05:47:57.000Z
|
2019-11-19T07:47:53.000Z
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 915 |
2019-11-21T05:48:16.000Z
|
2022-03-31T23:51:03.000Z
|
tests/ast/nodes/test_from_node.py
|
upgradvisor/vyper
|
642884ea938a25793c1b2fac866e8458e63a7b49
|
[
"Apache-2.0"
] | 321 |
2017-12-25T16:37:21.000Z
|
2019-11-15T17:44:06.000Z
|
from vyper import ast as vy_ast
def test_output_class():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert isinstance(new_node, vy_ast.Int)
def test_source():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.src == new_node.src
assert old_node.node_source_code == new_node.node_source_code
def test_kwargs():
old_node = vy_ast.parse_to_ast("42").body[0].value
new_node = vy_ast.Int.from_node(old_node, value=666)
assert old_node.value == 42
assert new_node.value == 666
def test_compare_nodes():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert not vy_ast.compare_nodes(old_node, new_node)
def test_new_node_has_no_parent():
old_node = vy_ast.parse_to_ast("foo = 42")
new_node = vy_ast.Int.from_node(old_node, value=666)
assert new_node._parent is None
assert new_node._depth == 0
| 25.95 | 65 | 0.719653 | 186 | 1,038 | 3.629032 | 0.198925 | 0.145185 | 0.146667 | 0.106667 | 0.561481 | 0.539259 | 0.539259 | 0.506667 | 0.506667 | 0.506667 | 0 | 0.037166 | 0.17052 | 1,038 | 39 | 66 | 26.615385 | 0.746806 | 0 | 0 | 0.375 | 0 | 0 | 0.032755 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.208333 | false | 0 | 0.041667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a79bd5eb2532e1ffdd3b87d6be696b8303afc7f
| 2,624 |
py
|
Python
|
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | 1 |
2021-11-18T18:34:29.000Z
|
2021-11-18T18:34:29.000Z
|
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
generator/modules/opencv.py
|
dayta-ai/deepo
|
fa720e39052e63adfe0f2b9dbd8444a0d69c2540
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .tools import Tools
from .boost import Boost
from .python import Python
@dependency(Tools, Python, Boost)
@source('git')
@version('4.0.1')
class Opencv(Module):
def build(self):
return r'''
RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \
DEBIAN_FRONTEND=noninteractive \
add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main" && \
apt update && \
$APT_INSTALL \
libatlas-base-dev \
libgflags-dev \
libgoogle-glog-dev \
libhdf5-serial-dev \
libleveldb-dev \
liblmdb-dev \
libprotobuf-dev \
libsnappy-dev \
protobuf-compiler \
libopencv-dev \
yasm \
libjpeg-dev \
libjasper-dev \
libavcodec-dev \
libavformat-dev \
libswscale-dev \
libdc1394-22-dev \
libv4l-dev \
libtbb-dev \
libqt4-dev \
libgtk2.0-dev \
libfaac-dev \
libmp3lame-dev \
libopencore-amrnb-dev \
libopencore-amrwb-dev \
libtheora-dev \
libvorbis-dev \
libxvidcore-dev \
x264 \
v4l-utils \
ffmpeg \
&& \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \
$GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \
mkdir -p opencv/build && cd opencv/build && \
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D WITH_IPP=OFF \
-D WITH_CUDA=OFF \
-D WITH_TBB=ON \
-D WITH_V4L=ON \
-D WITH_QT=ON \
-D WITH_OPENCL=ON \
-D WITH_GTK=ON \
-D WITH_LIBV4L=ON \
-D BUILD_TESTS=OFF \
-D BUILD_PERF_TESTS=OFF \
-D WITH_FFMPEG=ON \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
.. && \
make -j"$(nproc)" install && \
ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2
'''.format(self.version)
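# Editorial note (assumption): the @version('4.0.1') decorator is taken to set
# self.version, so the {0} placeholders above check out the matching
# opencv/opencv_contrib release tags.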
| 35.945205 | 100 | 0.463796 | 249 | 2,624 | 4.763052 | 0.497992 | 0.037943 | 0.029511 | 0.025295 | 0.06914 | 0.06914 | 0.06914 | 0.06914 | 0.06914 | 0 | 0 | 0.018256 | 0.436357 | 2,624 | 72 | 101 | 36.444444 | 0.783638 | 0.008003 | 0 | 0 | 0 | 0.044118 | 0.889658 | 0.105729 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0 | 0.058824 | 0.014706 | 0.102941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a84ca10fd051b6b0bb8be0088246cc71958f9d5
| 12,062 |
py
|
Python
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 9 |
2020-03-25T07:51:47.000Z
|
2022-02-07T00:07:28.000Z
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 1,164 |
2021-01-28T23:16:11.000Z
|
2022-03-28T07:23:10.000Z
|
oase-root/web_app/views/system/mail/action_mail.py
|
Masa-Yasuno/oase
|
90f3cee73c0d9b3153808a4a72bd19984a4873f9
|
[
"Apache-2.0"
] | 25 |
2020-03-17T06:48:30.000Z
|
2022-02-15T15:13:44.000Z
|
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
[Overview]
Helper class for screen display of the MAIL action
"""
import pytz
import datetime
import json
import socket
import traceback
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.db import transaction
from django.conf import settings
from libs.commonlibs import define as defs
from libs.commonlibs.oase_logger import OaseLogger
from libs.commonlibs.aes_cipher import AESCipher
from web_app.models.models import ActionType
from web_app.models.mail_models import MailDriver
from web_app.templatetags.common import get_message
from web_app.serializers.unicode_check import UnicodeCheck
logger = OaseLogger.get_instance()  # initialize the logger
class mailDriverInfo():
def __init__(self, drv_id, act_id, name, ver, icon_name):
self.drv_id = drv_id
self.act_id = act_id
self.name = name
self.ver = ver
self.icon_name = icon_name
def __str__(self):
return '%s(ver%s)' % (self.name, self.ver)
def get_driver_name(self):
return '%s Driver ver%s' % (self.name, self.ver)
def get_driver_id(self):
return self.drv_id
def get_icon_name(self):
return self.icon_name
@classmethod
def get_template_file(cls):
return 'system/mail/action_mail.html'
@classmethod
def get_info_list(cls, user_groups):
try:
mail_driver_obj_list = MailDriver.objects.all()
except Exception as e:
            # exceptions raised here are caught by the outermost caller
raise
protocol_dict = cls.get_define()['dict']
mail_driver_dto_list = []
cipher = AESCipher(settings.AES_KEY)
for mail_obj in mail_driver_obj_list:
mail_info = mail_obj.__dict__
if mail_obj.password:
mail_info['password'] = cipher.decrypt(mail_obj.password)
mail_info['protocol_str'] = protocol_dict[mail_obj.protocol]
mail_driver_dto_list.append(mail_info)
return mail_driver_dto_list
@classmethod
def get_group_list(cls, user_groups):
"""
        [Overview]
        Get the list of groups (excluding the system administration group)
"""
return []
@classmethod
def get_define(cls):
protocol_dict = {key_value['v']: key_value['k'] for key_value in defs.SMTP_PROTOCOL.LIST_ALL}
defines = {
'list_all': defs.SMTP_PROTOCOL.LIST_ALL,
'dict': protocol_dict,
}
return defines
def record_lock(self, json_str, request):
logger.logic_log('LOSI00001', 'None', request=request)
driver_id = self.get_driver_id()
        # lock the record before updating
if json_str['json_str']['ope'] in (defs.DABASE_OPECODE.OPE_UPDATE, defs.DABASE_OPECODE.OPE_DELETE):
drvinfo_modify = int(json_str['json_str']['mail_driver_id'])
MailDriver.objects.select_for_update().filter(pk=drvinfo_modify)
logger.logic_log('LOSI00002', 'Record locked.(driver_id=%s)' % driver_id, request=request)
def modify(self, json_str, request):
"""
        [Method overview]
        DB update processing for the group
"""
logger.logic_log('LOSI00001', 'None', request=request)
error_flag = False
error_msg = {
'mail_disp_name' : '',
'protocol' : '',
'smtp_server' : '',
'port' : '',
'user' : '',
'password' : '',
}
now = datetime.datetime.now(pytz.timezone('UTC'))
emo_chk = UnicodeCheck()
        # data returned on success
response = {"status": "success",}
try:
rq = json_str['json_str']
ope = int(rq['ope'])
            # input validation for every operation except delete
if ope != defs.DABASE_OPECODE.OPE_DELETE:
error_flag = self._validate(rq, error_msg, request)
if error_flag:
raise UserWarning('validation error.')
            # encrypt the password; use an empty string if it is empty
cipher = AESCipher(settings.AES_KEY)
if ope == defs.DABASE_OPECODE.OPE_UPDATE:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
driver_info_mod = MailDriver.objects.get(mail_driver_id=rq['mail_driver_id'])
driver_info_mod.mail_disp_name = rq['mail_disp_name']
driver_info_mod.protocol = rq['protocol']
driver_info_mod.smtp_server = rq['smtp_server']
driver_info_mod.port = rq['port']
driver_info_mod.user = rq['user']
driver_info_mod.password = encrypted_password
driver_info_mod.last_update_user = request.user.user_name
driver_info_mod.last_update_timestamp = now
driver_info_mod.save(force_update=True)
elif ope == defs.DABASE_OPECODE.OPE_DELETE:
MailDriver.objects.filter(pk=rq['mail_driver_id']).delete()
elif ope == defs.DABASE_OPECODE.OPE_INSERT:
encrypted_password = cipher.encrypt(rq['password']) if rq['password'] else ''
                driver_info_reg = MailDriver(
                    mail_disp_name        = rq['mail_disp_name'],
                    protocol              = rq['protocol'],
                    smtp_server           = rq['smtp_server'],
                    port                  = rq['port'],
                    user                  = rq['user'],
                    password              = encrypted_password,
                    last_update_user      = request.user.user_name,
                    last_update_timestamp = now
                )
                driver_info_reg.save(force_insert=True)
except MailDriver.DoesNotExist:
            logger.logic_log('LOSM07006', "mail_driver_id", rq['mail_driver_id'], request=request)
except Exception as e:
logger.logic_log('LOSI00005', traceback.format_exc(), request=request)
response = {
'status': 'failure',
                'error_msg': error_msg,  # error details (shown via the error icon)
}
logger.logic_log('LOSI00002', 'response=%s' % response, request=request)
return response
def _validate(self, rq, error_msg, request):
"""
        [Overview]
        Input validation
        [Arguments]
        rq: dict -- the requested input data
        error_msg: dict
        [Return value]
"""
logger.logic_log('LOSI00001', 'data: %s, error_msg:%s'%(rq, error_msg))
error_flag = False
emo_chk = UnicodeCheck()
emo_flag = False
emo_flag_ita_disp_name = False
emo_flag_hostname = False
if len(rq['mail_disp_name']) == 0:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27201', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'mail_disp_name', request=request)
if len(rq['mail_disp_name']) > 64:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27202', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'mail_disp_name', 64, rq['mail_disp_name'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['mail_disp_name'])
if len(value_list) > 0:
error_flag = True
emo_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27216', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['protocol']) == 0:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27212', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'protocol', request=request)
if len(rq['protocol']) > 64:
error_flag = True
error_msg['protocol'] += get_message('MOSJA27213', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'protocol', 64, rq['protocol'], request=request)
if len(rq['smtp_server']) == 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27203', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'smtp_server', request=request)
if len(rq['smtp_server']) > 128:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27204', request.user.get_lang_mode()) + '\n'
            logger.user_log('LOSM07002', 'smtp_server', 128, rq['smtp_server'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['smtp_server'])
if len(value_list) > 0:
error_flag = True
error_msg['smtp_server'] += get_message('MOSJA27217', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['port']) == 0:
error_flag = True
error_msg['port'] += get_message('MOSJA27205', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07001', 'port', request=request)
try:
tmp_port = int(rq['port'])
if 0 > tmp_port or tmp_port > 65535:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
except ValueError:
error_flag = True
error_msg['port'] += get_message('MOSJA27206', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07003', 'port', rq['port'], request=request)
if len(rq['user']) > 64:
error_flag = True
error_msg['user'] += get_message('MOSJA27207', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'user', 64, rq['user'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['user'])
if len(value_list) > 0:
error_flag = True
error_msg['user'] += get_message('MOSJA27218', request.user.get_lang_mode(), showMsgId=False) + '\n'
if len(rq['password']) > 64:
error_flag = True
error_msg['password'] += get_message('MOSJA27208', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07002', 'password', 64, rq['password'], request=request)
        # emoji check
value_list = emo_chk.is_emotion(rq['password'])
if len(value_list) > 0:
error_flag = True
error_msg['password'] += get_message('MOSJA27219', request.user.get_lang_mode(), showMsgId=False) + '\n'
if not emo_flag:
duplication = MailDriver.objects.filter(mail_disp_name=rq['mail_disp_name'])
if len(duplication) == 1 and int(rq['mail_driver_id']) != duplication[0].mail_driver_id:
error_flag = True
error_msg['mail_disp_name'] += get_message('MOSJA27209', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07004', 'mail_disp_name', rq['mail_disp_name'], request=request)
        if not error_flag:
            # connectivity check
resp_code = -1
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
                    resp_code = sock.connect_ex((rq['smtp_server'], int(rq['port'])))  # the host name must be resolvable, e.g. via /etc/hosts
sock.close()
except Exception as e:
pass
if resp_code != 0:
error_flag = True
                # TODO: provisionally, this error is reported under the display name field
error_msg['mail_disp_name'] += get_message('MOSJA27215', request.user.get_lang_mode()) + '\n'
logger.user_log('LOSM07005', rq['smtp_server'], rq['port'], request=request)
return error_flag
| 35.372434 | 122 | 0.596419 | 1,433 | 12,062 | 4.750174 | 0.193999 | 0.029382 | 0.035258 | 0.044954 | 0.418834 | 0.367563 | 0.327163 | 0.276627 | 0.240341 | 0.20332 | 0 | 0.026942 | 0.286105 | 12,062 | 340 | 123 | 35.476471 | 0.763558 | 0.072293 | 0 | 0.212963 | 0 | 0 | 0.122767 | 0.004443 | 0 | 0 | 0 | 0.002941 | 0 | 1 | 0.055556 | false | 0.060185 | 0.074074 | 0.023148 | 0.180556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8a89fcb6aa9605bd61ebc69c816df71f6eb1ab81
| 673 |
py
|
Python
|
indico/modules/events/abstracts/compat.py
|
aiforrural/Digital-Events-Example
|
628aaa8727b259b9367ac0ae1c5ba8e9e95eca82
|
[
"MIT"
] | 1 |
2021-02-08T09:34:27.000Z
|
2021-02-08T09:34:27.000Z
|
indico/modules/events/abstracts/compat.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
indico/modules/events/abstracts/compat.py
|
pamirk/indico
|
c3b4e06b11cc21ad497f74d0b2ca901bc1b2a768
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect
from indico.modules.events.abstracts.models.abstracts import Abstract
from indico.web.flask.util import url_for
from indico.web.rh import RHSimple
@RHSimple.wrap_function
def compat_abstract(endpoint, confId, friendly_id, track_id=None, management=False):
abstract = Abstract.find(event_id=confId, friendly_id=friendly_id).first_or_404()
return redirect(url_for('abstracts.' + endpoint, abstract, management=management))
| 35.421053 | 86 | 0.786033 | 101 | 673 | 5.128713 | 0.594059 | 0.057915 | 0.050193 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018803 | 0.130758 | 673 | 18 | 87 | 37.388889 | 0.866667 | 0.297177 | 0 | 0 | 0 | 0 | 0.021459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.5 | 0 | 0.75 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
8a8bbdd35a1d135f6e6a32befca7b762678940d4
| 327 |
py
|
Python
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | 1 |
2020-07-28T17:07:35.000Z
|
2020-07-28T17:07:35.000Z
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | 5 |
2021-04-06T18:25:29.000Z
|
2021-04-10T15:13:28.000Z
|
Python/Higher-Or-Lower/hol/__init__.py
|
AustinTSchaffer/DailyProgrammer
|
b16d9babb298ac5e879c514f9c4646b99c6860a8
|
[
"MIT"
] | null | null | null |
r"""
Contains classes and methods that can be used when simulating the game
Higher-or-Lower and performing statistical analysis on different games.
"""
from hol import (
cards,
constants,
)
from hol._hol import (
generate_all_games,
should_pick_higher,
is_a_winning_game,
generate_win_statistics,
)
| 17.210526 | 71 | 0.737003 | 45 | 327 | 5.133333 | 0.8 | 0.060606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.201835 | 327 | 18 | 72 | 18.166667 | 0.885057 | 0.434251 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8a9d019bec9e50c7c8d759ea60e658149d43ef2a
| 2,561 |
py
|
Python
|
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 1 |
2021-02-03T19:12:04.000Z
|
2021-02-03T19:12:04.000Z
|
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | null | null | null |
audiomentations/core/utils.py
|
jeongyoonlee/audiomentations
|
7f0112ae310989430e0ef7eb32c4116114810966
|
[
"MIT"
] | 1 |
2021-07-08T07:33:10.000Z
|
2021-07-08T07:33:10.000Z
|
import os
from pathlib import Path
import numpy as np
AUDIO_FILENAME_ENDINGS = (".aiff", ".flac", ".m4a", ".mp3", ".ogg", ".opus", ".wav")
def get_file_paths(
root_path, filename_endings=AUDIO_FILENAME_ENDINGS, traverse_subdirectories=True
):
"""Return a list of paths to all files with the given filename extensions in a directory.
Also traverses subdirectories by default.
"""
file_paths = []
for root, dirs, filenames in os.walk(root_path):
filenames = sorted(filenames)
for filename in filenames:
input_path = os.path.abspath(root)
file_path = os.path.join(input_path, filename)
if filename.lower().endswith(filename_endings):
file_paths.append(Path(file_path))
if not traverse_subdirectories:
# prevent descending into subfolders
break
return file_paths
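# Illustrative usage (editorial addition): collect every .wav and .mp3 file
# under "sounds/", including subdirectories (the default):
#
#     paths = get_file_paths("sounds/", filename_endings=(".wav", ".mp3"))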
def calculate_rms(samples):
"""Given a numpy array of audio samples, return its Root Mean Square (RMS)."""
return np.sqrt(np.mean(np.square(samples), axis=-1))
def calculate_desired_noise_rms(clean_rms, snr):
"""
Given the Root Mean Square (RMS) of a clean sound and a desired signal-to-noise ratio (SNR),
calculate the desired RMS of a noise sound to be mixed in.
Based on https://github.com/Sato-Kunihiko/audio-SNR/blob/8d2c933b6c0afe6f1203251f4877e7a1068a6130/create_mixed_audio_file.py#L20
:param clean_rms: Root Mean Square (RMS) - a value between 0.0 and 1.0
:param snr: Signal-to-Noise (SNR) Ratio in dB - typically somewhere between -20 and 60
:return:
"""
a = float(snr) / 20
noise_rms = clean_rms / (10 ** a)
return noise_rms
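# Worked example (editorial addition): clean_rms=0.5 with snr=20 dB gives
# noise_rms = 0.5 / 10**(20/20) = 0.05, i.e. the noise sits 20 dB below the
# clean signal.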
def convert_decibels_to_amplitude_ratio(decibels):
return 10 ** (decibels / 20)
def is_waveform_multichannel(samples):
"""
Return bool that answers the question: Is the given ndarray a multichannel waveform or not?
:param samples: numpy ndarray
:return:
"""
return len(samples.shape) > 1
def is_spectrogram_multichannel(spectrogram):
"""
Return bool that answers the question: Is the given ndarray a multichannel spectrogram?
:param samples: numpy ndarray
:return:
"""
return len(spectrogram.shape) > 2 and spectrogram.shape[-1] > 1
def convert_float_samples_to_int16(y):
"""Convert floating-point numpy array of audio samples to int16."""
if not issubclass(y.dtype.type, np.floating):
raise ValueError("input samples not floating-point")
return (y * np.iinfo(np.int16).max).astype(np.int16)
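# Illustrative usage (editorial addition): floats in [-1.0, 1.0] map onto the
# full int16 range, so 1.0 becomes np.iinfo(np.int16).max == 32767:
#
#     convert_float_samples_to_int16(np.array([0.0, 1.0], dtype=np.float32))
#     # -> array([    0, 32767], dtype=int16)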
| 31.617284 | 132 | 0.689184 | 353 | 2,561 | 4.878187 | 0.373938 | 0.034843 | 0.02439 | 0.029617 | 0.14518 | 0.117305 | 0.117305 | 0.072009 | 0.072009 | 0.072009 | 0 | 0.030243 | 0.212417 | 2,561 | 80 | 133 | 32.0125 | 0.8235 | 0.393596 | 0 | 0 | 0 | 0 | 0.043538 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.205882 | false | 0 | 0.088235 | 0.029412 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8aae1314a34df4a8c2038ff3f05e19541e560962
| 2,489 |
py
|
Python
|
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_cmk_describe.py
|
oglok/CPU-Manager-for-Kubernetes
|
503f37dcb20452699ce789b6628fa3ebeb9ffb54
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import helpers
from . import integration
def test_cmk_describe_ok():
args = ["describe", "--conf-dir={}".format(helpers.conf_dir("ok"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/ok",
"pools": {
"exclusive": {
"cpuLists": {
"4,12": {
"cpus": "4,12",
"tasks": [
2000
]
},
"5,13": {
"cpus": "5,13",
"tasks": [
2001
]
},
"6,14": {
"cpus": "6,14",
"tasks": [
2002
]
},
"7,15": {
"cpus": "7,15",
"tasks": [
2003
]
}
},
"exclusive": true,
"name": "exclusive"
},
"infra": {
"cpuLists": {
"0-2,8-10": {
"cpus": "0-2,8-10",
"tasks": [
3000,
3001,
3002
]
}
},
"exclusive": false,
"name": "infra"
},
"shared": {
"cpuLists": {
"3,11": {
"cpus": "3,11",
"tasks": [
1000,
1001,
1002,
1003
]
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
def test_cmk_describe_minimal():
args = ["describe",
"--conf-dir={}".format(helpers.conf_dir("minimal"))]
assert helpers.execute(integration.cmk(), args) == b"""{
"path": "/cmk/tests/data/config/minimal",
"pools": {
"exclusive": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": true,
"name": "exclusive"
},
"shared": {
"cpuLists": {
"0": {
"cpus": "0",
"tasks": []
}
},
"exclusive": false,
"name": "shared"
}
}
}
"""
| 21.273504 | 74 | 0.451185 | 242 | 2,489 | 4.607438 | 0.487603 | 0.053812 | 0.04843 | 0.0287 | 0.229596 | 0.229596 | 0.179372 | 0.179372 | 0.109417 | 0.109417 | 0 | 0.0625 | 0.382885 | 2,489 | 116 | 75 | 21.456897 | 0.663411 | 0.224588 | 0 | 0.367347 | 0 | 0 | 0.810021 | 0.031837 | 0 | 0 | 0 | 0 | 0.020408 | 1 | 0.020408 | false | 0 | 0.020408 | 0 | 0.040816 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8aafa8be4338ac950ec6be097349874901cbc17e
| 3,807 |
py
|
Python
|
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | 1 |
2019-07-19T18:09:38.000Z
|
2019-07-19T18:09:38.000Z
|
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | null | null | null |
tests/test_client.py
|
mgobec/python-memcached
|
8ea5fe5fca3a4f0d1201ca9aa50f9701c1baab01
|
[
"Apache-2.0"
] | null | null | null |
import collections
import unittest
import driver
from driver.protocol import *
_server = ('localhost', 11211)
_dead_retry = 30
_socket_timeout = 3
_max_receive_size = 4096
class MockConnection(object):
def __init__(self,
server=_server,
dead_retry=30,
socket_timeout=3):
self.server = server
self.dead_retry = dead_retry
self.socket_timeout = socket_timeout
self.closed = True
self.socket = None
self.send_buffer = collections.deque()
self.receive_buffer = collections.deque()
self.on_read = None
self.on_write = None
def open(self):
self.closed = False
self.socket = True
return True
def close(self):
self.closed = True
self.socket = None
def send(self, data):
if self.on_write is not None:
self.on_write()
self.send_buffer.append(data)
def read(self, size=_max_receive_size):
if self.on_read is not None:
self.on_read()
return self.receive_buffer.popleft()
class ClientTests(unittest.TestCase):
def setUp(self):
self.client = driver.Client(_server)
self.mock = MockConnection()
self.client._connection = self.mock
self.client.connect()
def test_initialize_and_connect(self):
self.assertFalse(self.mock.closed)
def test_disconnect(self):
self.client.disconnect()
self.assertTrue(self.mock.closed)
def test_set_value_without_response(self):
self.client.set('testkey', 'testvalue')
self.assertEqual(self.mock.send_buffer.pop(), b'set testkey 0 0 9 noreply\r\ntestvalue\r\n')
def test_set_value_with_stored_response(self):
self.mock.receive_buffer.append(StoreReply.STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertTrue(response)
def test_set_value_with_not_stored_response(self):
self.mock.receive_buffer.append(StoreReply.NOT_STORED + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_exists_response(self):
self.mock.receive_buffer.append(StoreReply.EXISTS + Constants.END_LINE)
response = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(response)
def test_set_value_with_error_response(self):
self.mock.receive_buffer.append(Errors.ERROR + Constants.END_LINE)
with self.assertRaises(driver.DriverUnknownException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_server_error_response(self):
self.mock.receive_buffer.append(Errors.SERVER_ERROR + b' Test server error' + Constants.END_LINE)
with self.assertRaises(driver.DriverServerException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_with_client_error_response(self):
self.mock.receive_buffer.append(Errors.CLIENT_ERROR + b' Test client error' + Constants.END_LINE)
with self.assertRaises(driver.DriverClientException):
self.client.set('testkey', 'testvalue', 0, False)
def test_set_value_exception(self):
error_message = "Test write exception"
self.mock.on_write = lambda: _raise_exception(error_message)
result = self.client.set('testkey', 'testvalue', 0, False)
self.assertFalse(result)
def test_get_value_exception(self):
error_message = "Test read exception"
self.mock.on_read = lambda: _raise_exception(error_message)
result = self.client.get('testkey')
self.assertIsNone(result)
def _raise_exception(message):
raise Exception(message)
| 34.609091 | 105 | 0.677699 | 468 | 3,807 | 5.279915 | 0.196581 | 0.05261 | 0.032376 | 0.048563 | 0.541076 | 0.492513 | 0.413193 | 0.413193 | 0.301497 | 0.176042 | 0 | 0.008452 | 0.22301 | 3,807 | 109 | 106 | 34.926606 | 0.82691 | 0 | 0 | 0.137931 | 0 | 0 | 0.068558 | 0.006304 | 0 | 0 | 0 | 0 | 0.126437 | 1 | 0.206897 | false | 0 | 0.045977 | 0 | 0.298851 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8abc0d6dcbf21ec8770db13b5b8c148d9b2c8d8e
| 1,607 |
py
|
Python
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 41 |
2019-11-28T16:58:41.000Z
|
2022-01-28T21:11:16.000Z
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notification-api
|
b1c1064f291eb860b494c3fa65ac256ad70bf47c
|
[
"MIT"
] | 1,083 |
2019-07-08T12:57:24.000Z
|
2022-03-08T18:53:40.000Z
|
migrations/versions/0084_add_job_stats.py
|
cds-snc/notifier-api
|
90b385ec49efbaee7e607516fc7d9f08991af813
|
[
"MIT"
] | 9 |
2020-01-24T19:56:43.000Z
|
2022-01-27T21:36:53.000Z
|
"""empty message
Revision ID: 0084_add_job_stats
Revises: 0083_add_perm_types_and_svc_perm
Create Date: 2017-05-12 13:16:14.147368
"""
# revision identifiers, used by Alembic.
revision = "0084_add_job_stats"
down_revision = "0083_add_perm_types_and_svc_perm"
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_table(
"job_statistics",
sa.Column("id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("job_id", postgresql.UUID(as_uuid=True), nullable=False),
sa.Column("emails_sent", sa.BigInteger(), nullable=False),
sa.Column("emails_delivered", sa.BigInteger(), nullable=False),
sa.Column("emails_failed", sa.BigInteger(), nullable=False),
sa.Column("sms_sent", sa.BigInteger(), nullable=False),
sa.Column("sms_delivered", sa.BigInteger(), nullable=False),
sa.Column("sms_failed", sa.BigInteger(), nullable=False),
sa.Column("letters_sent", sa.BigInteger(), nullable=False),
sa.Column("letters_failed", sa.BigInteger(), nullable=False),
sa.Column("created_at", sa.DateTime(), nullable=True),
sa.Column("updated_at", sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(
["job_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index(op.f("ix_job_statistics_job_id"), "job_statistics", ["job_id"], unique=True)
def downgrade():
op.drop_index(op.f("ix_job_statistics_job_id"), table_name="job_statistics")
op.drop_table("job_statistics")
| 35.711111 | 96 | 0.683261 | 211 | 1,607 | 4.962085 | 0.312796 | 0.091691 | 0.143266 | 0.200573 | 0.579752 | 0.574021 | 0.524355 | 0.143266 | 0.08978 | 0.08978 | 0 | 0.026946 | 0.168637 | 1,607 | 44 | 97 | 36.522727 | 0.756737 | 0.104543 | 0 | 0 | 0 | 0 | 0.209644 | 0.055905 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8ac88b2d708e6c6e6407bbbd9d9661fb3c6143fd
| 495 |
py
|
Python
|
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
molecule/ubuntu/tests/test_grafana.py
|
fiaasco/grafana
|
6a5963e43033d88b5bb4760d47755da1069ec26b
|
[
"MIT"
] | null | null | null |
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_package(host):
""" check if packages are installed
"""
assert host.package('grafana').is_installed
def test_service(host):
""" Testing whether the service is running and enabled
"""
assert host.service('grafana-server').is_enabled
assert host.service('grafana-server').is_running
| 24.75 | 63 | 0.739394 | 63 | 495 | 5.634921 | 0.539683 | 0.084507 | 0.11831 | 0.152113 | 0.219718 | 0.219718 | 0.219718 | 0 | 0 | 0 | 0 | 0 | 0.147475 | 495 | 19 | 64 | 26.052632 | 0.841232 | 0.175758 | 0 | 0 | 0 | 0 | 0.15641 | 0.058974 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.222222 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8ad27d34811f9ef90b1af846c18b262998179e76
| 1,523 |
py
|
Python
|
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
tests/generation_test.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
import unittest
from worldengine.plates import Step, center_land, world_gen
from worldengine.world import World
from tests.draw_test import TestBase
class TestGeneration(TestBase):
def setUp(self):
super(TestGeneration, self).setUp()
def test_world_gen_does_not_explode_badly(self):
# FIXME remove me when proper tests are in place
        # Very stupid test that just verifies nothing explodes badly
world_gen("Dummy", 32, 16, 1, step=Step.get_by_name("full"))
@staticmethod
def _mean_elevation_at_borders(world):
borders_total_elevation = 0.0
for y in range(world.height):
borders_total_elevation += world.elevation_at((0, y))
borders_total_elevation += world.elevation_at((world.width - 1, y))
for x in range(1, world.width - 1):
borders_total_elevation += world.elevation_at((x, 0))
borders_total_elevation += world.elevation_at((x, world.height - 1))
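        # the x-loop skips the first and last columns, already counted by the
        # y-loop, so the border holds width*2 + height*2 - 4 cells in total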
n_cells_on_border = world.width * 2 + world.height * 2 - 4
return borders_total_elevation / n_cells_on_border
def test_center_land(self):
w = World.from_pickle_file("%s/plates_279.world" % self.tests_data_dir)
# We want to have less land than before at the borders
el_before = TestGeneration._mean_elevation_at_borders(w)
center_land(w)
el_after = TestGeneration._mean_elevation_at_borders(w)
self.assertTrue(el_after <= el_before)
if __name__ == '__main__':
unittest.main()
| 35.418605 | 80 | 0.690085 | 211 | 1,523 | 4.668246 | 0.407583 | 0.078173 | 0.127919 | 0.105584 | 0.227411 | 0.227411 | 0.077157 | 0 | 0 | 0 | 0 | 0.016034 | 0.22193 | 1,523 | 42 | 81 | 36.261905 | 0.81519 | 0.101773 | 0 | 0 | 0 | 0 | 0.026393 | 0 | 0 | 0 | 0 | 0.02381 | 0.035714 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
76d2dd0a16c26b25219d0d5220bf5e490de12769
| 1,627 |
py
|
Python
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 16 |
2017-03-15T18:00:35.000Z
|
2018-07-30T14:44:53.000Z
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 40 |
2017-03-29T20:04:25.000Z
|
2019-10-21T16:56:15.000Z
|
run.py
|
Bioconductor/bioc_git_transition
|
9ca29f9e8058b755163e12bf9324ec1063d0182d
|
[
"MIT"
] | 4 |
2017-05-08T11:39:07.000Z
|
2017-08-17T14:18:03.000Z
|
"""Bioconductor run git transition code.
This module assembles the classes for the SVN --> Git transition
so that they can be run in a sequential manner.
It runs the following aspects of the Bioconductor transition.
Note: Update the SVN dump
1. Run Bioconductor Software package transition
2. Run Bioconductor Experiment Data package transition
3. Run Workflow package transition
4. Run Manifest file transition
5. Run Rapid update of master (trunk) and RELEASE_3_5 branches on
software packages
Manual tasks which need to be done:
1. Copy over bare repos to repositories/packages
2. Copy manifest bare git repo to repositories/admin
"""
import src.run_transition as rt
import src.svn_dump_update as sdu
import logging
import time
logging.basicConfig(filename='transition.log',
format='%(levelname)s %(asctime)s %(message)s',
level=logging.DEBUG)
def svn_dump_update(config_file):
sdu.svn_root_update(config_file)
sdu.svn_experiment_root_update(config_file)
return
def run(config_file):
rt.run_software_transition(config_file, new_svn_dump=True)
rt.run_experiment_data_transition(config_file, new_svn_dump=True)
rt.run_workflow_transition(config_file, new_svn_dump=True)
rt.run_manifest_transition(config_file, new_svn_dump=True)
return
if __name__ == '__main__':
start_time = time.time()
config_file = "./settings.ini"
svn_dump_update(config_file)
run(config_file)
# TODO: Run updates after dump update
svn_dump_update(config_file)
rt.run_updates(config_file)
logging.info("--- %s seconds ---" % (time.time() - start_time))
| 30.12963 | 69 | 0.754149 | 239 | 1,627 | 4.895397 | 0.393305 | 0.111111 | 0.068376 | 0.078632 | 0.211966 | 0.12906 | 0.12906 | 0.1 | 0.1 | 0 | 0 | 0.006647 | 0.167793 | 1,627 | 53 | 70 | 30.698113 | 0.857459 | 0.406884 | 0 | 0.16 | 0 | 0 | 0.09499 | 0 | 0 | 0 | 0 | 0.018868 | 0 | 1 | 0.08 | false | 0 | 0.16 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
76d437c1b037e1c3fe1a171bd9eb231c53d36fc1
| 645 |
py
|
Python
|
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
projectparallelprogrammeren/codesimulatie.py
|
fury106/ProjectParallelProgrammeren
|
fd3c198edaca5bcb19d8e665561e8cd14824e894
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Module projectparallelprogrammeren.codesimulatie
=================================================================
This module simulates everything.
"""
import projectparallelprogrammeren
def simulatie():
"""
    This function runs all versions so that they can be compared in terms of timing.
"""
from importlib import import_module
for i in range(4):
        # import all versions of the simulation and run them one after another.
version = f"montecarlo_v{i}"
montecarlo = import_module(version)
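        # editorial note (assumption): import_module(version) expects the
        # montecarlo_v* modules to be importable from sys.path as written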
        montecarlo.simulatie(100, 50)  # these values serve only as a test
if __name__ == "__main__":
simulatie()
#eof
| 23.035714 | 84 | 0.674419 | 70 | 645 | 6.057143 | 0.728571 | 0.051887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012658 | 0.142636 | 645 | 27 | 85 | 23.888889 | 0.754069 | 0.55814 | 0 | 0 | 0 | 0 | 0.087121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.333333 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
76d4b9d4643322713c59c30a22d968f034c3d591
| 2,361 |
py
|
Python
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 32 |
2021-01-18T03:52:17.000Z
|
2022-02-17T20:43:39.000Z
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 12 |
2021-02-06T08:12:08.000Z
|
2021-12-11T23:17:41.000Z
|
test/test_aes.py
|
haruhi-dl/haruhi-dl
|
0526e2add4c263209cad55347efa9a2dfe6c3fa6
|
[
"Unlicense"
] | 6 |
2021-01-29T16:46:31.000Z
|
2022-01-20T18:40:03.000Z
|
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from haruhi_dl.aes import aes_decrypt, aes_encrypt, aes_cbc_decrypt, aes_cbc_encrypt, aes_decrypt_text
from haruhi_dl.utils import bytes_to_intlist, intlist_to_bytes
import base64
# the encrypted data can be generated with 'devscripts/generate_aes_testdata.py'
class TestAES(unittest.TestCase):
def setUp(self):
self.key = self.iv = [0x20, 0x15] + 14 * [0]
self.secret_msg = b'Secret message goes here'
def test_encrypt(self):
msg = b'message'
key = list(range(16))
encrypted = aes_encrypt(bytes_to_intlist(msg), key)
decrypted = intlist_to_bytes(aes_decrypt(encrypted, key))
self.assertEqual(decrypted, msg)
def test_cbc_decrypt(self):
data = bytes_to_intlist(
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd"
)
decrypted = intlist_to_bytes(aes_cbc_decrypt(data, self.key, self.iv))
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg)
def test_cbc_encrypt(self):
data = bytes_to_intlist(self.secret_msg)
encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv))
self.assertEqual(
encrypted,
b"\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd")
def test_decrypt_text(self):
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 16))
self.assertEqual(decrypted, self.secret_msg)
password = intlist_to_bytes(self.key).decode('utf-8')
encrypted = base64.b64encode(
intlist_to_bytes(self.iv[:8])
+ b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83'
).decode('utf-8')
decrypted = (aes_decrypt_text(encrypted, password, 32))
self.assertEqual(decrypted, self.secret_msg)
if __name__ == '__main__':
unittest.main()
| 36.890625 | 102 | 0.671326 | 339 | 2,361 | 4.448378 | 0.368732 | 0.047745 | 0.074271 | 0.047745 | 0.451592 | 0.351459 | 0.302387 | 0.259947 | 0.259947 | 0.193634 | 0 | 0.063158 | 0.195256 | 2,361 | 63 | 103 | 37.47619 | 0.730526 | 0.051249 | 0 | 0.212766 | 0 | 0.085106 | 0.171658 | 0.143496 | 0 | 0 | 0.003576 | 0 | 0.106383 | 1 | 0.106383 | false | 0.085106 | 0.148936 | 0 | 0.276596 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
76dba06432c777d52082f512eea09a2187e28998
| 201 |
py
|
Python
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 5 |
2020-04-02T12:03:57.000Z
|
2020-10-18T19:29:15.000Z
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 22 |
2020-03-31T02:00:34.000Z
|
2021-06-30T17:59:01.000Z
|
app/retweet_graphs_v2/prep/migrate_daily_bot_probabilities.py
|
s2t2/tweet-analyzer-py
|
0a398fc47101a2d602d8c4116c970f1076a58f27
|
[
"MIT"
] | 3 |
2020-04-04T16:08:08.000Z
|
2020-10-20T01:32:46.000Z
|
from app.bq_service import BigQueryService
if __name__ == "__main__":
bq_service = BigQueryService()
bq_service.migrate_daily_bot_probabilities_table()
print("MIGRATION SUCCESSFUL!")
| 16.75 | 54 | 0.756219 | 22 | 201 | 6.227273 | 0.772727 | 0.19708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.159204 | 201 | 11 | 55 | 18.272727 | 0.810651 | 0 | 0 | 0 | 0 | 0 | 0.145729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.2 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
76dc3dcc93cf6f1c271c8e612a3e064f4f02ee56
| 3,258 |
py
|
Python
|
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
tests/bugs/core_6266_test.py
|
reevespaul/firebird-qa
|
98f16f425aa9ab8ee63b86172f959d63a2d76f21
|
[
"MIT"
] | null | null | null |
#coding:utf-8
#
# id: bugs.core_6266
# title: Deleting records from MON$ATTACHMENTS using ORDER BY clause doesn't close the corresponding attachments
# description:
# Old title: Don't close attach while deleting record from MON$ATTACHMENTS using ORDER BY clause.
# Confirmed bug on 3.0.6.33271.
# Checked on 3.0.6.33272 (SS/CS) - works fine.
# 22.04.2020. Checked separately on 4.0.0.1931 SS/CS: all OK. FB 4.0 can also be tested since this build.
#
# tracker_id: CORE-6266
# min_versions: ['3.0.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
# import os
# import sys
# import time
# import fdb
#
# ATT_CNT=5
# ATT_DELAY=1
#
# os.environ["ISC_USER"] = user_name
# os.environ["ISC_PASSWORD"] = user_password
#
# db_conn.close()
#
# con_list={}
# for i in range(0, ATT_CNT):
# if i > 0:
# time.sleep( ATT_DELAY )
#
# c = fdb.connect(dsn = dsn)
# a = c.attachment_id
# con_list[ i ] = (a, c)
# # print('created attachment ', (a,c) )
#
# con_admin = con_list[0][1]
#
# #print(con_admin.firebird_version)
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$attachment_id != current_connection order by mon$timestamp')
#
# # this removes ALL connections --> should NOT be used for reproducing ticket issue:
# #con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection order by mon$timestamp')
#
# # This DOES NOT remove all attachments (only 'last' in order of timestamp), but
# # DELETE statement must NOT contain phrase 'mon$attachment_id != current_connection':
# con_admin.execute_immediate('delete from mon$attachments where mon$system_flag is distinct from 1 order by mon$timestamp')
#
# con_admin.commit()
#
# cur_admin = con_admin.cursor()
# cur_admin.execute('select mon$attachment_id,mon$user from mon$attachments where mon$system_flag is distinct from 1 and mon$attachment_id != current_connection' )
# i=0
# for r in cur_admin:
# print( '### ACHTUNG ### STILL ALIVE ATTACHMENT DETECTED: ', r[0], r[1].strip(), '###' )
# i += 1
# print('Number of attachments that remains alive: ',i)
#
# cur_admin.close()
#
# #print('Final cleanup before quit from Python.')
#
# for k,v in sorted( con_list.items() ):
# #print('attempt to close attachment ', v[0] )
# try:
# v[1].close()
# #print('done.')
# except Exception as e:
# pass
# #print('Got exception:', sys.exc_info()[0])
# #print(e[0])
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
Number of attachments that remains alive: 0
"""
@pytest.mark.version('>=3.0')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| 31.028571 | 170 | 0.645795 | 465 | 3,258 | 4.380645 | 0.389247 | 0.027491 | 0.053019 | 0.045164 | 0.350515 | 0.334806 | 0.300442 | 0.265096 | 0.265096 | 0.265096 | 0 | 0.032514 | 0.225905 | 3,258 | 104 | 171 | 31.326923 | 0.775178 | 0.832413 | 0 | 0 | 0 | 0 | 0.175 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
76e58be1ebfa1f5a2978f0298b22ab49d27824a1
| 386 |
py
|
Python
|
initdb.py
|
dasmerlon/flunky-bot
|
19dff5a74bee6685e806f98c3f877216ef454a5d
|
[
"MIT"
] | null | null | null |
initdb.py
|
dasmerlon/flunky-bot
|
19dff5a74bee6685e806f98c3f877216ef454a5d
|
[
"MIT"
] | null | null | null |
initdb.py
|
dasmerlon/flunky-bot
|
19dff5a74bee6685e806f98c3f877216ef454a5d
|
[
"MIT"
] | null | null | null |
#!/bin/env python
"""Drop and create a new database with schema."""
from sqlalchemy_utils.functions import database_exists, create_database, drop_database
from flunkybot.db import engine, base
from flunkybot.models import * # noqa
db_url = engine.url
if database_exists(db_url):
drop_database(db_url)
create_database(db_url)
base.metadata.drop_all()
base.metadata.create_all()
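# Editorial note (assumption): drop_all()/create_all() are called without an
# explicit bind, so base.metadata is assumed to be bound to `engine` already.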
| 22.705882 | 86 | 0.790155 | 58 | 386 | 5.034483 | 0.465517 | 0.068493 | 0.089041 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11658 | 386 | 16 | 87 | 24.125 | 0.856305 | 0.168394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
76ebcd294c425806f2a19ba5ab050dfad80e8987
| 826 |
py
|
Python
|
trabalho-numerico/tridimensional.py
|
heissonwillen/tcm
|
71da46489f12e64b50436b17447721cb8f7eaf09
|
[
"MIT"
] | null | null | null |
trabalho-numerico/tridimensional.py
|
heissonwillen/tcm
|
71da46489f12e64b50436b17447721cb8f7eaf09
|
[
"MIT"
] | null | null | null |
trabalho-numerico/tridimensional.py
|
heissonwillen/tcm
|
71da46489f12e64b50436b17447721cb8f7eaf09
|
[
"MIT"
] | null | null | null |
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
import os
import contorno
from constantes import INTERVALOS, PASSOS, TAMANHO_BARRA, DELTA_T, DELTA_X
z_temp = contorno.p_3
TAMANHO_BARRA = 2
x = np.linspace(0.0, TAMANHO_BARRA, INTERVALOS+1)
y = np.linspace(0.0, DELTA_T, PASSOS+1)
z = []
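# Explicit FTCS (forward-time, centred-space) update for the 1-D heat equation
# T_t = T_xx; editorial note: the stencil below is stable only when
# DELTA_T / DELTA_X**2 <= 1/2.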
for k in range(PASSOS+1):
z_k = np.copy(z_temp)
z.append(z_k)
for i in range(1, INTERVALOS):
z_temp[i] = z_k[i] + (DELTA_T/(DELTA_X**2)) * (z_k[i+1]-2*z_k[i]+z_k[i-1])
z = np.asarray(z)
x, y = np.meshgrid(x, y)
fig = plt.figure()
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, cmap=cm.coolwarm, antialiased=False)
ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('T(x,t)')
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.show()
| 22.944444 | 82 | 0.692494 | 160 | 826 | 3.43125 | 0.39375 | 0.021858 | 0.021858 | 0.043716 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02845 | 0.14891 | 826 | 35 | 83 | 23.6 | 0.752489 | 0 | 0 | 0 | 0 | 0 | 0.012107 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.111111 | 0.259259 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
76f0f94143a86c5bd1bdfebcc7fe3a026073720d
| 860 |
py
|
Python
|
SVM/SVM_12_Quiz.py
|
rohit517/Intro-to-machine-learning-Udacity
|
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
|
[
"MIT"
] | null | null | null |
SVM/SVM_12_Quiz.py
|
rohit517/Intro-to-machine-learning-Udacity
|
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
|
[
"MIT"
] | null | null | null |
SVM/SVM_12_Quiz.py
|
rohit517/Intro-to-machine-learning-Udacity
|
d0b2cc6cac1cb3408b274225cecd4afcea4ee30f
|
[
"MIT"
] | null | null | null |
import sys
from class_vis import prettyPicture
from prep_terrain_data import makeTerrainData
import matplotlib.pyplot as plt
import copy
import numpy as np
import pylab as pl
features_train, labels_train, features_test, labels_test = makeTerrainData()
########################## SVM #################################
### we handle the import statement and SVC creation for you here
from sklearn.svm import SVC
clf = SVC(kernel="linear")
#### now your job is to fit the classifier
#### using the training features/labels, and to
#### make a set of predictions on the test data
clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
#### store your predictions in a list named pred
from sklearn.metrics import accuracy_score
acc = accuracy_score(pred, labels_test)
def submitAccuracy():
return acc
| 25.294118 | 77 | 0.696512 | 117 | 860 | 5.008547 | 0.564103 | 0.044369 | 0.064846 | 0.081911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175581 | 860 | 33 | 78 | 26.060606 | 0.826516 | 0.27093 | 0 | 0 | 0 | 0 | 0.01165 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.5625 | 0.0625 | 0.6875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
76f2637d428beecc1c55ba4761f8ecce6c4c4884
| 26,267 |
py
|
Python
|
runtime/python/Lib/site-packages/isort/output.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 4 |
2021-10-20T12:39:09.000Z
|
2022-02-26T15:02:08.000Z
|
runtime/python/Lib/site-packages/isort/output.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 20 |
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
runtime/python/Lib/site-packages/isort/output.py
|
hwaipy/InteractionFreeNode
|
88642b68430f57b028fd0f276a5709f89279e30d
|
[
"MIT"
] | 3 |
2021-08-28T14:22:36.000Z
|
2021-10-06T18:59:41.000Z
|
import copy
import itertools
from functools import partial
from typing import Any, Iterable, List, Optional, Set, Tuple, Type
from isort.format import format_simplified
from . import parse, sorting, wrap
from .comments import add_to_line as with_comments
from .identify import STATEMENT_DECLARATIONS
from .settings import DEFAULT_CONFIG, Config
def sorted_imports(
parsed: parse.ParsedContent,
config: Config = DEFAULT_CONFIG,
extension: str = "py",
import_type: str = "import",
) -> str:
"""Adds the imports back to the file.
(at the index of the first import) sorted alphabetically and split between groups
"""
if parsed.import_index == -1:
return _output_as_string(parsed.lines_without_imports, parsed.line_separator)
formatted_output: List[str] = parsed.lines_without_imports.copy()
remove_imports = [format_simplified(removal) for removal in config.remove_imports]
sections: Iterable[str] = itertools.chain(parsed.sections, config.forced_separate)
if config.no_sections:
parsed.imports["no_sections"] = {"straight": {}, "from": {}}
base_sections: Tuple[str, ...] = ()
for section in sections:
if section == "FUTURE":
base_sections = ("FUTURE",)
continue
parsed.imports["no_sections"]["straight"].update(
parsed.imports[section].get("straight", {})
)
parsed.imports["no_sections"]["from"].update(parsed.imports[section].get("from", {}))
sections = base_sections + ("no_sections",)
output: List[str] = []
seen_headings: Set[str] = set()
pending_lines_before = False
for section in sections:
straight_modules = parsed.imports[section]["straight"]
if not config.only_sections:
straight_modules = sorting.sort(
config,
straight_modules,
key=lambda key: sorting.module_key(
key, config, section_name=section, straight_import=True
),
reverse=config.reverse_sort,
)
from_modules = parsed.imports[section]["from"]
if not config.only_sections:
from_modules = sorting.sort(
config,
from_modules,
key=lambda key: sorting.module_key(key, config, section_name=section),
reverse=config.reverse_sort,
)
if config.star_first:
star_modules = []
other_modules = []
for module in from_modules:
if "*" in parsed.imports[section]["from"][module]:
star_modules.append(module)
else:
other_modules.append(module)
from_modules = star_modules + other_modules
straight_imports = _with_straight_imports(
parsed, config, straight_modules, section, remove_imports, import_type
)
from_imports = _with_from_imports(
parsed, config, from_modules, section, remove_imports, import_type
)
lines_between = [""] * (
config.lines_between_types if from_modules and straight_modules else 0
)
if config.from_first:
section_output = from_imports + lines_between + straight_imports
else:
section_output = straight_imports + lines_between + from_imports
if config.force_sort_within_sections:
# collapse comments
comments_above = []
new_section_output: List[str] = []
for line in section_output:
if not line:
continue
if line.startswith("#"):
comments_above.append(line)
elif comments_above:
new_section_output.append(_LineWithComments(line, comments_above))
comments_above = []
else:
new_section_output.append(line)
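            # sort with comments attached so each comment block travels with
            # the statement it annotates (re-expanded below)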
            # the only_sections option is not imposed if force_sort_within_sections is True
new_section_output = sorting.sort(
config,
new_section_output,
key=partial(sorting.section_key, config=config),
reverse=config.reverse_sort,
)
# uncollapse comments
section_output = []
for line in new_section_output:
comments = getattr(line, "comments", ())
if comments:
section_output.extend(comments)
section_output.append(str(line))
section_name = section
no_lines_before = section_name in config.no_lines_before
if section_output:
if section_name in parsed.place_imports:
parsed.place_imports[section_name] = section_output
continue
section_title = config.import_headings.get(section_name.lower(), "")
if section_title and section_title not in seen_headings:
if config.dedup_headings:
seen_headings.add(section_title)
section_comment = f"# {section_title}"
if section_comment not in parsed.lines_without_imports[0:1]: # pragma: no branch
section_output.insert(0, section_comment)
if pending_lines_before or not no_lines_before:
output += [""] * config.lines_between_sections
output += section_output
pending_lines_before = False
else:
pending_lines_before = pending_lines_before or not no_lines_before
if config.ensure_newline_before_comments:
output = _ensure_newline_before_comment(output)
while output and output[-1].strip() == "":
output.pop() # pragma: no cover
while output and output[0].strip() == "":
output.pop(0)
if config.formatting_function:
output = config.formatting_function(
parsed.line_separator.join(output), extension, config
).splitlines()
output_at = 0
if parsed.import_index < parsed.original_line_count:
output_at = parsed.import_index
formatted_output[output_at:0] = output
if output:
imports_tail = output_at + len(output)
while [
character.strip() for character in formatted_output[imports_tail : imports_tail + 1]
] == [""]:
formatted_output.pop(imports_tail)
if len(formatted_output) > imports_tail:
next_construct = ""
tail = formatted_output[imports_tail:]
for index, line in enumerate(tail): # pragma: no branch
should_skip, in_quote, *_ = parse.skip_line(
line,
in_quote="",
index=len(formatted_output),
section_comments=config.section_comments,
needs_import=False,
)
if not should_skip and line.strip():
if (
line.strip().startswith("#")
and len(tail) > (index + 1)
and tail[index + 1].strip()
):
continue
next_construct = line
break
if in_quote: # pragma: no branch
next_construct = line
break
if config.lines_after_imports != -1:
formatted_output[imports_tail:0] = [
"" for line in range(config.lines_after_imports)
]
elif extension != "pyi" and next_construct.startswith(STATEMENT_DECLARATIONS):
formatted_output[imports_tail:0] = ["", ""]
else:
formatted_output[imports_tail:0] = [""]
if parsed.place_imports:
new_out_lines = []
for index, line in enumerate(formatted_output):
new_out_lines.append(line)
if line in parsed.import_placements:
new_out_lines.extend(parsed.place_imports[parsed.import_placements[line]])
if (
len(formatted_output) <= (index + 1)
or formatted_output[index + 1].strip() != ""
):
new_out_lines.append("")
formatted_output = new_out_lines
return _output_as_string(formatted_output, parsed.line_separator)
def _with_from_imports(
parsed: parse.ParsedContent,
config: Config,
from_modules: Iterable[str],
section: str,
remove_imports: List[str],
import_type: str,
) -> List[str]:
output: List[str] = []
for module in from_modules:
if module in remove_imports:
continue
import_start = f"from {module} {import_type} "
from_imports = list(parsed.imports[section]["from"][module])
if (
not config.no_inline_sort
or (config.force_single_line and module not in config.single_line_exclusions)
) and not config.only_sections:
from_imports = sorting.sort(
config,
from_imports,
key=lambda key: sorting.module_key(
key,
config,
True,
config.force_alphabetical_sort_within_sections,
section_name=section,
),
reverse=config.reverse_sort,
)
if remove_imports:
from_imports = [
line for line in from_imports if f"{module}.{line}" not in remove_imports
]
sub_modules = [f"{module}.{from_import}" for from_import in from_imports]
as_imports = {
from_import: [
f"{from_import} as {as_module}" for as_module in parsed.as_map["from"][sub_module]
]
for from_import, sub_module in zip(from_imports, sub_modules)
if sub_module in parsed.as_map["from"]
}
if config.combine_as_imports and not ("*" in from_imports and config.combine_star):
if not config.no_inline_sort:
for as_import in as_imports:
if not config.only_sections:
as_imports[as_import] = sorting.sort(config, as_imports[as_import])
for from_import in copy.copy(from_imports):
if from_import in as_imports:
idx = from_imports.index(from_import)
if parsed.imports[section]["from"][module][from_import]:
from_imports[(idx + 1) : (idx + 1)] = as_imports.pop(from_import)
else:
from_imports[idx : (idx + 1)] = as_imports.pop(from_import)
only_show_as_imports = False
comments = parsed.categorized_comments["from"].pop(module, ())
above_comments = parsed.categorized_comments["above"]["from"].pop(module, None)
while from_imports:
if above_comments:
output.extend(above_comments)
above_comments = None
if "*" in from_imports and config.combine_star:
import_statement = wrap.line(
with_comments(
_with_star_comments(parsed, module, list(comments or ())),
f"{import_start}*",
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
from_imports = [
from_import for from_import in from_imports if from_import in as_imports
]
only_show_as_imports = True
elif config.force_single_line and module not in config.single_line_exclusions:
import_statement = ""
while from_imports:
from_import = from_imports.pop(0)
single_import_line = with_comments(
comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
comment = (
parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
)
if comment:
single_import_line += (
f"{comments and ';' or config.comment_prefix} " f"{comment}"
)
if from_import in as_imports:
if (
parsed.imports[section]["from"][module][from_import]
and not only_show_as_imports
):
output.append(
wrap.line(single_import_line, parsed.line_separator, config)
)
from_comments = parsed.categorized_comments["straight"].get(
f"{module}.{from_import}"
)
if not config.only_sections:
output.extend(
with_comments(
from_comments,
wrap.line(
import_start + as_import, parsed.line_separator, config
),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for as_import in sorting.sort(config, as_imports[from_import])
)
else:
output.extend(
with_comments(
from_comments,
wrap.line(
import_start + as_import, parsed.line_separator, config
),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for as_import in as_imports[from_import]
)
else:
output.append(wrap.line(single_import_line, parsed.line_separator, config))
comments = None
else:
while from_imports and from_imports[0] in as_imports:
from_import = from_imports.pop(0)
if not config.only_sections:
as_imports[from_import] = sorting.sort(config, as_imports[from_import])
from_comments = (
parsed.categorized_comments["straight"].get(f"{module}.{from_import}") or []
)
if (
parsed.imports[section]["from"][module][from_import]
and not only_show_as_imports
):
specific_comment = (
parsed.categorized_comments["nested"]
.get(module, {})
.pop(from_import, None)
)
if specific_comment:
from_comments.append(specific_comment)
output.append(
wrap.line(
with_comments(
from_comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
)
from_comments = []
for as_import in as_imports[from_import]:
specific_comment = (
parsed.categorized_comments["nested"]
.get(module, {})
.pop(as_import, None)
)
if specific_comment:
from_comments.append(specific_comment)
output.append(
wrap.line(
with_comments(
from_comments,
import_start + as_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
),
parsed.line_separator,
config,
)
)
from_comments = []
if "*" in from_imports:
output.append(
with_comments(
_with_star_comments(parsed, module, []),
f"{import_start}*",
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
)
from_imports.remove("*")
for from_import in copy.copy(from_imports):
comment = (
parsed.categorized_comments["nested"].get(module, {}).pop(from_import, None)
)
if comment:
from_imports.remove(from_import)
if from_imports:
use_comments = []
else:
use_comments = comments
comments = None
single_import_line = with_comments(
use_comments,
import_start + from_import,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
single_import_line += (
f"{use_comments and ';' or config.comment_prefix} " f"{comment}"
)
output.append(wrap.line(single_import_line, parsed.line_separator, config))
from_import_section = []
while from_imports and (
from_imports[0] not in as_imports
or (
config.combine_as_imports
and parsed.imports[section]["from"][module][from_import]
)
):
from_import_section.append(from_imports.pop(0))
if config.combine_as_imports:
comments = (comments or []) + list(
parsed.categorized_comments["from"].pop(f"{module}.__combined_as__", ())
)
import_statement = with_comments(
comments,
import_start + (", ").join(from_import_section),
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
if not from_import_section:
import_statement = ""
do_multiline_reformat = False
force_grid_wrap = config.force_grid_wrap
if force_grid_wrap and len(from_import_section) >= force_grid_wrap:
do_multiline_reformat = True
if len(import_statement) > config.line_length and len(from_import_section) > 1:
do_multiline_reformat = True
            # if the line is too long, there are imports remaining, and we are
            # NOT using GRID or VERTICAL wrap modes
if (
len(import_statement) > config.line_length
and len(from_import_section) > 0
and config.multi_line_output
not in (wrap.Modes.GRID, wrap.Modes.VERTICAL) # type: ignore
):
do_multiline_reformat = True
if do_multiline_reformat:
import_statement = wrap.import_statement(
import_start=import_start,
from_imports=from_import_section,
comments=comments,
line_separator=parsed.line_separator,
config=config,
)
if config.multi_line_output == wrap.Modes.GRID: # type: ignore
other_import_statement = wrap.import_statement(
import_start=import_start,
from_imports=from_import_section,
comments=comments,
line_separator=parsed.line_separator,
config=config,
multi_line_output=wrap.Modes.VERTICAL_GRID, # type: ignore
)
if (
max(
len(import_line)
for import_line in import_statement.split(parsed.line_separator)
)
> config.line_length
):
import_statement = other_import_statement
if not do_multiline_reformat and len(import_statement) > config.line_length:
import_statement = wrap.line(import_statement, parsed.line_separator, config)
if import_statement:
output.append(import_statement)
return output
def _with_straight_imports(
parsed: parse.ParsedContent,
config: Config,
straight_modules: Iterable[str],
section: str,
remove_imports: List[str],
import_type: str,
) -> List[str]:
output: List[str] = []
as_imports = any((module in parsed.as_map["straight"] for module in straight_modules))
# combine_straight_imports only works for bare imports, 'as' imports not included
if config.combine_straight_imports and not as_imports:
if not straight_modules:
return []
above_comments: List[str] = []
inline_comments: List[str] = []
for module in straight_modules:
if module in parsed.categorized_comments["above"]["straight"]:
above_comments.extend(parsed.categorized_comments["above"]["straight"].pop(module))
if module in parsed.categorized_comments["straight"]:
inline_comments.extend(parsed.categorized_comments["straight"][module])
combined_straight_imports = ", ".join(straight_modules)
if inline_comments:
combined_inline_comments = " ".join(inline_comments)
else:
combined_inline_comments = ""
output.extend(above_comments)
if combined_inline_comments:
output.append(
f"{import_type} {combined_straight_imports} # {combined_inline_comments}"
)
else:
output.append(f"{import_type} {combined_straight_imports}")
return output
for module in straight_modules:
if module in remove_imports:
continue
import_definition = []
if module in parsed.as_map["straight"]:
if parsed.imports[section]["straight"][module]:
import_definition.append((f"{import_type} {module}", module))
import_definition.extend(
(f"{import_type} {module} as {as_import}", f"{module} as {as_import}")
for as_import in parsed.as_map["straight"][module]
)
else:
import_definition.append((f"{import_type} {module}", module))
comments_above = parsed.categorized_comments["above"]["straight"].pop(module, None)
if comments_above:
output.extend(comments_above)
output.extend(
with_comments(
parsed.categorized_comments["straight"].get(imodule),
idef,
removed=config.ignore_comments,
comment_prefix=config.comment_prefix,
)
for idef, imodule in import_definition
)
return output
def _output_as_string(lines: List[str], line_separator: str) -> str:
return line_separator.join(_normalize_empty_lines(lines))
def _normalize_empty_lines(lines: List[str]) -> List[str]:
while lines and lines[-1].strip() == "":
lines.pop(-1)
lines.append("")
return lines
class _LineWithComments(str):
comments: List[str]
def __new__(
cls: Type["_LineWithComments"], value: Any, comments: List[str]
) -> "_LineWithComments":
instance = super().__new__(cls, value)
instance.comments = comments
return instance
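# For illustration only (values below are hypothetical): _LineWithComments
# behaves exactly like the str it wraps, but carries the comment lines that
# sat above an import so they survive the sort, e.g.
#   line = _LineWithComments("import os", ["# stdlib"])
#   str(line) -> "import os";  line.comments -> ["# stdlib"]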
def _ensure_newline_before_comment(output: List[str]) -> List[str]:
new_output: List[str] = []
def is_comment(line: Optional[str]) -> bool:
return line.startswith("#") if line else False
for line, prev_line in zip(output, [None] + output): # type: ignore
if is_comment(line) and prev_line != "" and not is_comment(prev_line):
new_output.append("")
new_output.append(line)
return new_output
def _with_star_comments(parsed: parse.ParsedContent, module: str, comments: List[str]) -> List[str]:
star_comment = parsed.categorized_comments["nested"].get(module, {}).pop("*", None)
if star_comment:
return comments + [star_comment]
return comments
| 40.91433 | 100 | 0.516199 | 2,439 | 26,267 | 5.272653 | 0.082411 | 0.034992 | 0.031104 | 0.023328 | 0.477916 | 0.384059 | 0.331882 | 0.288802 | 0.214075 | 0.204355 | 0 | 0.001991 | 0.407317 | 26,267 | 641 | 101 | 40.978159 | 0.824062 | 0.019682 | 0 | 0.377495 | 0 | 0 | 0.034796 | 0.00832 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016334 | false | 0 | 0.30127 | 0.00363 | 0.344828 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0 | 1 | 76f7e1b302002b518c986240747a14b0f7bf282f | 4,291 | py | Python | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | ["MIT"] | 13 | 2020-08-12T12:04:19.000Z | 2022-03-12T03:53:07.000Z | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | ["MIT"] | 46 | 2020-09-03T06:00:18.000Z | 2022-03-25T10:03:53.000Z | src/manifest.py | silent1mezzo/lightsaber | e470be7fb84b810fe846ff0ede78d06bf69cd5e3 | ["MIT"] | 3 | 2021-08-11T19:12:37.000Z | 2021-11-09T15:19:59.000Z |
MANIFEST = {
"hilt": {
"h1": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (110, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (141, 141, 141), # 8d8d8d
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal/Salvaged materials",
},
"h2": {
"offsets": {"blade": 20, "button": {"x": (8, 8), "y": (100, 105)}},
"colours": {
"primary": (112, 112, 112), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (212, 175, 55), # 000000
},
"length": 24,
"materials": "Alloy metal and carbon composite",
},
"h3": {
"offsets": {"blade": 0, "button": {"x": (10, 10), "y": (100, 118)}},
"colours": {
"primary": (157, 157, 157), # 707070
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h4": {
"offsets": {"blade": 7, "button": {"x": (8, 9), "y": (92, 100)}},
"colours": {
"primary": (0, 0, 0), # 000000
"secondary": (157, 157, 157), # 9d9d9d
"tertiary": (180, 97, 19), # b46113
},
"length": 13,
"materials": "Alloy metal",
},
"h5": {
"offsets": {"blade": 0, "button": {"x": (8, 8), "y": (92, 105)}},
"colours": {
"primary": (111, 111, 111), # 6f6f6f
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 24,
"materials": "Alloy metal",
},
"h6": {
"offsets": {"blade": 2, "button": {"x": (8, 9), "y": (112, 113)}},
"colours": {
"primary": (120, 120, 120), # 787878
"secondary": (0, 0, 0), # 000000
"tertiary": (180, 97, 19), # b46113
},
"length": 22,
"materials": "Alloy metal/Salvaged materials",
},
"h7": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (105, 113)}},
"colours": {
"primary": (192, 192, 192), # c0c0c0
"secondary": (255, 215, 0), # ffd700
"tertiary": (0, 0, 0), # 000000
},
"length": 22,
"materials": "Alloy metal and Gold",
},
"h8": {
"offsets": {"blade": 0, "button": {"x": (8, 9), "y": (100, 111)}},
"colours": {
"primary": (216, 216, 216), # d8d8d8
"secondary": (180, 97, 19), # b46113
"tertiary": (0, 0, 0), # 000000
},
"length": 24,
"materials": "Alloy metal/Copper",
},
},
"blade": {
"b1": {"colour": "Red", "crystal": "Adegan crystal", "type": "Sith"},
"b2": {"colour": "Blue", "crystal": "Zophis crystal", "type": "Jedi"},
"b3": {"colour": "Green", "crystal": "Nishalorite stone", "type": "Jedi"},
"b4": {"colour": "Yellow", "crystal": "Kimber stone", "type": "Jedi"},
"b5": {"colour": "White", "crystal": "Dragite gem", "type": "Jedi"},
"b6": {"colour": "Purple", "crystal": "Krayt dragon pearl", "type": "Jedi"},
"b7": {"colour": "Blue/Green", "crystal": "Dantari crystal", "type": "Jedi"},
"b8": {
"colour": "Orange",
"crystal": ["Ilum crystal", "Ultima Pearl"],
"type": "Sith",
},
"b9": {
"colour": "Black",
"crystal": "Obsidian",
"type": ["Jedi", "Mandalorian"],
},
},
"pommel": {
"p1": {"length": 5,},
"p2": {"length": 14,},
"p3": {"length": 3,},
"p4": {"length": 8,},
"p5": {"length": 5,},
"p6": {"length": 5,},
"p7": {"length": 8,},
},
# These are lightsabers for a specific Jedi or Sith. Should use their name instead of
"unique_urls": {""},
}
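# A hedged lookup sketch: parts are addressed by their id keys, e.g.
#   MANIFEST["hilt"]["h1"]["colours"]["primary"]  # -> (216, 216, 216)
#   MANIFEST["blade"]["b2"]["colour"]             # -> "Blue"
#   MANIFEST["pommel"]["p3"]["length"]            # -> 3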
| 37.313043 | 89 | 0.381496 | 384 | 4,291 | 4.260417 | 0.346354 | 0.017115 | 0.09291 | 0.038509 | 0.42176 | 0.343521 | 0.253056 | 0.229829 | 0.137531 | 0.137531 | 0 | 0.154055 | 0.399441 | 4,291 | 114 | 90 | 37.640351 | 0.480792 | 0.058495 | 0 | 0.292035 | 0 | 0 | 0.296462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 76f93238491c8f0f67d7813df6d0b4a6c7ed0a80 | 245 | py | Python | .ipython/profile_pytube/startup/init.py | showa-yojyo/dotfiles | 994cc7df0643d69f62cb59550bdd48a42751c345 | ["MIT"] | null | null | null | .ipython/profile_pytube/startup/init.py | showa-yojyo/dotfiles | 994cc7df0643d69f62cb59550bdd48a42751c345 | ["MIT"] | 3 | 2018-03-27T14:10:18.000Z | 2018-03-30T14:06:11.000Z | .ipython/profile_pytube/startup/init.py | showa-yojyo/dotfiles | 994cc7df0643d69f62cb59550bdd48a42751c345 | ["MIT"] | null | null | null |
from pytube import YouTube
def download_video(watch_url):
yt = YouTube(watch_url)
(yt.streams
.filter(progressive=True, file_extension='mp4')
.order_by('resolution')
.desc()
.first()
.download())
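# A minimal usage sketch; the watch URL below is hypothetical, and the call
# needs network access plus a pytube release that still matches YouTube:
if __name__ == '__main__':
    download_video('https://www.youtube.com/watch?v=EXAMPLE_ID')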
| 22.272727 | 55 | 0.608163 | 27 | 245 | 5.333333 | 0.814815 | 0.111111 | 0.138889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005556 | 0.265306 | 245 | 10 | 56 | 24.5 | 0.794444 | 0 | 0 | 0 | 0 | 0 | 0.053061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0a03cda07d112635217a5bbdc7ec5274c0658a7a | 3,258 | py | Python | requests/UpdateWorkbookConnectionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | ["Apache-2.0"] | 1 | 2019-06-08T22:19:40.000Z | 2019-06-08T22:19:40.000Z | requests/UpdateWorkbookConnectionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | ["Apache-2.0"] | null | null | null | requests/UpdateWorkbookConnectionRequest.py | divinorum-webb/python-tableau-api | 9d3f130d63b15307ad2b23e2273b52790b8d9018 | ["Apache-2.0"] | null | null | null |
from .BaseRequest import BaseRequest
class UpdateWorkbookConnectionRequest(BaseRequest):
"""
Update workbook connection request for sending API requests to Tableau Server.
:param ts_connection: The Tableau Server connection object.
:type ts_connection: class
:param server_address: The new server for the connection.
:type server_address: string
:param port: The new port for the connection.
:type port: string
:param connection_username: The new username for the connection.
:type connection_username: string
:param connection_password: The new password for the connection.
:type connection_password: string
:param embed_password_flag: Boolean; True to embed the password in the connection, False otherwise.
:type embed_password_flag: boolean
"""
def __init__(self,
ts_connection,
server_address=None,
port=None,
connection_username=None,
connection_password=None,
embed_password_flag=None):
super().__init__(ts_connection)
self._server_address = server_address
self._port = port
self._connection_username = connection_username
self._connection_password = connection_password
self._embed_password_flag = embed_password_flag
self.base_update_workbook_connection_request
@property
def optional_parameter_keys(self):
return [
'serverAddress',
'serverPort',
'userName',
'password',
'embedPassword'
]
@property
def optional_parameter_values_exist(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
True if self._embed_password_flag is not None else None
]
@property
def optional_parameter_values(self):
return [
self._server_address,
self._port,
self._connection_username,
self._connection_password,
self._embed_password_flag
]
@property
def base_update_workbook_connection_request(self):
self._request_body.update({'connection': {}})
return self._request_body
@property
def modified_update_workbook_connection_request(self):
if any(self.optional_parameter_values_exist):
self._request_body['connection'].update(
self._get_parameters_dict(self.optional_parameter_keys,
self.optional_parameter_values))
return self._request_body
@staticmethod
def _get_parameters_dict(param_keys, param_values):
"""Override the inherited _get_parameters_dict() method to allow passing boolean values directly"""
params_dict = {}
for i, key in enumerate(param_keys):
if param_values[i] is not None:
params_dict.update({key: param_values[i]})
return params_dict
def get_request(self):
return self.modified_update_workbook_connection_request
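# Hedged usage sketch: `ts_connection` must be a live Tableau Server
# connection object from this package, so the lines below are illustrative
# only. Every keyword is optional; only the ones supplied reach the body:
#
#   request = UpdateWorkbookConnectionRequest(ts_connection,
#                                             server_address='db.internal',
#                                             connection_username='tableau',
#                                             embed_password_flag=False)
#   request.get_request()
#   # -> {'connection': {'serverAddress': 'db.internal',
#   #                    'userName': 'tableau', 'embedPassword': False}}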
| 36.2 | 107 | 0.634131 | 331 | 3,258 | 5.885196 | 0.214502 | 0.046715 | 0.061088 | 0.079569 | 0.297228 | 0.11191 | 0.081109 | 0.081109 | 0.081109 | 0.081109 | 0 | 0 | 0.305402 | 3,258 | 89 | 108 | 36.606742 | 0.860804 | 0.261203 | 0 | 0.285714 | 0 | 0 | 0.030796 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.126984 | false | 0.15873 | 0.015873 | 0.063492 | 0.269841 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 | 0a1cc533cda21da8b86ba8309652b8179ef12637 | 1,371 | py | Python | Episode11-Menu/Pygame/explosion.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | ["CC0-1.0"] | 1 | 2022-02-01T04:05:04.000Z | 2022-02-01T04:05:04.000Z | Episode11-Menu/Pygame/explosion.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | ["CC0-1.0"] | null | null | null | Episode11-Menu/Pygame/explosion.py | Inksaver/Shmup_With_Pygame_Love2D_Monogame | 84838516d9dd9d6639b1b699dca546bfdfec73dc | ["CC0-1.0"] | null | null | null |
import pygame
import shared
class Explosion():
	def __init__(self, images: dict, centre: tuple, key: str) -> None:
		''' Class variables. key: 'sm', 'lg' or 'player' '''
self.images = images # list of 8 images
self.centre = centre # use for all frames
self.key = key # key used later
self.image = images[key][0] # set to first image in the sequence
self.rect = self.image.get_rect() # define rectangle from image size
self.rect.center = self.centre # set centre for all frames
self.frame = 0 # no of first frame
self.time_passed = 0 # set timer to 0
self.frame_rate = 0.1 # 8 images played at 1 frame per 0.1 secs = 0.8 seconds
self.active = True
def update(self, dt):
self.time_passed += dt
if self.time_passed >= self.frame_rate: # 0.1 seconds has passed
self.time_passed = 0 # reset timer
self.frame += 1 # increase frame number
if self.frame >= len(self.images[self.key]): # check if end of list?
self.active = False # animation finished
else:
self.image = self.images[self.key][self.frame] # next frame
self.rect = self.image.get_rect() # new rectangle
self.rect.center = self.centre # set centre to parameter value
return self.active
def draw(self):
shared.screen.blit(self.image, self.rect) # draw current frame
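# Hedged usage sketch (assumes pygame is initialised, shared.screen is set
# up, and `images` maps 'sm'/'lg'/'player' to lists of 8 frame surfaces):
#
#   boom = Explosion(images, centre=(320, 240), key='lg')
#   while boom.update(dt):  # dt = seconds since the last frame
#       boom.draw()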
| 41.545455 | 84 | 0.644055 | 206 | 1,371 | 4.228155 | 0.383495 | 0.061998 | 0.064294 | 0.036739 | 0.165327 | 0.130884 | 0.075775 | 0 | 0 | 0 | 0 | 0.016569 | 0.251641 | 1,371 | 33 | 85 | 41.545455 | 0.832359 | 0.326039 | 0 | 0.214286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0.142857 | 0.071429 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 | 0a21ba878c2e6396a56688811ff51897970088c4 | 3,361 | py | Python | tinc/tests/parameter_space_test.py | AlloSphere-Research-Group/tinc-python | 4c3390df9911a391833244de1eb1d33a2e19d330 | ["BSD-3-Clause"] | 1 | 2020-11-23T22:42:50.000Z | 2020-11-23T22:42:50.000Z | tinc/tests/parameter_space_test.py | AlloSphere-Research-Group/tinc-python | 4c3390df9911a391833244de1eb1d33a2e19d330 | ["BSD-3-Clause"] | null | null | null | tinc/tests/parameter_space_test.py | AlloSphere-Research-Group/tinc-python | 4c3390df9911a391833244de1eb1d33a2e19d330 | ["BSD-3-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 11:49:43 2021
@author: Andres
"""
import sys,time
import unittest
from tinc import *
class ParameterSpaceTest(unittest.TestCase):
def test_parameter(self):
p1 = Parameter("param1")
p2 = Parameter("param2")
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def test_process(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
def func(param1, param2):
return param1 * param2
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
p1.value = 3
p2.value = -0.1
result = ps.run_process(func)
self.assertAlmostEqual(result, p1.value * p2.value)
def test_sweep_cache(self):
p1 = Parameter("param1")
p1.values = [0, 1,2,3,4]
p2 = Parameter("param2")
p2.values = [-0.3,-0.2, -0.1, 0]
ps = ParameterSpace("ps")
ps.register_parameters([p1, p2])
ps.enable_cache("ps_test")
def func(param1, param2):
return param1 * param2
ps.sweep(func)
def test_data_directories(self):
dim1 = Parameter("dim1")
dim1.values = [0.1,0.2,0.3,0.4, 0.5]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [0.1,0.2,0.3,0.4, 0.5]
dim3 = Parameter("dim3")
dim3.set_space_representation_type(parameter_space_representation_types.ID)
        dim3.values = [0.1, 0.2, 0.3, 0.4, 0.5]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2, dim3])
ps.set_current_path_template("file_%%dim1%%_%%dim2:INDEX%%")
dim1.value=0.2
dim2.value=0.2
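        # %%dim2:INDEX%% expands to the index of dim2's current value within
        # dim2.values (0.2 sits at index 1), hence 'file_0.2_1' below.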
self.assertEqual(ps.get_current_relative_path(), 'file_0.2_1')
# TODO ML complete tests see C++ tests for parameter space
def test_common_id(self):
dim1 = Parameter("dim1")
dim1.values = [0.1, 0.1, 0.2, 0.2, 0.3, 0.3]
dim1.ids = ["0.1_1" ,"0.1_2","0.2_1" ,"0.2_2", "0.3_1" ,"0.3_2"]
dim2 = Parameter("dim2")
dim2.set_space_representation_type(parameter_space_representation_types.INDEX)
dim2.values = [1,1,1,2,2,2]
dim2.ids = ["0.1_1", "0.2_1", "0.3_1", "0.1_2", "0.2_2", "0.3_2"]
ps = ParameterSpace("ps")
ps.register_parameters([dim1, dim2])
dim1.value = 0.1
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_1")
dim1.value = 0.2
dim2.value = 1
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_1")
dim1.value = 0.1
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.1_2")
dim1.value = 0.2
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.2_2")
dim1.value = 0.3
dim2.value = 2
self.assertEqual(ps.get_common_id([dim1, dim2]), "0.3_2")
if __name__ == '__main__':
unittest.main()
| 28.974138 | 86 | 0.555489 | 476 | 3,361 | 3.754202 | 0.17437 | 0.021265 | 0.011752 | 0.067152 | 0.730274 | 0.702854 | 0.660325 | 0.620593 | 0.536094 | 0.476217 | 0 | 0.105818 | 0.294258 | 3,361 | 115 | 87 | 29.226087 | 0.647555 | 0.039274 | 0 | 0.607595 | 0 | 0 | 0.063354 | 0.008696 | 0 | 0 | 0 | 0.008696 | 0.101266 | 1 | 0.088608 | false | 0 | 0.037975 | 0.025316 | 0.164557 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0a2ad964a50ee086e447a623b3863c7fbb9ef26a | 1,977 | py | Python | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null | src/com/python/email/send_mail.py | Leeo1124/pythonDemo | 72e2209c095301a3f1f61edfe03ea69c3c05be40 | ["Apache-2.0"] | null | null | null |
'''
Created on 2016-08-10
@author: Administrator
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.multipart import MIMEBase
from email.utils import parseaddr, formataddr
import smtplib
def _format_addr(s):
name, addr = parseaddr(s)
return formataddr((Header(name, 'utf-8').encode(), addr))
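# For illustration: _format_addr keeps the address intact and RFC 2047-encodes
# only the display name when needed, e.g. (hypothetical values)
#   _format_addr('管理员 <admin@example.com>')
#   # -> '=?utf-8?b?...?= <admin@example.com>' (name encoded, address kept)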
from_addr = 'leeo1124@163.com'#input('From: ')
password = input('Password: ')
to_addr = '450475851@qq.com'#input('To: ')
smtp_server = 'smtp.163.com'#input('SMTP server: ')
# Send a plain-text email
# msg = MIMEText('hello, send by Python...', 'plain', 'utf-8')
# Send an HTML email
# msg = MIMEText('<html><body><h1>Hello</h1>' +
# '<p>send by <a href="http://www.python.org">Python</a>...</p>' +
# '</body></html>', 'html', 'utf-8')
# Send an email with an attachment
# The email object:
msg = MIMEMultipart()
msg['From'] = _format_addr('Python爱好者 <%s>' % from_addr)
msg['To'] = _format_addr('管理员 <%s>' % to_addr)
msg['Subject'] = Header('来自SMTP的问候……', 'utf-8').encode()
# The email body is a MIMEText part:
msg.attach(MIMEText('send with file...', 'plain', 'utf-8'))
# Adding an attachment means adding a MIMEBase part; read an image from the local disk:
with open('D:/pythonWorkspace/pthonDemo/src/com/python/email/test.jpg', 'rb') as f:
    # Set the attachment's MIME type and filename (png here):
mime = MIMEBase('image', 'png', filename='test.png')
    # Add the required header fields:
mime.add_header('Content-Disposition', 'attachment', filename='test.png')
mime.add_header('Content-ID', '<0>')
mime.add_header('X-Attachment-Id', '0')
    # Read in the attachment's content:
mime.set_payload(f.read())
    # Encode with Base64:
encoders.encode_base64(mime)
    # Attach it to the MIMEMultipart:
msg.attach(mime)
server = smtplib.SMTP(smtp_server, 25)
server.set_debuglevel(1)
server.login(from_addr, password)
server.sendmail(from_addr, [to_addr], msg.as_string())
server.quit()
| 29.073529 | 83 | 0.676277 | 267 | 1,977 | 4.947566 | 0.397004 | 0.040878 | 0.029523 | 0.033308 | 0.195307 | 0.152914 | 0.152914 | 0.152914 | 0.152914 | 0.152914 | 0 | 0.025015 | 0.130501 | 1,977 | 68 | 84 | 29.073529 | 0.736475 | 0.233687 | 0 | 0.171429 | 0 | 0 | 0.221328 | 0.0389 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.057143 | 0.2 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 | 0a4ab6a6c7a8f22ae4262d99f43041e035e6b535 | 602 | py | Python | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | ["MIT"] | null | null | null | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | ["MIT"] | null | null | null | project/settings/production.py | chiehtu/kissaten | a7aad01de569107d5fd5ed2cd781bca6e5750871 | ["MIT"] | null | null | null |
from .base import *
SECRET_KEY = get_env_var('SECRET_KEY')
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = get_env_var('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = get_env_var('EMAIL_HOST_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
DEFAULT_FROM_EMAIL = ''
USERENA_USE_HTTPS = True
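# `get_env_var` comes from .base via the star import above. A common shape
# for such a helper is the hedged sketch below; the real definition lives in
# project/settings/base.py and may differ:
#
#   import os
#   from django.core.exceptions import ImproperlyConfigured
#
#   def get_env_var(name):
#       try:
#           return os.environ[name]
#       except KeyError:
#           raise ImproperlyConfigured('Set the %s environment variable' % name)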
| 18.8125 | 61 | 0.750831 | 81 | 602 | 5.197531 | 0.493827 | 0.106888 | 0.064133 | 0.128266 | 0.085511 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00578 | 0.137874 | 602 | 31 | 62 | 19.419355 | 0.805395 | 0 | 0 | 0 | 0 | 0 | 0.373754 | 0.277409 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.055556 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 | 0a4ed29474e7c8d2e3be0b36b2cae77e32eb65c8 | 376 | py | Python | controller/base_service.py | oopsteams/pansite | 11896842da66efc72c26eab071f7f802b982f435 | ["MIT"] | null | null | null | controller/base_service.py | oopsteams/pansite | 11896842da66efc72c26eab071f7f802b982f435 | ["MIT"] | 1 | 2021-06-02T01:00:41.000Z | 2021-06-02T01:00:41.000Z | controller/base_service.py | oopsteams/pansite | 11896842da66efc72c26eab071f7f802b982f435 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created by susy at 2019/11/8
"""
from dao.dao import DataDao
import pytz
from dao.models import PanAccounts
from cfg import PAN_SERVICE, MASTER_ACCOUNT_ID
class BaseService:
def __init__(self):
self.default_tz = pytz.timezone('Asia/Chongqing')
# self.pan_acc: PanAccounts = DataDao.pan_account_list(MASTER_ACCOUNT_ID, False)
| 23.5 | 88 | 0.726064 | 54 | 376 | 4.814815 | 0.648148 | 0.053846 | 0.115385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025559 | 0.167553 | 376 | 15 | 89 | 25.066667 | 0.805112 | 0.345745 | 0 | 0 | 0 | 0 | 0.059072 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.571429 | 0 | 0.857143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0 | 1 | 0a57479ced46772f03d9c9dc023a3217a695d37d | 345 | py | Python | lambdataalchemani/lambda_test.py | Full-Data-Alchemist/lambdata-Mani-alch | 90dcbc091d8f9841d5a1046e64437058a4156dc5 | ["MIT"] | null | null | null | lambdataalchemani/lambda_test.py | Full-Data-Alchemist/lambdata-Mani-alch | 90dcbc091d8f9841d5a1046e64437058a4156dc5 | ["MIT"] | null | null | null | lambdataalchemani/lambda_test.py | Full-Data-Alchemist/lambdata-Mani-alch | 90dcbc091d8f9841d5a1046e64437058a4156dc5 | ["MIT"] | null | null | null |
"""
"""
import unittest
from example_module import COLORS, increment
class ExampleTest(unittest.TestCase):
"""
#TODO
"""
def test_increment(self):
x0 = 0
y0 = increment(x0) #y0 == 1
self.assertEqual(y0, 1)
x1 = 100
y1 = increment(x1) #y1 == 101
self.assertEqual(y1, 101)
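# Standard unittest entry point so the module can be run directly
# (python lambda_test.py); not required when using `python -m unittest`:
if __name__ == '__main__':
    unittest.main()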
| 15.681818 | 44 | 0.550725 | 39 | 345 | 4.820513 | 0.589744 | 0.031915 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094421 | 0.324638 | 345 | 21 | 45 | 16.428571 | 0.712446 | 0.063768 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 0.2 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0a5cd9823d91b39775866f431a665d36a045cbd2 | 2,450 | py | Python | Code/all-starter-code/search.py | diyarkudrat/CS-1.3-Core-Data-Structures | 7d7d48ad7913cded7b0ea75ced144d0a08989924 | ["MIT"] | null | null | null | Code/all-starter-code/search.py | diyarkudrat/CS-1.3-Core-Data-Structures | 7d7d48ad7913cded7b0ea75ced144d0a08989924 | ["MIT"] | null | null | null | Code/all-starter-code/search.py | diyarkudrat/CS-1.3-Core-Data-Structures | 7d7d48ad7913cded7b0ea75ced144d0a08989924 | ["MIT"] | null | null | null |
#!python
"""
ANNOTATE FUNCTIONS WITH TIME AND SPACE COMPLEXITY!!!!!
"""
def linear_search(array, item):
"""return the first index of item in array or None if item is not found"""
return linear_search_iterative(array, item)
# return linear_search_recursive(array, item)
def linear_search_iterative(array, item):
"""Time complexity: O(n) because you iterate through n amount of items in array
Space Complexity: O(n) because there are n amount of items"""
# loop over all array values until item is found
for index, value in enumerate(array): #O(n)
if item == value: #O(1)
return index # found O(1)
return None # not found O(1)
def linear_search_recursive(array, item, index=0):
"""Time complexity: O(n) because you are returning the function continuously until index equals to nth-item
"""
if len(array) <= index:
return index
if array[index] == item:
return index
else:
return linear_search_recursive(array, item, index + 1)
def binary_search(array, item):
"""return the index of item in sorted array or None if item is not found"""
return binary_search_iterative(array, item)
# return binary_search_recursive(array, item)
def binary_search_iterative(array, item):
"""Time Complexity: O(log*n) because you are constantly dividing the length of array by 2 until array length is 1
Space Complexity: O(1) """
left, right = 0, len(array) - 1
if len(array) == 0:
return None
while left <= right:
middle = left + (right - left) // 2
if item == array[middle]:
return middle
elif item > array[middle]:
left = middle + 1
else:
right = middle - 1
return None
def binary_search_recursive(array, item, left=None, right=None):
"""Time Complexity: O(log*n)
Space Complexity: 0(log*n) recursion call stack space"""
# TODO: implement binary search recursively here
if left is None and right is None:
left, right = 0, len(array) - 1
middle = left + (right - left) // 2
if left > right:
return None
if array[middle] == item:
return middle
elif item > array[middle]:
return binary_search_recursive(array, item, middle + 1, right)
else:
return binary_search_recursive(array, item, left, middle - 1)
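# A few hedged sanity checks exercising both search strategies
# (example data chosen here, not part of the original starter code):
if __name__ == '__main__':
    data = [1, 3, 5, 7, 9]
    assert linear_search(data, 7) == 3
    assert binary_search(data, 7) == 3
    assert binary_search(data, 4) is None
    print('search sanity checks passed')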
| 27.222222 | 117 | 0.628571 | 335 | 2,450 | 4.525373 | 0.223881 | 0.077177 | 0.092348 | 0.110818 | 0.48285 | 0.384565 | 0.094987 | 0.043536 | 0.043536 | 0 | 0 | 0.011306 | 0.277959 | 2,450 | 89 | 118 | 27.52809 | 0.845676 | 0.365306 | 0 | 0.439024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011236 | 0 | 1 | 0.146341 | false | 0 | 0 | 0 | 0.487805 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0 | 1 | 0a61c9cfc48e56723e2d98bba70acd01045f443c | 1,357 | py | Python | cv_recommender/account/urls.py | hhhameem/CV-Recommender | b85d53934f0d888835ab8201be388d7d69f0693d | ["MIT"] | 1 | 2021-09-14T17:40:17.000Z | 2021-09-14T17:40:17.000Z | cv_recommender/account/urls.py | mjohra/Cv-Recommender-Python-Django | d231092f7bd989b513210dd6031fb23e28bd5dfe | ["MIT"] | 1 | 2021-03-31T17:45:15.000Z | 2021-03-31T17:45:15.000Z | cv_recommender/account/urls.py | mjohra/Cv-Recommender-Python-Django | d231092f7bd989b513210dd6031fb23e28bd5dfe | ["MIT"] | 1 | 2021-03-31T16:58:50.000Z | 2021-03-31T16:58:50.000Z |
from django.urls import path
from django.contrib.auth import views as auth_views
from . import views
urlpatterns = [
path('register/', views.register, name='register'),
path('login/', views.userlogin, name='login'),
path('logout/', views.userlogout, name='logout'),
path('password_change/', auth_views.PasswordChangeView.as_view(),
name='password_change'),
path('password_change/done/', auth_views.PasswordChangeDoneView.as_view(),
name='password_change_done'),
path('password_reset/', auth_views.PasswordResetView.as_view(),
name='password_reset'),
path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(),
name='password_reset_done'),
path('reset/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(),
name='password_reset_confirm'),
path('reset/done/', auth_views.PasswordResetCompleteView.as_view(),
name='password_reset_complete'),
path('applicantdashboard/', views.applicantdashboard,
name='applicantdashboard'),
path('recruiterdashboard/', views.recruiterdashboard,
name='recruiterdashboard'),
path('applicantdashboard/profile-edit/', views.applicantedit,
name='editapplicantprofile'),
path('recruiterdashboard/profile-edit/', views.recruiteredit,
name='editrecruiterprofile'),
]
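# Hedged usage sketch: with these route names registered, views and templates
# can build URLs via reverse(); the final prefix depends on how this module
# is included in the project urlconf.
#
#   from django.urls import reverse
#   reverse('login')            # -> '.../login/'
#   reverse('password_reset_confirm',
#           kwargs={'uidb64': '<uidb64>', 'token': '<token>'})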
| 45.233333 | 82 | 0.709654 | 137 | 1,357 | 6.832117 | 0.284672 | 0.067308 | 0.064103 | 0.115385 | 0.149573 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001729 | 0.147384 | 1,357 | 29 | 83 | 46.793103 | 0.80726 | 0 | 0 | 0 | 0 | 0 | 0.322771 | 0.112749 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.428571 | 0.107143 | 0 | 0.107143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0 | 1 | 6a5a09a1f1eb09c5b1fb6c4e179dd1021a0b354e | 47,088 | py | Python | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | ["Apache-2.0"] | 8 | 2022-03-27T18:37:57.000Z | 2022-03-30T09:17:26.000Z | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | ["Apache-2.0"] | null | null | null | perturbed_images_generation_multiProcess.py | gwxie/Synthesize-Distorted-Image-and-Its-Control-Points | ed6de3e05a7ee1f3aecf65fcbb87c11d2ede41e7 | ["Apache-2.0"] | 1 | 2022-03-31T02:22:58.000Z | 2022-03-31T02:22:58.000Z |
'''
GuoWang xie
set up: 2020-1-9
integrate img and label into one file
-- fiducial1024_v1
'''
import argparse
import sys, os
import pickle
import random
import collections
import json
import numpy as np
import scipy.io as io
import scipy.misc as m
import matplotlib.pyplot as plt
import glob
import math
import time
import threading
import multiprocessing as mp
from multiprocessing import Pool
import re
import cv2
# sys.path.append('/lustre/home/gwxie/hope/project/dewarp/datasets/') # /lustre/home/gwxie/program/project/unwarp/perturbed_imgaes/GAN
import utils
def getDatasets(dir):
return os.listdir(dir)
class perturbed(utils.BasePerturbed):
def __init__(self, path, bg_path, save_path, save_suffix):
self.path = path
self.bg_path = bg_path
self.save_path = save_path
self.save_suffix = save_suffix
def save_img(self, m, n, fold_curve='fold', repeat_time=4, fiducial_points = 16, relativeShift_position='relativeShift_v2'):
origin_img = cv2.imread(self.path, flags=cv2.IMREAD_COLOR)
save_img_shape = [512*2, 480*2] # 320
# reduce_value = np.random.choice([2**4, 2**5, 2**6, 2**7, 2**8], p=[0.01, 0.1, 0.4, 0.39, 0.1])
reduce_value = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.02, 0.18, 0.2, 0.3, 0.1, 0.1, 0.08, 0.02])
# reduce_value = np.random.choice([8*2, 16*2, 24*2, 32*2, 40*2, 48*2], p=[0.01, 0.02, 0.2, 0.4, 0.19, 0.18])
# reduce_value = np.random.choice([16, 24, 32, 40, 48, 64], p=[0.01, 0.1, 0.2, 0.4, 0.2, 0.09])
base_img_shrink = save_img_shape[0] - reduce_value
# enlarge_img_shrink = [1024, 768]
# enlarge_img_shrink = [896, 672] # 420
enlarge_img_shrink = [512*4, 480*4] # 420
# enlarge_img_shrink = [896*2, 768*2] # 420
# enlarge_img_shrink = [896, 768] # 420
# enlarge_img_shrink = [768, 576] # 420
# enlarge_img_shrink = [640, 480] # 420
''''''
im_lr = origin_img.shape[0]
im_ud = origin_img.shape[1]
reduce_value_v2 = np.random.choice([2*2, 4*2, 8*2, 16*2, 24*2, 28*2, 32*2, 48*2], p=[0.02, 0.18, 0.2, 0.2, 0.1, 0.1, 0.1, 0.1])
# reduce_value_v2 = np.random.choice([16, 24, 28, 32, 48, 64], p=[0.01, 0.1, 0.2, 0.3, 0.25, 0.14])
if im_lr > im_ud:
im_ud = min(int(im_ud / im_lr * base_img_shrink), save_img_shape[1] - reduce_value_v2)
im_lr = save_img_shape[0] - reduce_value
else:
base_img_shrink = save_img_shape[1] - reduce_value
im_lr = min(int(im_lr / im_ud * base_img_shrink), save_img_shape[0] - reduce_value_v2)
im_ud = base_img_shrink
if round(im_lr / im_ud, 2) < 0.5 or round(im_ud / im_lr, 2) < 0.5:
repeat_time = min(repeat_time, 8)
edge_padding = 3
im_lr -= im_lr % (fiducial_points-1) - (2*edge_padding) # im_lr % (fiducial_points-1) - 1
im_ud -= im_ud % (fiducial_points-1) - (2*edge_padding) # im_ud % (fiducial_points-1) - 1
im_hight = np.linspace(edge_padding, im_lr - edge_padding, fiducial_points, dtype=np.int64)
im_wide = np.linspace(edge_padding, im_ud - edge_padding, fiducial_points, dtype=np.int64)
# im_lr -= im_lr % (fiducial_points-1) - (1+2*edge_padding) # im_lr % (fiducial_points-1) - 1
# im_ud -= im_ud % (fiducial_points-1) - (1+2*edge_padding) # im_ud % (fiducial_points-1) - 1
# im_hight = np.linspace(edge_padding, im_lr - (1+edge_padding), fiducial_points, dtype=np.int64)
# im_wide = np.linspace(edge_padding, im_ud - (1+edge_padding), fiducial_points, dtype=np.int64)
im_x, im_y = np.meshgrid(im_hight, im_wide)
segment_x = (im_lr) // (fiducial_points-1)
segment_y = (im_ud) // (fiducial_points-1)
# plt.plot(im_x, im_y,
# color='limegreen',
# marker='.',
# linestyle='')
# plt.grid(True)
# plt.show()
self.origin_img = cv2.resize(origin_img, (im_ud, im_lr), interpolation=cv2.INTER_CUBIC)
perturbed_bg_ = getDatasets(self.bg_path)
perturbed_bg_img_ = self.bg_path+random.choice(perturbed_bg_)
perturbed_bg_img = cv2.imread(perturbed_bg_img_, flags=cv2.IMREAD_COLOR)
mesh_shape = self.origin_img.shape[:2]
self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 256, dtype=np.float32)#np.zeros_like(perturbed_bg_img)
# self.synthesis_perturbed_img = np.full((enlarge_img_shrink[0], enlarge_img_shrink[1], 3), 0, dtype=np.int16)#np.zeros_like(perturbed_bg_img)
self.new_shape = self.synthesis_perturbed_img.shape[:2]
perturbed_bg_img = cv2.resize(perturbed_bg_img, (save_img_shape[1], save_img_shape[0]), cv2.INPAINT_TELEA)
origin_pixel_position = np.argwhere(np.zeros(mesh_shape, dtype=np.uint32) == 0).reshape(mesh_shape[0], mesh_shape[1], 2)
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
self.perturbed_xy_ = np.zeros((self.new_shape[0], self.new_shape[1], 2))
# self.perturbed_xy_ = pixel_position.copy().astype(np.float32)
# fiducial_points_grid = origin_pixel_position[im_x, im_y]
self.synthesis_perturbed_label = np.zeros((self.new_shape[0], self.new_shape[1], 2))
x_min, y_min, x_max, y_max = self.adjust_position_v2(0, 0, mesh_shape[0], mesh_shape[1], save_img_shape)
origin_pixel_position += [x_min, y_min]
x_min, y_min, x_max, y_max = self.adjust_position(0, 0, mesh_shape[0], mesh_shape[1])
x_shift = random.randint(-enlarge_img_shrink[0]//16, enlarge_img_shrink[0]//16)
y_shift = random.randint(-enlarge_img_shrink[1]//16, enlarge_img_shrink[1]//16)
x_min += x_shift
x_max += x_shift
y_min += y_shift
y_max += y_shift
'''im_x,y'''
im_x += x_min
im_y += y_min
self.synthesis_perturbed_img[x_min:x_max, y_min:y_max] = self.origin_img
self.synthesis_perturbed_label[x_min:x_max, y_min:y_max] = origin_pixel_position
synthesis_perturbed_img_map = self.synthesis_perturbed_img.copy()
synthesis_perturbed_label_map = self.synthesis_perturbed_label.copy()
foreORbackground_label = np.full((mesh_shape), 1, dtype=np.int16)
foreORbackground_label_map = np.full((self.new_shape), 0, dtype=np.int16)
foreORbackground_label_map[x_min:x_max, y_min:y_max] = foreORbackground_label
# synthesis_perturbed_img_map = self.pad(self.synthesis_perturbed_img.copy(), x_min, y_min, x_max, y_max)
# synthesis_perturbed_label_map = self.pad(synthesis_perturbed_label_map, x_min, y_min, x_max, y_max)
'''*****************************************************************'''
is_normalizationFun_mixture = self.is_perform(0.2, 0.8)
# if not is_normalizationFun_mixture:
normalizationFun_0_1 = False
# normalizationFun_0_1 = self.is_perform(0.5, 0.5)
if fold_curve == 'fold':
fold_curve_random = True
# is_normalizationFun_mixture = False
normalizationFun_0_1 = self.is_perform(0.2, 0.8)
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
fold_curve_random = self.is_perform(0.1, 0.9) # False # self.is_perform(0.01, 0.99)
alpha_perturbed = random.randint(80, 160) / 100
# is_normalizationFun_mixture = False # self.is_perform(0.01, 0.99)
synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
# synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 0, dtype=np.int16)
synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
alpha_perturbed_change = self.is_perform(0.5, 0.5)
p_pp_choice = self.is_perform(0.8, 0.2) if fold_curve == 'fold' else self.is_perform(0.1, 0.9)
for repeat_i in range(repeat_time):
if alpha_perturbed_change:
if fold_curve == 'fold':
if is_normalizationFun_mixture:
alpha_perturbed = random.randint(80, 120) / 100
else:
if normalizationFun_0_1 and repeat_time < 8:
alpha_perturbed = random.randint(50, 70) / 100
else:
alpha_perturbed = random.randint(70, 130) / 100
else:
alpha_perturbed = random.randint(80, 160) / 100
''''''
linspace_x = [0, (self.new_shape[0] - im_lr) // 2 - 1,
self.new_shape[0] - (self.new_shape[0] - im_lr) // 2 - 1, self.new_shape[0] - 1]
linspace_y = [0, (self.new_shape[1] - im_ud) // 2 - 1,
self.new_shape[1] - (self.new_shape[1] - im_ud) // 2 - 1, self.new_shape[1] - 1]
linspace_x_seq = [1, 2, 3]
linspace_y_seq = [1, 2, 3]
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_p = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
if ((r_x == 1 or r_x == 3) and (r_y == 1 or r_y == 3)) and p_pp_choice:
linspace_x_seq.remove(r_x)
linspace_y_seq.remove(r_y)
r_x = random.choice(linspace_x_seq)
r_y = random.choice(linspace_y_seq)
perturbed_pp = np.array(
[random.randint(linspace_x[r_x-1] * 10, linspace_x[r_x] * 10),
random.randint(linspace_y[r_y-1] * 10, linspace_y[r_y] * 10)])/10
# perturbed_p, perturbed_pp = np.array(
# [random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10]) \
# , np.array([random.randint(0, self.new_shape[0] * 10) / 10,
# random.randint(0, self.new_shape[1] * 10) / 10])
# perturbed_p, perturbed_pp = np.array(
# [random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10]) \
# , np.array([random.randint((self.new_shape[0]-im_lr)//2*10, (self.new_shape[0]-(self.new_shape[0]-im_lr)//2) * 10) / 10,
# random.randint((self.new_shape[1]-im_ud)//2*10, (self.new_shape[1]-(self.new_shape[1]-im_ud)//2) * 10) / 10])
''''''
perturbed_vp = perturbed_pp - perturbed_p
perturbed_vp_norm = np.linalg.norm(perturbed_vp)
perturbed_distance_vertex_and_line = np.dot((perturbed_p - pixel_position), perturbed_vp) / perturbed_vp_norm
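			# perturbed_distance_vertex_and_line is the signed projection of each
			# pixel's offset from perturbed_p onto the unit vector along
			# perturbed_vp; it is the distance term that shapes omega_perturbed below.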
''''''
# perturbed_v = np.array([random.randint(-3000, 3000) / 100, random.randint(-3000, 3000) / 100])
# perturbed_v = np.array([random.randint(-4000, 4000) / 100, random.randint(-4000, 4000) / 100])
if fold_curve == 'fold' and self.is_perform(0.6, 0.4): # self.is_perform(0.3, 0.7):
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
perturbed_v = np.array([random.randint(-10000, 10000) / 100, random.randint(-10000, 10000) / 100])
# perturbed_v = np.array([random.randint(-11000, 11000) / 100, random.randint(-11000, 11000) / 100])
else:
# perturbed_v = np.array([random.randint(-9000, 9000) / 100, random.randint(-9000, 9000) / 100])
# perturbed_v = np.array([random.randint(-16000, 16000) / 100, random.randint(-16000, 16000) / 100])
perturbed_v = np.array([random.randint(-8000, 8000) / 100, random.randint(-8000, 8000) / 100])
# perturbed_v = np.array([random.randint(-3500, 3500) / 100, random.randint(-3500, 3500) / 100])
# perturbed_v = np.array([random.randint(-600, 600) / 10, random.randint(-600, 600) / 10])
''''''
if fold_curve == 'fold':
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
if is_normalizationFun_mixture:
if self.is_perform(0.5, 0.5):
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
else:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), random.randint(1, 2))
else:
if normalizationFun_0_1:
perturbed_d = self.get_0_1_d(np.abs(perturbed_distance_vertex_and_line), 2)
else:
perturbed_d = np.abs(self.get_normalize(perturbed_distance_vertex_and_line))
''''''
if fold_curve_random:
# omega_perturbed = (alpha_perturbed+0.2) / (perturbed_d + alpha_perturbed)
# omega_perturbed = alpha_perturbed**perturbed_d
omega_perturbed = alpha_perturbed / (perturbed_d + alpha_perturbed)
else:
omega_perturbed = 1 - perturbed_d ** alpha_perturbed
'''shadow'''
if self.is_perform(0.6, 0.4):
synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] = np.minimum(np.maximum(synthesis_perturbed_img_map[x_min:x_max, y_min:y_max] - np.int16(np.round(omega_perturbed[x_min:x_max, y_min:y_max].repeat(3).reshape(x_max-x_min, y_max-y_min, 3) * abs(np.linalg.norm(perturbed_v//2))*np.array([0.4-random.random()*0.1, 0.4-random.random()*0.1, 0.4-random.random()*0.1]))), 0), 255)
''''''
if relativeShift_position in ['position', 'relativeShift_v2']:
self.perturbed_xy_ += np.array([omega_perturbed * perturbed_v[0], omega_perturbed * perturbed_v[1]]).transpose(1, 2, 0)
else:
print('relativeShift_position error')
exit()
'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
# synthesis_perturbed_img = np.around(synthesis_perturbed_img).astype(np.uint8)
synthesis_perturbed_label[:, :, 0] *= foreORbackground_label
synthesis_perturbed_label[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 0] *= foreORbackground_label
synthesis_perturbed_img[:, :, 1] *= foreORbackground_label
synthesis_perturbed_img[:, :, 2] *= foreORbackground_label
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
'''
'''perspective'''
perspective_shreshold = random.randint(26, 36)*10 # 280
x_min_per, y_min_per, x_max_per, y_max_per = self.adjust_position(perspective_shreshold, perspective_shreshold, self.new_shape[0]-perspective_shreshold, self.new_shape[1]-perspective_shreshold)
pts1 = np.float32([[x_min_per, y_min_per], [x_max_per, y_min_per], [x_min_per, y_max_per], [x_max_per, y_max_per]])
e_1_ = x_max_per - x_min_per
e_2_ = y_max_per - y_min_per
e_3_ = e_2_
e_4_ = e_1_
perspective_shreshold_h = e_1_*0.02
perspective_shreshold_w = e_2_*0.02
a_min_, a_max_ = 70, 110
# if self.is_perform(1, 0):
if fold_curve == 'curve' and self.is_perform(0.5, 0.5):
if self.is_perform(0.5, 0.5):
while True:
pts2 = np.around(
np.float32([[x_min_per - (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_min_per + (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold]])) # right
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(
np.float32([[x_min_per + (random.random()) * perspective_shreshold, y_min_per - (random.random()) * perspective_shreshold],
[x_max_per + (random.random()) * perspective_shreshold, y_min_per + (random.random()) * perspective_shreshold],
[x_min_per - (random.random()) * perspective_shreshold, y_max_per - (random.random()) * perspective_shreshold],
[x_max_per - (random.random()) * perspective_shreshold, y_max_per + (random.random()) * perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
else:
while True:
pts2 = np.around(np.float32([[x_min_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_min_per+(random.random()-0.5)*perspective_shreshold],
[x_min_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold],
[x_max_per+(random.random()-0.5)*perspective_shreshold, y_max_per+(random.random()-0.5)*perspective_shreshold]]))
e_1 = np.linalg.norm(pts2[0]-pts2[1])
e_2 = np.linalg.norm(pts2[0]-pts2[2])
e_3 = np.linalg.norm(pts2[1]-pts2[3])
e_4 = np.linalg.norm(pts2[2]-pts2[3])
if e_1_+perspective_shreshold_h > e_1 and e_2_+perspective_shreshold_w > e_2 and e_3_+perspective_shreshold_w > e_3 and e_4_+perspective_shreshold_h > e_4 and \
e_1_ - perspective_shreshold_h < e_1 and e_2_ - perspective_shreshold_w < e_2 and e_3_ - perspective_shreshold_w < e_3 and e_4_ - perspective_shreshold_h < e_4 and \
abs(e_1-e_4) < perspective_shreshold_h and abs(e_2-e_3) < perspective_shreshold_w:
a0_, a1_, a2_, a3_ = self.get_angle_4(pts2)
if (a0_ > a_min_ and a0_ < a_max_) or (a1_ > a_min_ and a1_ < a_max_) or (a2_ > a_min_ and a2_ < a_max_) or (a3_ > a_min_ and a3_ < a_max_):
break
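# Solve the 3x3 homography that maps the base corners (pts1) onto the sampled
# corners (pts2), apply it to every pixel coordinate in homogeneous form, and
# divide by the third component to obtain the warped (x, y) grid.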
M = cv2.getPerspectiveTransform(pts1, pts2)
one = np.ones((self.new_shape[0], self.new_shape[1], 1), dtype=np.int16)
matr = np.dstack((pixel_position, one))
new = np.dot(M, matr.reshape(-1, 3).T).T.reshape(self.new_shape[0], self.new_shape[1], 3)
x = new[:, :, 0]/new[:, :, 2]
y = new[:, :, 1]/new[:, :, 2]
perturbed_xy_ = np.dstack((x, y))
# perturbed_xy_round_int = np.around(cv2.bilateralFilter(perturbed_xy_round_int, 9, 75, 75))
# perturbed_xy_round_int = np.around(cv2.blur(perturbed_xy_, (17, 17)))
# perturbed_xy_round_int = cv2.blur(perturbed_xy_round_int, (17, 17))
# perturbed_xy_round_int = cv2.GaussianBlur(perturbed_xy_round_int, (7, 7), 0)
perturbed_xy_ = perturbed_xy_-np.min(perturbed_xy_.T.reshape(2, -1), 1)
# perturbed_xy_round_int = np.around(perturbed_xy_round_int-np.min(perturbed_xy_round_int.T.reshape(2, -1), 1)).astype(np.int16)
self.perturbed_xy_ += perturbed_xy_
'''perspective end'''
'''to img'''
flat_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(
self.new_shape[0] * self.new_shape[1], 2)
# self.perturbed_xy_ = cv2.blur(self.perturbed_xy_, (7, 7))
self.perturbed_xy_ = cv2.GaussianBlur(self.perturbed_xy_, (7, 7), 0)
'''get fiducial points'''
fiducial_points_coordinate = self.perturbed_xy_[im_x, im_y]
vtx, wts = self.interp_weights(self.perturbed_xy_.reshape(self.new_shape[0] * self.new_shape[1], 2), flat_position)
wts_sum = np.abs(wts).sum(-1)
# flat_img.reshape(flat_shape[0] * flat_shape[1], 3)[:] = interpolate(pixel, vtx, wts)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
synthesis_perturbed_img.reshape(self.new_shape[0] * self.new_shape[1], 3)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_img_map.reshape(self.new_shape[0] * self.new_shape[1], 3), vtx, wts)
synthesis_perturbed_label.reshape(self.new_shape[0] * self.new_shape[1], 2)[wts_sum <= 1,
:] = self.interpolate(synthesis_perturbed_label_map.reshape(self.new_shape[0] * self.new_shape[1], 2), vtx, wts)
foreORbackground_label = np.zeros(self.new_shape)
foreORbackground_label.reshape(self.new_shape[0] * self.new_shape[1], 1)[wts_sum <= 1, :] = self.interpolate(foreORbackground_label_map.reshape(self.new_shape[0] * self.new_shape[1], 1), vtx, wts)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
self.synthesis_perturbed_img = synthesis_perturbed_img
self.synthesis_perturbed_label = synthesis_perturbed_label
self.foreORbackground_label = foreORbackground_label
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1,2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_large.jpg', fiducial_points_synthesis_perturbed_img)
'''
'''clip'''
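# Scan outwards from the image centre for the first fully blank row/column on
# each side: a background pixel sums to 768 (three channels of the 256 fill
# value), so a blank row of width W sums to 768 * W.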
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
# sentinel-aware guard: -1 / new_shape mean no blank row or column was found
if perturbed_x_min <= 0 or perturbed_x_max >= self.new_shape[0] or perturbed_y_min <= 0 or perturbed_y_max >= self.new_shape[1]:
raise Exception('clip error')
if perturbed_x_max - perturbed_x_min < im_lr//2 or perturbed_y_max - perturbed_y_min < im_ud//2:
raise Exception('clip error')
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
is_shrink = False
if perturbed_x_max - perturbed_x_min > save_img_shape[0] or perturbed_y_max - perturbed_y_min > save_img_shape[1]:
is_shrink = True
synthesis_perturbed_img = cv2.resize(self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
synthesis_perturbed_label = cv2.resize(self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label = cv2.resize(self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max].copy(), (im_ud, im_lr), interpolation=cv2.INTER_LINEAR)
foreORbackground_label[foreORbackground_label < 0.99] = 0
foreORbackground_label[foreORbackground_label >= 0.99] = 1
'''shrink fiducial points'''
center_x_l, center_y_l = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
fiducial_points_coordinate_copy = fiducial_points_coordinate.copy()
shrink_x = im_lr/(perturbed_x_max - perturbed_x_min)
shrink_y = im_ud/(perturbed_y_max - perturbed_y_min)
fiducial_points_coordinate *= [shrink_x, shrink_y]
center_x_l *= shrink_x
center_y_l *= shrink_y
# fiducial_points_coordinate[1:, 1:] *= [shrink_x, shrink_y]
# fiducial_points_coordinate[1:, :1, 0] *= shrink_x
# fiducial_points_coordinate[:1, 1:, 1] *= shrink_y
# perturbed_x_min_copy, perturbed_y_min_copy, perturbed_x_max_copy, perturbed_y_max_copy = perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
self.synthesis_perturbed_img = np.full_like(self.synthesis_perturbed_img, 256)
self.synthesis_perturbed_label = np.zeros_like(self.synthesis_perturbed_label)
self.foreORbackground_label = np.zeros_like(self.foreORbackground_label)
self.synthesis_perturbed_img[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_img
self.synthesis_perturbed_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_label
self.foreORbackground_label[perturbed_x_min:perturbed_x_max, perturbed_y_min:perturbed_y_max] = foreORbackground_label
center_x, center_y = perturbed_x_min + (perturbed_x_max - perturbed_x_min) // 2, perturbed_y_min + (perturbed_y_max - perturbed_y_min) // 2
if is_shrink:
fiducial_points_coordinate += [center_x-center_x_l, center_y-center_y_l]
'''draw fiducial points
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_img.copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img,
(l[1] + math.ceil(stepSize / 2), l[0] + math.ceil(stepSize / 2)), 5, (0, 0, 255), -1)
cv2.imwrite('/lustre/home/gwxie/program/project/unwarp/unwarp_perturbed/TPS/img/cv_TPS_small.jpg',fiducial_points_synthesis_perturbed_img)
'''
self.new_shape = save_img_shape
self.synthesis_perturbed_img = self.synthesis_perturbed_img[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2,
:].copy()
self.foreORbackground_label = self.foreORbackground_label[
center_x - self.new_shape[0] // 2:center_x + self.new_shape[0] // 2,
center_y - self.new_shape[1] // 2:center_y + self.new_shape[1] // 2].copy()
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
'''clip
perturbed_x_min, perturbed_y_min, perturbed_x_max, perturbed_y_max = -1, -1, self.new_shape[0], self.new_shape[1]
for x in range(self.new_shape[0] // 2, perturbed_x_max):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and perturbed_x_max - 1 > x:
perturbed_x_max = x
break
for x in range(self.new_shape[0] // 2, perturbed_x_min, -1):
if np.sum(self.synthesis_perturbed_img[x, :]) == 768 * self.new_shape[1] and x > 0:
perturbed_x_min = x
break
for y in range(self.new_shape[1] // 2, perturbed_y_max):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and perturbed_y_max - 1 > y:
perturbed_y_max = y
break
for y in range(self.new_shape[1] // 2, perturbed_y_min, -1):
if np.sum(self.synthesis_perturbed_img[:, y]) == 768 * self.new_shape[0] and y > 0:
perturbed_y_min = y
break
center_x, center_y = perturbed_x_min+(perturbed_x_max - perturbed_x_min)//2, perturbed_y_min+(perturbed_y_max - perturbed_y_min)//2
perfix_ = self.save_suffix+'_'+str(m)+'_'+str(n)
self.new_shape = save_img_shape
perturbed_x_ = max(self.new_shape[0] - (perturbed_x_max - perturbed_x_min), 0)
perturbed_x_min = perturbed_x_ // 2
perturbed_x_max = self.new_shape[0] - perturbed_x_ // 2 if perturbed_x_%2 == 0 else self.new_shape[0] - (perturbed_x_ // 2 + 1)
perturbed_y_ = max(self.new_shape[1] - (perturbed_y_max - perturbed_y_min), 0)
perturbed_y_min = perturbed_y_ // 2
perturbed_y_max = self.new_shape[1] - perturbed_y_ // 2 if perturbed_y_%2 == 0 else self.new_shape[1] - (perturbed_y_ // 2 + 1)
self.synthesis_perturbed_img = self.synthesis_perturbed_img[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.synthesis_perturbed_label = self.synthesis_perturbed_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2, :].copy()
self.foreORbackground_label = self.foreORbackground_label[center_x-self.new_shape[0]//2:center_x+self.new_shape[0]//2, center_y-self.new_shape[1]//2:center_y+self.new_shape[1]//2].copy()
'''
'''save'''
pixel_position = np.argwhere(np.zeros(self.new_shape, dtype=np.uint32) == 0).reshape(self.new_shape[0], self.new_shape[1], 2)
if relativeShift_position == 'relativeShift_v2':
self.synthesis_perturbed_label -= pixel_position
fiducial_points_coordinate -= [center_x - self.new_shape[0] // 2, center_y - self.new_shape[1] // 2]
self.synthesis_perturbed_label[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_label[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 0] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 1] *= self.foreORbackground_label
self.synthesis_perturbed_img[:, :, 2] *= self.foreORbackground_label
'''
synthesis_perturbed_img_filter = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
# if self.is_perform(0.9, 0.1) or repeat_time > 5:
# # if self.is_perform(0.1, 0.9) and repeat_time > 9:
# # synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (7, 7), 0)
# # else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
# else:
# synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
self.synthesis_perturbed_img[self.foreORbackground_label == 1] = synthesis_perturbed_img_filter[self.foreORbackground_label == 1]
'''
'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img
HSV
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.2)*20, (random.random()-0.2)/8, (random.random()-0.2)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/8, (random.random()-0.2)*20
perturbed_bg_img_HSV[:, :, 0], perturbed_bg_img_HSV[:, :, 1], perturbed_bg_img_HSV[:, :, 2] = perturbed_bg_img_HSV[:, :, 0]-H_, perturbed_bg_img_HSV[:, :, 1]-S_, perturbed_bg_img_HSV[:, :, 2]-V_
perturbed_bg_img_HSV = cv2.cvtColor(perturbed_bg_img_HSV, cv2.COLOR_HSV2RGB)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
# synthesis_perturbed_img_clip_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img[np.sum(self.synthesis_perturbed_img, 2) == 771]
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_RGB2HSV)
H_, S_, V_ = (random.random()-0.5)*20, (random.random()-0.5)/10, (random.random()-0.4)*20
synthesis_perturbed_img_clip_HSV[:, :, 0], synthesis_perturbed_img_clip_HSV[:, :, 1], synthesis_perturbed_img_clip_HSV[:, :, 2] = synthesis_perturbed_img_clip_HSV[:, :, 0]-H_, synthesis_perturbed_img_clip_HSV[:, :, 1]-S_, synthesis_perturbed_img_clip_HSV[:, :, 2]-V_
synthesis_perturbed_img_clip_HSV = cv2.cvtColor(synthesis_perturbed_img_clip_HSV, cv2.COLOR_HSV2RGB)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
'''
'''HSV_v2'''
perturbed_bg_img = perturbed_bg_img.astype(np.float32)
# if self.is_perform(1, 0):
# if self.is_perform(1, 0):
if self.is_perform(0.1, 0.9):
if self.is_perform(0.2, 0.8):
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
perturbed_bg_img[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1-self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
else:
perturbed_bg_img_HSV = perturbed_bg_img
perturbed_bg_img_HSV = self.HSV_v1(perturbed_bg_img_HSV)
perturbed_bg_img_HSV[:, :, 0] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 1] *= 1-self.foreORbackground_label
perturbed_bg_img_HSV[:, :, 2] *= 1-self.foreORbackground_label
self.synthesis_perturbed_img += perturbed_bg_img_HSV
# self.synthesis_perturbed_img[np.sum(self.synthesis_perturbed_img, 2) == 771] = perturbed_bg_img_HSV[np.sum(self.synthesis_perturbed_img, 2) == 771]
else:
synthesis_perturbed_img_clip_HSV = self.synthesis_perturbed_img.copy()
perturbed_bg_img[:, :, 0] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 1] *= 1 - self.foreORbackground_label
perturbed_bg_img[:, :, 2] *= 1 - self.foreORbackground_label
synthesis_perturbed_img_clip_HSV += perturbed_bg_img
synthesis_perturbed_img_clip_HSV = self.HSV_v1(synthesis_perturbed_img_clip_HSV)
self.synthesis_perturbed_img = synthesis_perturbed_img_clip_HSV
''''''
# cv2.imwrite(self.save_path+'clip/'+perfix_+'_'+fold_curve+str(perturbed_time)+'-'+str(repeat_time)+'.png', synthesis_perturbed_img_clip)
self.synthesis_perturbed_img[self.synthesis_perturbed_img < 0] = 0
self.synthesis_perturbed_img[self.synthesis_perturbed_img > 255] = 255
self.synthesis_perturbed_img = np.around(self.synthesis_perturbed_img).astype(np.uint8)
label = np.zeros_like(self.synthesis_perturbed_img, dtype=np.float32)
label[:, :, :2] = self.synthesis_perturbed_label
label[:, :, 2] = self.foreORbackground_label
# grey = np.around(self.synthesis_perturbed_img[:, :, 0] * 0.2989 + self.synthesis_perturbed_img[:, :, 1] * 0.5870 + self.synthesis_perturbed_img[:, :, 0] * 0.1140).astype(np.int16)
# synthesis_perturbed_grey = np.concatenate((grey.reshape(self.new_shape[0], self.new_shape[1], 1), label), axis=2)
synthesis_perturbed_color = np.concatenate((self.synthesis_perturbed_img, label), axis=2)
self.synthesis_perturbed_color = np.zeros_like(synthesis_perturbed_color, dtype=np.float32)
# self.synthesis_perturbed_grey = np.zeros_like(synthesis_perturbed_grey, dtype=np.float32)
reduce_value_x = int(round(min((random.random() / 2) * (self.new_shape[0] - (perturbed_x_max - perturbed_x_min)), min(reduce_value, reduce_value_v2))))
reduce_value_y = int(round(min((random.random() / 2) * (self.new_shape[1] - (perturbed_y_max - perturbed_y_min)), min(reduce_value, reduce_value_v2))))
perturbed_x_min = max(perturbed_x_min - reduce_value_x, 0)
perturbed_x_max = min(perturbed_x_max + reduce_value_x, self.new_shape[0])
perturbed_y_min = max(perturbed_y_min - reduce_value_y, 0)
perturbed_y_max = min(perturbed_y_max + reduce_value_y, self.new_shape[1])
if im_lr >= im_ud:
self.synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_color[:, perturbed_y_min:perturbed_y_max, :]
# self.synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :] = synthesis_perturbed_grey[:, perturbed_y_min:perturbed_y_max, :]
else:
self.synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_color[perturbed_x_min:perturbed_x_max, :, :]
# self.synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :] = synthesis_perturbed_grey[perturbed_x_min:perturbed_x_max, :, :]
'''blur'''
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = self.synthesis_perturbed_color[:, :, :3].copy()
if self.is_perform(0.1, 0.9):
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (5, 5), 0)
else:
synthesis_perturbed_img_filter = cv2.GaussianBlur(synthesis_perturbed_img_filter, (3, 3), 0)
if self.is_perform(0.5, 0.5):
self.synthesis_perturbed_color[:, :, :3][self.synthesis_perturbed_color[:, :, 5] == 1] = synthesis_perturbed_img_filter[self.synthesis_perturbed_color[:, :, 5] == 1]
else:
self.synthesis_perturbed_color[:, :, :3] = synthesis_perturbed_img_filter
fiducial_points_coordinate = fiducial_points_coordinate[:, :, ::-1]
'''draw fiducial points'''
stepSize = 0
fiducial_points_synthesis_perturbed_img = self.synthesis_perturbed_color[:, :, :3].copy()
for l in fiducial_points_coordinate.astype(np.int64).reshape(-1, 2):
cv2.circle(fiducial_points_synthesis_perturbed_img, (l[0] + math.ceil(stepSize / 2), l[1] + math.ceil(stepSize / 2)), 2, (0, 0, 255), -1)
cv2.imwrite(self.save_path + 'fiducial_points/' + perfix_ + '_' + fold_curve + '.png', fiducial_points_synthesis_perturbed_img)
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
'''forward-begin'''
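# Build the forward mapping by scattered interpolation: the label offsets
# (channels 3:5) plus the pixel grid give, for each foreground pixel of the
# perturbed image, its position in the flat document; interpolating the
# perturbed coordinates at every flat grid point then yields the map that
# cv2.remap can use to dewarp the image (see the commented-out lines below).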
self.forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_mapping = np.full((save_img_shape[0], save_img_shape[1], 2), 0, dtype=np.float32)
forward_position = (self.synthesis_perturbed_color[:, :, 3:5] + pixel_position)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
flat_position = np.argwhere(np.zeros(save_img_shape, dtype=np.uint32) == 0)
vtx, wts = self.interp_weights(forward_position, flat_position)
wts_sum = np.abs(wts).sum(-1)
wts = wts[wts_sum <= 1, :]
vtx = vtx[wts_sum <= 1, :]
flat_position_forward = flat_position.reshape(save_img_shape[0], save_img_shape[1], 2)[self.synthesis_perturbed_color[:, :, 5] != 0, :]
forward_mapping.reshape(save_img_shape[0] * save_img_shape[1], 2)[wts_sum <= 1, :] = self.interpolate(flat_position_forward, vtx, wts)
forward_mapping = forward_mapping.reshape(save_img_shape[0], save_img_shape[1], 2)
mapping_x_min_, mapping_y_min_, mapping_x_max_, mapping_y_max_ = self.adjust_position_v2(0, 0, im_lr, im_ud, self.new_shape)
shreshold_zoom_out = 2
mapping_x_min = mapping_x_min_ + shreshold_zoom_out
mapping_y_min = mapping_y_min_ + shreshold_zoom_out
mapping_x_max = mapping_x_max_ - shreshold_zoom_out
mapping_y_max = mapping_y_max_ - shreshold_zoom_out
self.forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max] = forward_mapping[mapping_x_min:mapping_x_max, mapping_y_min:mapping_y_max]
self.scan_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
self.scan_img[mapping_x_min_:mapping_x_max_, mapping_y_min_:mapping_y_max_] = self.origin_img
self.origin_img = self.scan_img
# flat_img = np.full((save_img_shape[0], save_img_shape[1], 3), 0, dtype=np.float32)
# cv2.remap(self.synthesis_perturbed_color[:, :, :3], self.forward_mapping[:, :, 1], self.forward_mapping[:, :, 0], cv2.INTER_LINEAR, flat_img)
# cv2.imwrite(self.save_path + 'outputs/1.jpg', flat_img)
'''forward-end'''
synthesis_perturbed_data = {
'fiducial_points': fiducial_points_coordinate,
'segment': np.array((segment_x, segment_y))
}
cv2.imwrite(self.save_path + 'png/' + perfix_ + '_' + fold_curve + '.png', self.synthesis_perturbed_color[:, :, :3])
with open(self.save_path+'color/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
pickle_perturbed_data = pickle.dumps(synthesis_perturbed_data)
f.write(pickle_perturbed_data)
# with open(self.save_path+'grey/'+perfix_+'_'+fold_curve+'.gw', 'wb') as f:
# pickle_perturbed_data = pickle.dumps(self.synthesis_perturbed_grey)
# f.write(pickle_perturbed_data)
# cv2.imwrite(self.save_path+'grey_im/'+perfix_+'_'+fold_curve+'.png', self.synthesis_perturbed_color[:, :, :1])
# cv2.imwrite(self.save_path + 'scan/' + self.save_suffix + '_' + str(m) + '.png', self.origin_img)
train_t = time.time() - begin_train
mm, ss = divmod(train_t, 60)
hh, mm = divmod(mm, 60)
print(str(m)+'_'+str(n)+'_'+fold_curve+' '+str(repeat_time)+" Time : %02d:%02d:%02d\n" % (hh, mm, ss))
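# A minimal sketch (not part of the original pipeline) of the Delaunay-based
# scattered interpolation that interp_weights/interpolate are assumed to
# implement, following the standard scipy recipe. Points falling outside the
# convex hull of the source points receive out-of-range barycentric weights,
# which is why callers above filter on np.abs(wts).sum(-1) <= 1.
def interp_weights_sketch(xy, uv, d=2):
    import scipy.spatial
    tri = scipy.spatial.Delaunay(xy)
    simplex = tri.find_simplex(uv)
    vertices = np.take(tri.simplices, simplex, axis=0)  # (M, d+1) vertex indices
    temp = np.take(tri.transform, simplex, axis=0)  # barycentric transforms
    delta = uv - temp[:, d]
    bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
    return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def interpolate_sketch(values, vtx, wts):
    # values: (N, C) samples at source points -> (M, C) estimates at queries
    return np.einsum('njc,nj->nc', values[vtx], wts)
# Note: multiThread below appears unused by xgw(), which submits save_img to a
# process pool instead (and with an extra fiducial_points argument).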
def multiThread(m, n, img_path_, bg_path_, save_path, save_suffix):
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(10, 3)), 5), 16)
fold = threading.Thread(target=saveFold.save_img, args=(m, n, 'fold', repeat_time, 'relativeShift_v2'), name='fold')
curve = threading.Thread(target=saveCurve.save_img, args=(m, n, 'curve', repeat_time, 'relativeShift_v2'), name='curve')
fold.start()
curve.start()
curve.join()
fold.join()
def xgw(args):
path = args.path
bg_path = args.bg_path
if args.output_path is None:
save_path = '/lustre/home/gwxie/data/unwarp_new/train/general1024/general1024_v1/'
else:
save_path = args.output_path
# if not os.path.exists(save_path + 'grey/'):
# os.makedirs(save_path + 'grey/')
if not os.path.exists(save_path + 'color/'):
os.makedirs(save_path + 'color/')
if not os.path.exists(save_path + 'fiducial_points/'):
os.makedirs(save_path + 'fiducial_points/')
if not os.path.exists(save_path + 'png/'):
os.makedirs(save_path + 'png/')
if not os.path.exists(save_path + 'scan/'):
os.makedirs(save_path + 'scan/')
if not os.path.exists(save_path + 'outputs/'):
os.makedirs(save_path + 'outputs/')
save_suffix = args.path.split('/')[-2]
all_img_path = getDatasets(path)
all_bgImg_path = getDatasets(bg_path)
global begin_train
begin_train = time.time()
fiducial_points = 61 # 31
process_pool = Pool(2)
for m, img_path in enumerate(all_img_path):
for n in range(args.sys_num):
img_path_ = path+img_path
bg_path_ = bg_path+random.choice(all_bgImg_path)+'/'
for m_n in range(10):
try:
saveFold = perturbed(img_path_, bg_path_, save_path, save_suffix)
saveCurve = perturbed(img_path_, bg_path_, save_path, save_suffix)
repeat_time = min(max(round(np.random.normal(12, 4)), 1), 18)
# repeat_time = min(max(round(np.random.normal(8, 4)), 1), 12) # random.randint(1, 2) # min(max(round(np.random.normal(8, 4)), 1), 12)
process_pool.apply_async(func=saveFold.save_img, args=(m, n, 'fold', repeat_time, fiducial_points, 'relativeShift_v2'))
repeat_time = min(max(round(np.random.normal(8, 4)), 1), 13)
# repeat_time = min(max(round(np.random.normal(6, 4)), 1), 10)
process_pool.apply_async(func=saveCurve.save_img, args=(m, n, 'curve', repeat_time, fiducial_points, 'relativeShift_v2'))
except BaseException as err:
print(err)
continue
break
# print('end')
process_pool.close()
process_pool.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Hyperparams')
parser.add_argument('--path',
default='./scan/new/', type=str,
help='path of the original (scan) images.')
parser.add_argument('--bg_path',
default='./background/', type=str,
help='path of the background images.')
parser.add_argument('--output_path',
default='./output/', type=str,
help='path where the synthesized data are saved.')
# parser.set_defaults(output_path='test')
parser.add_argument('--count_from', '-p', default=0, type=int,
metavar='N', help='index to start counting from (default: 0)')
parser.add_argument('--repeat_T', default=0, type=int)
parser.add_argument('--sys_num', default=6, type=int)
args = parser.parse_args()
xgw(args)
| 53.692132 | 380 | 0.720417 | 7,535 | 47,088 | 4.138421 | 0.052157 | 0.120065 | 0.057339 | 0.028766 | 0.79088 | 0.720938 | 0.670558 | 0.626014 | 0.595773 | 0.560562 | 0 | 0.048875 | 0.13445 | 47,088 | 876 | 381 | 53.753425 | 0.716221 | 0.144156 | 0 | 0.301205 | 0 | 0 | 0.022177 | 0.002927 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01004 | false | 0 | 0.038153 | 0.002008 | 0.052209 | 0.008032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a5ce615b33cd197b365d6e3673610f15fbcf59b
| 12,289 |
py
|
Python
|
assignment1/cs231n/classifiers/neural_net.py
|
zeevikal/CS231n-spring2018
|
50691a947b877047099e7a1fe99a3fdea4a4fcf8
|
[
"MIT"
] | null | null | null |
assignment1/cs231n/classifiers/neural_net.py
|
zeevikal/CS231n-spring2018
|
50691a947b877047099e7a1fe99a3fdea4a4fcf8
|
[
"MIT"
] | 3 |
2019-12-09T06:04:00.000Z
|
2019-12-09T06:05:23.000Z
|
assignment1/cs231n/classifiers/neural_net.py
|
zeevikal/CS231n-spring2018
|
50691a947b877047099e7a1fe99a3fdea4a4fcf8
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network. The net has an input dimension
of D, a hidden layer dimension of H, and performs classification over C
classes.
We train the network with a softmax loss function and L2 regularization on
the weight matrices. The network uses a ReLU nonlinearity after the first
fully connected layer.
In other words, the network has the following architecture:
input - fully connected layer - ReLU - fully connected layer - softmax
The outputs of the second fully-connected layer are the scores for each
class.
"""
def __init__(self, input_size, hidden_size, output_size, std=1e-4):
"""
Initialize the model. Weights are initialized to small random values
and biases are initialized to zero. Weights and biases are stored in
the variable self.params, which is a dictionary with the following keys
W1: First layer weights; has shape (D, H)
b1: First layer biases; has shape (H,)
W2: Second layer weights; has shape (H, C)
b2: Second layer biases; has shape (C,)
Inputs:
- input_size: The dimension D of the input data.
- hidden_size: The number of neurons H in the hidden layer.
- output_size: The number of classes C.
"""
self.params = {}
self.params['W1'] = std * np.random.randn(input_size, hidden_size)
self.params['b1'] = np.zeros(hidden_size)
self.params['W2'] = std * np.random.randn(hidden_size, output_size)
self.params['b2'] = np.zeros(output_size)
def loss(self, X, y=None, reg=0.0):
"""
Compute the loss and gradients for a two layer fully connected neural
network.
Inputs:
- X: Input data of shape (N, D). Each X[i] is a training sample.
- y: Vector of training labels. y[i] is the label for X[i], and each
y[i] is an integer in the range 0 <= y[i] < C. This parameter is
optional; if it is not passed then we only return scores, and if it
is passed then we instead return the loss and gradients.
- reg: Regularization strength.
Returns:
If y is None, return a matrix scores of shape (N, C) where scores[i, c]
is the score for class c on input X[i].
If y is not None, instead return a tuple of:
- loss: Loss (data loss and regularization loss) for this batch of
training samples.
- grads: Dictionary mapping parameter names to gradients of those
parameters with respect to the loss function; has the same keys as
self.params.
"""
# Unpack variables from the params dictionary
W1, b1 = self.params['W1'], self.params['b1']
W2, b2 = self.params['W2'], self.params['b2']
N, D = X.shape
# Compute the forward pass
scores = None
#######################################################################
# TODO: Perform the forward pass, computing the class scores for the #
# input. Store the result in the scores variable, which should be an #
# array of shape (N, C). #
#######################################################################
scores1 = X.dot(W1) + b1 # FC1
X2 = np.maximum(0, scores1) # ReLU FC1
scores = X2.dot(W2) + b2 # FC2
#######################################################################
# END OF YOUR CODE #
#######################################################################
# If the targets are not given then jump out, we're done
if y is None:
return scores
scores -= np.max(scores, axis=1, keepdims=True) # shift logits row-wise for numerical stability
scores_exp = np.exp(scores)
probs = scores_exp / np.sum(scores_exp, axis=1, keepdims=True)
# Compute the loss
loss = None
#######################################################################
# TODO: Finish the forward pass, and compute the loss. This should #
# include both the data loss and L2 regularization for W1 and W2. #
# Store the result in the variable loss, which should be a scalar. Use#
# the Softmax classifier loss. #
#######################################################################
correct_probs = -np.log(probs[np.arange(N), y])
# L_i = -log(e^correct_score / sum(e^scores)) = -log(correct_probs)
loss = np.sum(correct_probs)
loss /= N
# L2 regularization WRT W1 and W2
loss += reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
#######################################################################
# END OF YOUR CODE #
#######################################################################
# Backward pass: compute gradients
grads = {}
#############################################################################
# TODO: Compute the backward pass, computing the derivatives of the weights #
# and biases. Store the results in the grads dictionary. For example, #
# grads['W1'] should store the gradient on W1, and be a matrix of same size #
#############################################################################
# gradient of loss_i WRT scores_k
# dL_i/ds_k = probs_k-1(y_i == k)
# this means the gradient is the score for "other" classes and score-1
# for the target class
d_scores = probs.copy()
d_scores[np.arange(N), y] -= 1
d_scores /= N
# W2 multiplies X2, so by the chain rule the gradient w.r.t. W2 is the
# downstream gradient d_scores left-multiplied by X2^T
d_W2 = X2.T.dot(d_scores)
# b2 was added, so its local derivative is 1; the chain rule reduces to
# summing the downstream gradient d_scores over the batch
d_b2 = np.sum(d_scores, axis=0)
# W1 is upstream of X2, so we continue this way
d_X2 = d_scores.dot(W2.T)
# ReLU derivative is 1 for > 0, else 0
d_scores1 = d_X2 * (scores1 > 0)
d_W1 = X.T.dot(d_scores1)
# b1 gradient
d_b1 = d_scores1.sum(axis=0)
# regularization gradient (reg*W2^2)
d_W2 += reg * 2 * W2
d_W1 += reg * 2 * W1
grads['W1'] = d_W1
grads['b1'] = d_b1
grads['W2'] = d_W2
grads['b2'] = d_b2
#######################################################################
# END OF YOUR CODE #
#######################################################################
return loss, grads
def train(self, X, y, X_val, y_val,
learning_rate=1e-3, learning_rate_decay=0.95,
reg=5e-6, num_iters=100,
batch_size=200, verbose=False):
"""
Train this neural network using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) giving training data.
- y: A numpy array of shape (N,) giving training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- X_val: A numpy array of shape (N_val, D) giving validation data.
- y_val: A numpy array of shape (N_val,) giving validation labels.
- learning_rate: Scalar giving learning rate for optimization.
- learning_rate_decay: Scalar giving factor used to decay the learning
rate after each epoch.
- reg: Scalar giving regularization strength.
- num_iters: Number of steps to take when optimizing.
- batch_size: Number of training examples to use per step.
- verbose: boolean; if true print progress during optimization.
"""
num_train = X.shape[0]
iterations_per_epoch = max(num_train / batch_size, 1)
# Use SGD to optimize the parameters in self.model
loss_history = []
train_acc_history = []
val_acc_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
###################################################################
# TODO: Create a random minibatch of training data and labels, #
# storing them in X_batch and y_batch respectively. #
###################################################################
# random indexes to sample training data/labels
sample_idx = np.random.choice(num_train, batch_size, replace=True)
X_batch = X[sample_idx]
y_batch = y[sample_idx]
###################################################################
# END OF YOUR CODE #
###################################################################
# Compute loss and gradients using the current minibatch
loss, grads = self.loss(X_batch, y=y_batch, reg=reg)
loss_history.append(loss)
###################################################################
# TODO: Use the gradients in the grads dictionary to update the #
# parameters of the network (stored in the dictionary self.params)#
# using stochastic gradient descent. You'll need to use the #
# gradients stored in the grads dictionary defined above. #
###################################################################
# For each weight in network parameters, update it with the
# corresponding calculated gradient
for key in self.params:
self.params[key] -= learning_rate * grads[key]
###################################################################
# END OF YOUR CODE #
###################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
# Every epoch, check train and val accuracy and decay learning rate
if it % iterations_per_epoch == 0:
# Check accuracy
train_acc = (self.predict(X_batch) == y_batch).mean()
val_acc = (self.predict(X_val) == y_val).mean()
train_acc_history.append(train_acc)
val_acc_history.append(val_acc)
# Decay learning rate
learning_rate *= learning_rate_decay
return {
'loss_history': loss_history,
'train_acc_history': train_acc_history,
'val_acc_history': val_acc_history,
}
def predict(self, X):
"""
Use the trained weights of this two-layer network to predict labels for
data points. For each data point we predict scores for each of the C
classes, and assign each data point to the class with the highest score.
Inputs:
- X: A numpy array of shape (N, D) giving N D-dimensional data points
to classify.
Returns:
- y_pred: A numpy array of shape (N,) giving predicted labels for each
of the elements of X. For all i, y_pred[i] = c means that X[i] is
predicted to have class c, where 0 <= c < C.
"""
y_pred = None
#######################################################################
# TODO: Implement this function; it should be VERY simple! #
#######################################################################
y_pred = np.argmax(self.loss(X), axis=1)
#######################################################################
# END OF YOUR CODE #
#######################################################################
return y_pred
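# Usage sketch (not part of the original assignment file): sanity-check the
# network on random toy data; all names below are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    net = TwoLayerNet(input_size=4, hidden_size=10, output_size=3)
    X_toy = np.random.randn(100, 4)
    y_toy = np.random.randint(3, size=100)
    history = net.train(X_toy, y_toy, X_toy, y_toy, num_iters=200)
    print('final loss: %.4f, train acc: %.2f' % (
        history['loss_history'][-1], (net.predict(X_toy) == y_toy).mean()))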
| 45.854478 | 85 | 0.487509 | 1,421 | 12,289 | 4.123153 | 0.214638 | 0.023895 | 0.010923 | 0.013313 | 0.084144 | 0.051715 | 0.044035 | 0.019799 | 0.011265 | 0.011265 | 0 | 0.013716 | 0.311824 | 12,289 | 268 | 86 | 45.854478 | 0.679082 | 0.480104 | 0 | 0 | 0 | 0 | 0.02309 | 0 | 0 | 0 | 0 | 0.011194 | 0 | 1 | 0.049383 | false | 0 | 0.037037 | 0 | 0.148148 | 0.024691 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a61c6ef3ad58f9b8003931de1870b0f5ad404c7
| 1,247 |
py
|
Python
|
python/example_code/s3/s3-python-example-get-bucket-policy.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 3 |
2021-01-19T20:23:17.000Z
|
2021-01-19T21:38:59.000Z
|
python/example_code/s3/s3-python-example-get-bucket-policy.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | null | null | null |
python/example_code/s3/s3-python-example-get-bucket-policy.py
|
onehitcombo/aws-doc-sdk-examples
|
03e2e0c5dee75c5decbbb99e849c51417521fd82
|
[
"Apache-2.0"
] | 2 |
2019-12-27T13:58:00.000Z
|
2020-05-21T18:35:40.000Z
|
# Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an S3 client
s3 = boto3.client('s3')
# Call to S3 to retrieve the policy for the given bucket
result = s3.get_bucket_policy(Bucket='my-bucket')
print(result)
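# Note: the policy document itself comes back as a JSON string under the
# 'Policy' key; json.loads(result['Policy']) parses it (requires import json).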
# snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
# snippet-sourcedescription:[s3-python-example-get-bucket-policy.py demonstrates how to retrieve the bucket policy of an Amazon S3 bucket.]
# snippet-keyword:[Python]
# snippet-keyword:[AWS SDK for Python (Boto3)]
# snippet-keyword:[Code Sample]
# snippet-keyword:[Amazon S3]
# snippet-service:[s3]
# snippet-sourcetype:[full-example]
# snippet-sourcedate:[2018-06-25]
# snippet-sourceauthor:[jschwarzwalder (AWS)]
| 35.628571 | 133 | 0.735365 | 186 | 1,247 | 4.919355 | 0.586022 | 0.054645 | 0.021858 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03074 | 0.165196 | 1,247 | 34 | 134 | 36.676471 | 0.848223 | 0.847634 | 0 | 0 | 0 | 0 | 0.083969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a630004921c5a5ff2ec4e4b2d0a96b0bf000baa
| 897 |
py
|
Python
|
data_io/util/value_blob_erosion.py
|
Rekrau/PyGreentea
|
457d7dc5be12b15c3c7663ceaf6d74301de56e43
|
[
"BSD-2-Clause"
] | null | null | null |
data_io/util/value_blob_erosion.py
|
Rekrau/PyGreentea
|
457d7dc5be12b15c3c7663ceaf6d74301de56e43
|
[
"BSD-2-Clause"
] | 4 |
2016-04-22T15:39:21.000Z
|
2016-11-15T21:23:58.000Z
|
data_io/util/value_blob_erosion.py
|
Rekrau/PyGreentea
|
457d7dc5be12b15c3c7663ceaf6d74301de56e43
|
[
"BSD-2-Clause"
] | 4 |
2017-05-12T00:17:55.000Z
|
2019-07-01T19:23:32.000Z
|
import numpy as np
from scipy import ndimage
def erode_value_blobs(array, steps=1, values_to_ignore=tuple(), new_value=0):
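    """Erode every equal-valued blob in `array` independently.

    Each unique value is shrunk by `steps` iterations of binary erosion,
    except values listed in `values_to_ignore`, which are kept untouched.
    Entries removed by the erosion are set to `new_value`. The background
    value takes part like any other value unless it is explicitly ignored.
    """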
unique_values = list(np.unique(array))
all_entries_to_keep = np.zeros(shape=array.shape, dtype=bool)  # np.bool is removed in newer NumPy
for unique_value in unique_values:
entries_of_this_value = array == unique_value
if unique_value in values_to_ignore:
all_entries_to_keep = np.logical_or(entries_of_this_value, all_entries_to_keep)
else:
eroded_unique_indicator = ndimage.binary_erosion(entries_of_this_value, iterations=steps)
all_entries_to_keep = np.logical_or(eroded_unique_indicator, all_entries_to_keep)
result = array * all_entries_to_keep
if new_value != 0:
eroded_entries = np.logical_not(all_entries_to_keep)
new_values = new_value * eroded_entries
result += new_values
return result
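# Usage sketch (not part of the original module): two touching blobs shrink to
# their interiors, leaving a background gap between them.
# labels = np.zeros((6, 8), dtype=int)
# labels[1:5, 1:4] = 1
# labels[1:5, 4:7] = 2
# separated = erode_value_blobs(labels, steps=1, values_to_ignore=(0,))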
| 42.714286 | 101 | 0.733556 | 132 | 897 | 4.560606 | 0.340909 | 0.116279 | 0.139535 | 0.186047 | 0.162791 | 0.089701 | 0.089701 | 0 | 0 | 0 | 0 | 0.004172 | 0.198439 | 897 | 20 | 102 | 44.85 | 0.833102 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a6b124cb7b2cd1d6d09ae5b84d5b49e63612508
| 679 |
py
|
Python
|
test_f_login_andy.py
|
KotoLLC/peacenik-tests
|
760f7799ab2b9312fe0cce373890195151c48fce
|
[
"Apache-2.0"
] | null | null | null |
test_f_login_andy.py
|
KotoLLC/peacenik-tests
|
760f7799ab2b9312fe0cce373890195151c48fce
|
[
"Apache-2.0"
] | null | null | null |
test_f_login_andy.py
|
KotoLLC/peacenik-tests
|
760f7799ab2b9312fe0cce373890195151c48fce
|
[
"Apache-2.0"
] | null | null | null |
from helpers import *
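# The wildcard import above is assumed to provide json, requests, save_cookies
# and the pretty_print_* helpers used below.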
def test_f_login_andy():
url = "http://central.orbits.local/rpc.AuthService/Login"
raw_payload = {"name": "andy","password": "12345"}
payload = json.dumps(raw_payload)
headers = {'Content-Type': 'application/json'}
# convert dict to json by json.dumps() for body data.
response = requests.request("POST", url, headers=headers, data=payload)
save_cookies(response.cookies,"cookies.txt")
# Validate response headers and body contents, e.g. status code.
assert response.status_code == 200
# print full request and response
pretty_print_request(response.request)
pretty_print_response(response)
| 35.736842 | 75 | 0.696613 | 86 | 679 | 5.372093 | 0.604651 | 0.04329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014467 | 0.185567 | 679 | 19 | 76 | 35.736842 | 0.820976 | 0.216495 | 0 | 0 | 0 | 0 | 0.213611 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.090909 | false | 0.090909 | 0.090909 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
6a6dcc4d9c3e1b2437b6c8b26173ce12b1dfa929
| 7,761 |
py
|
Python
|
week2/Assignment2Answer.py
|
RayshineRen/Introduction_to_Data_Science_in_Python
|
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
|
[
"MIT"
] | 1 |
2020-09-22T15:06:02.000Z
|
2020-09-22T15:06:02.000Z
|
week2/Assignment2Answer.py
|
RayshineRen/Introduction_to_Data_Science_in_Python
|
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
|
[
"MIT"
] | 1 |
2020-11-03T14:11:02.000Z
|
2020-11-03T14:24:50.000Z
|
week2/Assignment2Answer.py
|
RayshineRen/Introduction_to_Data_Science_in_Python
|
b19aa781a8f8d0e25853c4e86dadd4c9bebbcd71
|
[
"MIT"
] | 2 |
2020-09-22T05:27:09.000Z
|
2020-11-05T10:39:49.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 21:56:15 2020
@author: Ray
@email: 1324789704@qq.com
@wechat: RayTing0305
"""
'''
Question 1
Write a function called proportion_of_education which returns the proportion of children in the dataset who had a mother with the education levels equal to less than high school (<12), high school (12), more than high school but not a college graduate (>12) and college degree.
This function should return a dictionary in the form of (use the correct numbers, do not round numbers):
{"less than high school":0.2,
"high school":0.4,
"more than high school but not college":0.2,
"college":0.2}
'''
import scipy.stats as stats
import numpy as np
import pandas as pd
df = pd.read_csv("./assets/NISPUF17.csv")
def proportion_of_education():
# your code goes here
# YOUR CODE HERE
df_edu = df.EDUC1
edu_list = [1, 2, 3, 4]
zero_df = pd.DataFrame(np.zeros((df_edu.shape[0], len(edu_list))), columns=edu_list)
for edu in edu_list:
zero_df[edu][df_edu==edu]=1
#zero_df
sum_ret = zero_df.sum(axis=0)
name_l = ["less than high school", "high school", "more than high school but not college", "college"]
rat = sum_ret.values/sum(sum_ret.values)
dic = dict()
for i in range(4):
dic[name_l[i]] = rat[i]
return dic
raise NotImplementedError()
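# An equivalent pandas one-liner (sketch): the same proportions can be read
# off with df.EDUC1.value_counts(normalize=True).sort_index().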
assert type(proportion_of_education())==type({}), "You must return a dictionary."
assert len(proportion_of_education()) == 4, "You have not returned a dictionary with four items in it."
assert "less than high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "high school" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "more than high school but not college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct keys."
assert "college" in proportion_of_education().keys(), "You have not returned a dictionary with the correct"
'''
Question 2
Let's explore the relationship between being fed breastmilk as a child and getting a seasonal influenza vaccine from a healthcare provider. Return a tuple of the average number of influenza vaccines for those children we know received breastmilk as a child and those who we know did not.
This function should return a tuple in the form (use the correct numbers:
(2.5, 0.1)
'''
def average_influenza_doses():
# YOUR CODE HERE
# whether the child was breastfed (CBF_01)
fed_breastmilk = list(df.groupby(by='CBF_01'))
be_fed_breastmilk = fed_breastmilk[0][1]
not_fed_breastmilk = fed_breastmilk[1][1]
# number of influenza doses among breastfed children
be_fed_breastmilk_influenza = be_fed_breastmilk.P_NUMFLU
num_be_fed_breastmilk_influenza = be_fed_breastmilk_influenza.dropna().mean()
# number of influenza doses among non-breastfed children
not_be_fed_breastmilk_influenza = not_fed_breastmilk.P_NUMFLU
num_not_be_fed_breastmilk_influenza = not_be_fed_breastmilk_influenza.dropna().mean()
return num_be_fed_breastmilk_influenza, num_not_be_fed_breastmilk_influenza
raise NotImplementedError()
assert len(average_influenza_doses())==2, "Return two values in a tuple, the first for yes and the second for no."
'''
Question 3
It would be interesting to see if there is any evidence of a link between vaccine effectiveness and sex of the child. Calculate the ratio of the number of children who contracted chickenpox but were vaccinated against it (at least one varicella dose) versus those who were vaccinated but did not contract chicken pox. Return results by sex.
This function should return a dictionary in the form of (use the correct numbers):
{"male":0.2,
"female":0.4}
Note: To aid in verification, the chickenpox_by_sex()['female'] value the autograder is looking for starts with the digits 0.0077.
'''
def chickenpox_by_sex():
# YOUR CODE HERE
# whether the child contracted varicella (chickenpox)
cpox = df.HAD_CPOX
#cpox.value_counts()
cpox_group = list(df.groupby(by='HAD_CPOX'))
have_cpox = cpox_group[0][1]
not_have_cpox = cpox_group[1][1]
# split each group by sex
have_cpox_group = list(have_cpox.groupby(by='SEX'))
not_have_cpox_group = list(not_have_cpox.groupby(by='SEX'))
have_cpox_boy = have_cpox_group[0][1]
have_cpox_girl = have_cpox_group[1][1]
not_have_cpox_boy = not_have_cpox_group[0][1]
not_have_cpox_girl = not_have_cpox_group[1][1]
# vaccinated and contracted chickenpox
#have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMMMR']>0) | (have_cpox_boy['P_NUMVRC']>0)]
have_cpox_boy_injected = have_cpox_boy[(have_cpox_boy['P_NUMVRC']>0)]
num_have_cpox_boy_injected = have_cpox_boy_injected.count()['SEQNUMC']
have_cpox_girl_injected = have_cpox_girl[(have_cpox_girl['P_NUMVRC']>0)]
num_have_cpox_girl_injected = have_cpox_girl_injected.count()['SEQNUMC']
# vaccinated but did not contract chickenpox
not_have_cpox_boy_injected = not_have_cpox_boy[(not_have_cpox_boy['P_NUMVRC']>0)]
num_not_have_cpox_boy_injected = not_have_cpox_boy_injected.count()['SEQNUMC']
not_have_cpox_girl_injected = not_have_cpox_girl[(not_have_cpox_girl['P_NUMVRC']>0)]
num_not_have_cpox_girl_injected = not_have_cpox_girl_injected.count()['SEQNUMC']
# ratio of contracted to not-contracted among the vaccinated, per sex
ratio_boy = num_have_cpox_boy_injected / num_not_have_cpox_boy_injected
ratio_girl = num_have_cpox_girl_injected / num_not_have_cpox_girl_injected
dic = {}
dic['male'] = ratio_boy
dic['female'] = ratio_girl
return dic
raise NotImplementedError()
assert len(chickenpox_by_sex())==2, "Return a dictionary with two items, the first for males and the second for females."
'''
Question 4
A correlation is a statistical relationship between two variables. If we wanted to know if vaccines work, we might look at the correlation between the use of the vaccine and whether it results in prevention of the infection or disease [1]. In this question, you are to see if there is a correlation between having had the chicken pox and the number of chickenpox vaccine doses given (varicella).
Some notes on interpreting the answer. The had_chickenpox_column is either 1 (for yes) or 2 (for no), and the num_chickenpox_vaccine_column is the number of doses a child has been given of the varicella vaccine. A positive correlation (e.g., corr > 0) means that an increase in had_chickenpox_column (which means more no’s) would also increase the values of num_chickenpox_vaccine_column (which means more doses of vaccine). If there is a negative correlation (e.g., corr < 0), it indicates that having had chickenpox is related to an increase in the number of vaccine doses.
Also, pval is the probability that we observe a correlation between had_chickenpox_column and num_chickenpox_vaccine_column which is greater than or equal to a particular value occurred by chance. A small pval means that the observed correlation is highly unlikely to occur by chance. In this case, pval should be very small (will end in e-18 indicating a very small number).
[1] This isn’t really the full picture, since we are not looking at when the dose was given. It’s possible that children had chickenpox and then their parents went to get them the vaccine. Does this dataset have the data we would need to investigate the timing of the dose?
'''
def corr_chickenpox():
cpox = df[(df.P_NUMVRC).notnull()]
have_cpox = cpox[(cpox.HAD_CPOX==1) | (cpox.HAD_CPOX==2)]
df1=pd.DataFrame({"had_chickenpox_column":have_cpox.HAD_CPOX,
"num_chickenpox_vaccine_column":have_cpox.P_NUMVRC})
corr, pval=stats.pearsonr(df1["had_chickenpox_column"],df1["num_chickenpox_vaccine_column"])
return corr
raise NotImplementedError()
| 53.895833 | 576 | 0.74024 | 1,237 | 7,761 | 4.43169 | 0.23848 | 0.065669 | 0.038125 | 0.031193 | 0.356804 | 0.270522 | 0.212149 | 0.12915 | 0.12915 | 0.102153 | 0 | 0.017069 | 0.177168 | 7,761 | 143 | 577 | 54.272727 | 0.841372 | 0.048963 | 0 | 0.085714 | 0 | 0 | 0.203759 | 0.029921 | 0 | 0 | 0 | 0.020979 | 0.114286 | 1 | 0.057143 | false | 0 | 0.042857 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a6f28bb63a4999e5f2dcb27c1de7d562bafcd05
| 1,664 |
py
|
Python
|
Experimente/Experiment ID 8/run-cifar10-v7.py
|
MichaelSchwabe/conv-ebnas-abgabe
|
f463d7bbd9b514597e19d25007913f7994cbbf7c
|
[
"MIT"
] | 6 |
2021-11-03T07:20:48.000Z
|
2021-11-10T08:20:44.000Z
|
Experimente/Experiment ID 8/run-cifar10-v7.py
|
MichaelSchwabe/conv-ebnas-abgabe
|
f463d7bbd9b514597e19d25007913f7994cbbf7c
|
[
"MIT"
] | 1 |
2021-11-02T21:10:51.000Z
|
2021-11-02T21:11:05.000Z
|
Experimente/Experiment ID 8/run-cifar10-v7.py
|
MichaelSchwabe/conv-ebnas-abgabe
|
f463d7bbd9b514597e19d25007913f7994cbbf7c
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from keras.datasets import mnist
from keras.datasets import cifar10
from keras.utils.np_utils import to_categorical
import numpy as np
from keras import backend as K
from evolution import Evolution
from genome_handler import GenomeHandler
import tensorflow as tf
#import mlflow.keras
#import mlflow
#import mlflow.tensorflow
#mlflow.tensorflow.autolog()
#mlflow.keras.autolog()
print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))
K.set_image_data_format("channels_last")
#(x_train, y_train), (x_test, y_test) = mnist.load_data()
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],x_train.shape[3]).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], x_test.shape[3]).astype('float32') / 255
# one-hot encode the class labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
#y_train.shape
dataset = ((x_train, y_train), (x_test, y_test))
genome_handler = GenomeHandler(max_conv_layers=4,
max_dense_layers=2, # includes final dense layer
max_filters=512,
max_dense_nodes=1024,
input_shape=x_train.shape[1:],
n_classes=10)
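# The handler above bounds the evolutionary search space: up to 4 conv layers
# with at most 512 filters each and up to 2 dense layers (including the final
# classifier) with at most 1024 nodes, for CIFAR-10's 10 classes.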
evo = Evolution(genome_handler, data_path="log/evo_cifar10_gen40_pop10_e20.csv")
model = evo.run(dataset=dataset,
num_generations=40,
pop_size=10,
epochs=20,metric='acc')
#epochs=10,metric='loss')
print(model.summary())
| 37.818182 | 120 | 0.676683 | 239 | 1,664 | 4.435146 | 0.380753 | 0.056604 | 0.051887 | 0.033962 | 0.103774 | 0.062264 | 0.062264 | 0.062264 | 0 | 0 | 0 | 0.038139 | 0.212139 | 1,664 | 44 | 121 | 37.818182 | 0.770404 | 0.141226 | 0 | 0 | 0 | 0 | 0.061972 | 0.024648 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.310345 | 0 | 0.310345 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
6a7641f27315b4a34aa454452b185ab3ffeddc05
| 505 |
py
|
Python
|
user_service/user_service/api.py
|
Ziang-Lu/Flask-Blog
|
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
|
[
"MIT"
] | null | null | null |
user_service/user_service/api.py
|
Ziang-Lu/Flask-Blog
|
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
|
[
"MIT"
] | 2 |
2020-06-09T08:40:42.000Z
|
2021-04-30T21:20:35.000Z
|
user_service/user_service/api.py
|
Ziang-Lu/Flask-Blog
|
8daf901a0ea0e079ad24a61fd7f16f1298514d4c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
API definition module.
"""
from flask import Blueprint
from flask_restful import Api
from .resources.user import UserAuth, UserItem, UserList, UserFollow
# Create an API-related blueprint
api_bp = Blueprint(name='api', import_name=__name__)
api = Api(api_bp)
api.add_resource(UserList, '/users')
api.add_resource(UserItem, '/users/<int:id>')
api.add_resource(UserAuth, '/user-auth')
api.add_resource(
UserFollow, '/user-follow/<int:follower_id>/<followed_username>'
)
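# A minimal sketch of how this blueprint might be mounted on an application.
# The app object and URL prefix below are illustrative assumptions, not part
# of this module:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(api_bp, url_prefix='/v1')
#   # -> GET /v1/users, GET /v1/users/<int:id>, POST /v1/user-auth, ...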
| 22.954545 | 68 | 0.740594 | 69 | 505 | 5.217391 | 0.463768 | 0.066667 | 0.155556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002227 | 0.110891 | 505 | 21 | 69 | 24.047619 | 0.799555 | 0.152475 | 0 | 0 | 0 | 0 | 0.200477 | 0.119332 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.363636 | 0 | 0.363636 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
6a77df2fb34c60a66cb0710a264af376f888be93
| 2,112 |
py
|
Python
|
advanced/itertools_funcs.py
|
ariannasg/python3-essential-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | 1 |
2020-06-02T08:37:41.000Z
|
2020-06-02T08:37:41.000Z
|
advanced/itertools_funcs.py
|
ariannasg/python3-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | null | null | null |
advanced/itertools_funcs.py
|
ariannasg/python3-training
|
9b52645f5ccb57d2bda5d5f4a3053681a026450a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import itertools
# itertools is a module that's not technically a set of built-in functions but
# it is part of the standard library that comes with python.
# it's useful for creating and using iterators.
def main():
    print('some infinite iterators')
    # cycle iterator can be used to cycle over a collection over and over
    seq1 = ["Joe", "John", "Mike"]
    cycle1 = itertools.cycle(seq1)
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    print(next(cycle1))
    # use count to create a simple counter
    count1 = itertools.count(100, 3)
    print(next(count1))
    print(next(count1))
    print(next(count1))
    print('some non-infinite iterators')
    values = [10, 5, 20, 30, 40, 50, 40, 30]
    # accumulate creates an iterator that accumulates/aggregates values
    print(list(itertools.accumulate(values)))  # this defaults to addition
    print(list(itertools.accumulate(values, max)))
    print(list(itertools.accumulate(values, min)))
    # use chain to connect sequences together
    x = itertools.chain('ABCD', '1234')
    print(list(x))
    # dropwhile and takewhile will return values until
    # a certain condition is met that stops them. they are similar to the
    # filter built-in function.
    # dropwhile will drop the values from the sequence as long as the
    # condition of the function is true and then returns the rest of values
    print(list(itertools.dropwhile(is_less_than_forty, values)))
    # takewhile will keep the values from the sequence as long as the
    # condition of the function is true and then stops giving data
    print(list(itertools.takewhile(is_less_than_forty, values)))


def is_less_than_forty(x):
    return x < 40


if __name__ == "__main__":
    main()
# CONSOLE OUTPUT:
# some infinite iterators
# Joe
# John
# Mike
# Joe
# John
# 100
# 103
# 106
# some non-infinite iterators
# [10, 15, 35, 65, 105, 155, 195, 225]
# [10, 10, 20, 30, 40, 50, 50, 50]
# [10, 5, 5, 5, 5, 5, 5, 5]
# ['A', 'B', 'C', 'D', '1', '2', '3', '4']
# [40, 50, 40, 30]
# [10, 5, 20, 30]
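# a related trick (a small sketch, not part of the lesson above): infinite
# iterators like cycle() and count() pair well with itertools.islice, which
# bounds them so they can safely be turned into a list:
# print(list(itertools.islice(itertools.count(100, 3), 5)))
# # [100, 103, 106, 109, 112]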
| 29.333333 | 78 | 0.673295 | 321 | 2,112 | 4.376947 | 0.420561 | 0.051246 | 0.053381 | 0.05694 | 0.296085 | 0.193594 | 0.188612 | 0.153025 | 0.153025 | 0.153025 | 0 | 0.066547 | 0.210227 | 2,112 | 71 | 79 | 29.746479 | 0.775779 | 0.529356 | 0 | 0.296296 | 0 | 0 | 0.080125 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.037037 | 0.037037 | 0.148148 | 0.592593 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
6a782fcc9b346f1edc133e8b8d12314c1cc0a5ff
| 421 |
py
|
Python
|
aula 05/model/Pessoa.py
|
Azenha/AlgProg2
|
062b5caac24435717074a18a7499f80130489a46
|
[
"MIT"
] | null | null | null |
aula 05/model/Pessoa.py
|
Azenha/AlgProg2
|
062b5caac24435717074a18a7499f80130489a46
|
[
"MIT"
] | null | null | null |
aula 05/model/Pessoa.py
|
Azenha/AlgProg2
|
062b5caac24435717074a18a7499f80130489a46
|
[
"MIT"
] | null | null | null |
class Pessoa:
    def __init__(self, codigo, nome, endereco, telefone):
        self.__codigo = int(codigo)
        self.nome = str(nome)
        self._endereco = str(endereco)
        self.__telefone = str(telefone)

    def imprimeNome(self):
        print(f"You can call this person {self.nome}.")

    def __imprimeTelefone(self):
        print(f"You can reach this person at {self.__telefone}.")
| 35.083333 | 79 | 0.650831 | 53 | 421 | 4.924528 | 0.471698 | 0.076628 | 0.076628 | 0.10728 | 0.137931 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.239905 | 421 | 12 | 79 | 35.083333 | 0.815625 | 0 | 0 | 0 | 0 | 0 | 0.248815 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.3 | false | 0 | 0 | 0 | 0.4 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a78c857a857449cf31704c6af0759d610215a2d
| 25,852 |
py
|
Python
|
pypyrus_logbook/logger.py
|
t3eHawk/pypyrus_logbook
|
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
|
[
"MIT"
] | null | null | null |
pypyrus_logbook/logger.py
|
t3eHawk/pypyrus_logbook
|
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
|
[
"MIT"
] | null | null | null |
pypyrus_logbook/logger.py
|
t3eHawk/pypyrus_logbook
|
bd647a1c355b07e8df28c0d7298fcfe68cd9572e
|
[
"MIT"
] | 2 |
2019-02-06T08:05:43.000Z
|
2019-02-06T08:06:35.000Z
|
import atexit
import datetime as dt
import os
import platform
import pypyrus_logbook as logbook
import sys
import time
import traceback
from .conf import all_loggers
from .formatter import Formatter
from .header import Header
from .output import Root
from .record import Record
from .sysinfo import Sysinfo
class Logger():
"""This class represents a single logger.
Logger by itself is a complex set of methods, items and commands that
together give functionality for advanced logging to different outputs:
console, file, email, database table, HTML document - and using information
from different inputs: user messages, traceback, frames, user parameters,
execution arguments and system descriptors.
Each logger must have a unique name which will help to identify it.
The main application logger will have the same name as the python script
file. It can be accessed by native logbook methods or by calling the
`getlogger()` method with no name.
Parameters
----------
name : str, optional
The argument is used to define the `name` attribute.
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records. The default is True.
debug : bool, optional
The argument is used to filter debug records. The default is False.
warning : bool, optional
The argument is used to filter warning records. The default is True.
error : bool, optional
The argument is used to filter error records. The default is True.
critical : bool, optional
The argument is used to filter critical records. The default is True.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism. The
default is True.
control : bool, optional
The argument is used to enable or disable execution break in case
on error. The default is True.
maxsize : int or bool, optional
The argument is used to define the maximum size of the output file. Must
be given as a number of bytes. The default is 10 MB.
maxdays : int or bool, optional
The argument is used to define the maximum number of days that will be
logged to the same file. The default is 1, which means that a new output
file will be opened each day at 00:00:00.
maxlevel : int or bool, optional
The argument is used to define the break error level (WARNING = 0,
ERROR = 1, CRITICAL = 2). Anything at or above the break level will
interrupt application execution. The default is 1.
maxerrors : int or bool, optional
The argument is used to define the maximum number of errors. The default
is False, which means it is disabled.
Attributes
----------
name : str
Name of the logger.
app : str
Name of the application that we are logging.
desc : str
Description of the application that we are logging.
version : str
Version of the application that we are logging.
start_date : datetime.datetime
Date when logging was started.
rectypes : dict
All available record types. Keys are used in `Logger` write methods as
the `rectype` argument. Values are used in formatting. So if you wish to
modify a `rectype` form then edit the appropriate one here. If you wish
to use your own record types then just add them to this dictionary. By
default we provide the following record types:
+---------+---------+
| Key | Value |
+=========+=========+
|none |NONE |
+---------+---------+
|info |INFO |
+---------+---------+
|debug |DEBUG |
+---------+---------+
|warning |WARNING |
+---------+---------+
|error |ERROR |
+---------+---------+
|critical |CRITICAL |
+---------+---------+
messages : dict
Messages that are printed with some `Logger` methods like `ok()`,
`success()`, `fail()`. If you wish to modify the text of these messages
just edit the value of the appropriate item.
with_error : bool
The flag shows whether the logger caught errors in the application
during its execution.
count_errors : int
Number of errors that the logger caught in the application during its
execution.
filters : dict
Record types filters. To filter record type just set corresponding
item value to False.
root : pypyrus_logbook.output.Root
The output `Root` object.
console : pypyrus_logbook.output.Console
The output `Console` object. Shortcut for `Logger.root.console`.
file : pypyrus_logbook.output.File
The output file. Shortcut for `Logger.output.file`.
email : pypyrus_logbook.output.Email
The output email. Shortcut for `Logger.output.email`.
html: pypyrus_logbook.output.HTML
The output HTML document. Shortcut for `Logger.output.html`.
table: pypyrus_logbook.output.Table
The output table. Shortcut for `Logger.output.table`.
formatter : pypyrus_logbook.formatter.Formatter
Logger formatter which sets all formatting configuration like
record template, error message template, line length etc.
sysinfo : pypyrus_logbook.sysinfo.Sysinfo
Special input object which parses different inputs including system
specifications, flag arguments, execution parameters, user parameters
and environment variables and transforms all of that into a `Dataset`
object. Through the `Dataset` object, data can be easily accessed by
item access or by attribute access, like `sysinfo.desc['hostname']` or
`sysinfo.desc.hostname`.
header : pypyrus_logbook.header.Header
The header that can be printed to the writable output.
"""
def __init__(self, name=None, app=None, desc=None, version=None,
status=True, console=True, file=True, email=False, html=False,
table=False, directory=None, filename=None, extension=None,
smtp=None, db=None, format=None, info=True, debug=False,
warning=True, error=True, critical=True, alarming=True,
control=True, maxsize=(1024*1024*10), maxdays=1, maxlevel=2,
maxerrors=False):
# Unique name of the logger.
self._name = name
# Attributes describing the application.
self.app = None
self.desc = None
self.version = None
# Some logger important attributes
self._start_date = dt.datetime.now()
self.rectypes = {'none': 'NONE', 'info': 'INFO', 'debug': 'DEBUG',
'warning': 'WARNING', 'error': 'ERROR',
'critical': 'CRITICAL'}
self.messages = {'ok': 'OK', 'success': 'SUCCESS', 'fail': 'FAIL'}
self._with_error = False
self._count_errors = 0
# Complete the initial configuration.
self.configure(app=app, desc=desc, version=version, status=status,
console=console, file=file, email=email, html=html,
table=table, directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db, format=format,
info=info, debug=debug, warning=warning, error=error,
critical=critical, alarming=alarming, control=control,
maxsize=maxsize, maxdays=maxdays, maxlevel=maxlevel,
maxerrors=maxerrors)
# Output shortcuts.
self.console = self.root.console
self.file = self.root.file
self.email = self.root.email
self.html = self.root.html
self.table = self.root.table
# Set exit function.
atexit.register(self._exit)
# Register the created logger in the special all_loggers dictionary.
all_loggers[self._name] = self
pass
def __str__(self):
return f'<Logger object "{self._name}">'
__repr__ = __str__
@property
def name(self):
"""Unique logger name."""
return self._name
@property
def start_date(self):
"""Logging start date."""
return self._start_date
@property
def with_error(self):
"""Flag that shows was an error or not."""
return self._with_error
@property
def count_errors(self):
"""The number of occured errors."""
return self._count_errors
def configure(self, app=None, desc=None, version=None, status=None,
console=None, file=None, email=None, html=None, table=None,
directory=None, filename=None, extension=None, smtp=None,
db=None, format=None, info=None, debug=None, warning=None,
error=None, critical=None, alarming=None, control=None,
maxsize=None, maxdays=None, maxlevel=None, maxerrors=None):
"""Main method to configure the logger and all its attributes.
This is the only right way to customize the logger. Parameters are the
same as for creation.
Parameters
----------
app : str, optional
The argument is used to set the `app` attribute.
desc : str, optional
The argument is used to set the `desc` attribute.
version : str, optional
The argument is used to set the `version` attribute.
status : bool, optional
The argument is used to open or close output `root`.
console : bool, optional
The argument is used to open or close output `console`.
file : bool, optional
The argument is used to open or close output `file`.
email : bool, optional
The argument is used to open or close output `email`.
html : bool, optional
The argument is used to open or close output `html`.
table : bool, optional
The argument is used to open or close output `table`.
directory : str, optional
The argument is used to set logging file folder.
filename : str, optional
The argument is used to set logging file name.
extension : str, optional
The argument is used to set logging file extension.
smtp : dict, optional
The argument is used to configure SMTP connection.
db : dict, optional
The argument is used to configure DB connection.
format : str, optional
The argument is used to set record template.
info : bool, optional
The argument is used to filter info records.
debug : bool, optional
The argument is used to filter debug records.
warning : bool, optional
The argument is used to filter warning records.
error : bool, optional
The argument is used to filter error records.
critical : bool, optional
The argument is used to filter critical records.
alarming : bool, optional
The argument is used to enable or disable alarming mechanism.
control : bool, optional
The argument is used to enable or disable execution break in case
on error.
maxsize : int or bool, optional
The argument is used to define maximum size of output file.
maxdays : int or bool, optional
The argument is used to define maximum number of days that will be
logged to same file.
maxlevel : int or bool, optional
The argument is used to define the break error level.
maxerrors : int or bool, optional
The argument is used to define the maximum number of errors.
"""
if isinstance(app, str) is True: self.app = app
if isinstance(desc, str) is True: self.desc = desc
if isinstance(version, (str, int, float)) is True:
self.version = version
# Build the output root if it does not exist. Otherwise modify the
# existing output if requested.
if hasattr(self, 'root') is False:
self.root = Root(self, console=console, file=file, email=email,
html=html, table=table, status=status,
directory=directory, filename=filename,
extension=extension, smtp=smtp, db=db)
else:
for key, value in {'console': console, 'file': file,
'email': email, 'html': html,
'table': table}.items():
if value is True:
getattr(self.root, key).open()
if key == 'file':
getattr(self.root, key).new()
elif value is False:
getattr(self.root, key).close()
# Customize output file path.
path = {}
if directory is not None: path['dir'] = directory
if filename is not None: path['name'] = filename
if extension is not None: path['ext'] = extension
if len(path) > 0:
self.root.file.configure(**path)
# Customize SMTP server.
if isinstance(smtp, dict) is True:
self.root.email.configure(**smtp)
# Customize database connection.
if isinstance(db, dict) is True:
self.root.table.configure(**db)
# Create the formatter in case it does not exist yet or just customize it.
# Parameter format can be either a string or a dictionary.
# When it is a string then it must describe the record format.
# When it is a dictionary it can contain any parameter of the formatter
# that must be customized.
if isinstance(format, str) is True:
format = {'record': format}
if hasattr(self, 'formatter') is False:
format = {} if isinstance(format, dict) is False else format
self.formatter = Formatter(**format)
elif isinstance(format, dict) is True:
self.formatter.configure(**format)
# Create or customize record type filters.
if hasattr(self, 'filters') is False:
self.filters = {}
for key, value in {'info': info, 'debug': debug, 'error': error,
'warning': warning, 'critical': critical}.items():
if isinstance(value, bool) is True:
self.filters[key] = value
# Customize limits and parameters of execution behaviour.
if isinstance(maxsize, (int, float, bool)) is True:
self._maxsize = maxsize
if isinstance(maxdays, (int, float, bool)) is True:
self._maxdays = maxdays
self.__calculate_restart_date()
if isinstance(maxlevel, (int, float, bool)) is True:
self._maxlevel = maxlevel
if isinstance(maxerrors, (int, float, bool)) is True:
self._maxerrors = maxerrors
if isinstance(alarming, bool) is True:
self._alarming = alarming
if isinstance(control, bool) is True:
self._control = control
# Initialize the sysinfo instance when it does not exist.
if hasattr(self, 'sysinfo') is False:
self.sysinfo = Sysinfo(self)
# Initialize the header instance when it does not exist.
if hasattr(self, 'header') is False:
self.header = Header(self)
pass
def write(self, record):
"""Direct write to the output.
Parameters
----------
record : Record
The argument is used to send it to the output `root`.
"""
self.__check_file_stats()
self.root.write(record)
pass
def record(self, rectype, message, error=False, **kwargs):
"""Basic method to write records.
Parameters
----------
rectype : str
By default the method creates a record with the type NONE.
That can be changed but depends on the available record types.
All registered record types are stored in the instance attribute
rectypes. If you wish to use your own record type or change the
presentation of an existing one then edit this dictionary.
message : str
The message that must be written.
error : bool, optional
If record is error then set that parameter to `True`.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
if self.filters.get(rectype, True) is True:
record = Record(self, rectype, message, error=error, **kwargs)
self.write(record)
pass
def info(self, message, **kwargs):
"""Send INFO record to output."""
rectype = 'info'
self.record(rectype, message, **kwargs)
pass
def debug(self, message, **kwargs):
"""Send DEBUG record to the output."""
rectype = 'debug'
self.record(rectype, message, **kwargs)
pass
def error(self, message=None, rectype='error', format=None, alarming=False,
level=1, **kwargs):
"""Send ERROR record to the output.
If an exception exists in the current traceback then the method will
format the exception according to the `formatter.error` string
presentation. If `formatter.error` is set to `False` the exception will
just be printed in the original Python style.
Also the method will send an alarm if the alarming attribute is `True`,
email output is enabled and the SMTP server is configured.
If one of the limit triggers fires then the application will be aborted.
Parameters
----------
message : str, optional
The message that must be written instead of exception.
rectype : str, optional
The type of error according to `rectypes` dictionary.
format : str, optional
The format of the error message.
alarming : bool
The argument is used to enable or disable the alarming mechanism
for this certain call.
level : int
The argument is used to describe the error level.
**kwargs
The keyword arguments used for additional forms (variables) for
record and message formatting.
"""
self._with_error = True
self._count_errors += 1
format = self.formatter.error if format is None else format
# Parse the error.
err_type, err_value, err_tb = sys.exc_info()
if message is None and err_type is not None:
if isinstance(format, str) is True:
err_name = err_type.__name__
for tb in traceback.walk_tb(err_tb):
f_code = tb[0].f_code
err_file = os.path.abspath(f_code.co_filename)
err_line = tb[1]
err_obj = f_code.co_name
self.record(rectype, message, error=True,
err_name=err_name, err_value=err_value,
err_file=err_file, err_line=err_line,
err_obj=err_obj, **kwargs)
elif format is False:
exception = traceback.format_exception(err_type, err_value,
err_tb)
message = '\n'
message += ''.join(exception)
self.record(rectype, message, **kwargs)
else:
message = message or ''
self.record(rectype, message, **kwargs)
# Break execution in case of critical error if permitted.
# The alarm will be generated at exit if it is configured.
if self._control is True:
if level >= self._maxlevel:
sys.exit()
if self._maxerrors is not False:
if self._count_errors > self._maxerrors:
sys.exit()
# Send alarm if execution was not aborted but alarm is needed.
if alarming is True:
self.root.email.alarm()
pass
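# A sketch of the intended call pattern for error() (illustrative, based on
# the docstring above): called with no message it formats whatever exception
# is active on sys.exc_info().
#
#   try:
#       risky()
#   except Exception:
#       logger.error()            # formatted via formatter.error
#       logger.error('custom')    # or pass an explicit message instead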
def warning(self, message=None, **kwargs):
"""Send WARNING error record to the output."""
self.error(message, rectype='warning', level=0, **kwargs)
pass
def critical(self, message=None, **kwargs):
"""Send CRITICAL error record to the output."""
self.error(message, rectype='critical', level=2, **kwargs)
pass
def head(self):
"""Send header to the output."""
string = self.header.create()
self.write(string)
pass
def subhead(self, string):
"""Send subheader as upper-case text between two border lines to the
output.
Parameters
----------
string : str
The text that will be presented as subheader.
"""
bound = f'{self.formatter.div*self.formatter.length}\n'
string = f'{bound}\t{string}\n{bound}'.upper()
self.write(string)
pass
def line(self, message):
"""Send raw text with the new line to the output.
Parameters
----------
message : str
The message that must be written.
"""
self.write(f'{message}\n')
pass
def bound(self, div=None, length=None):
"""Write a horizontal border to the output. Useful when you need to
separate different blocks of information.
Parameters
----------
div : str, optional
Symbol that is used to build the bound.
length : int, optional
Length of the bound.
"""
div = div if div is not None else self.formatter.div
length = length if length is not None else self.formatter.length
border = div * length
self.write(border + '\n')
pass
def blank(self, number=1):
"""Write blank lines in the output.
Parameters
----------
number : int, optional
The number of the blank lines that must be written.
"""
string = '\n'*number
self.write(string)
pass
def ok(self, **kwargs):
"""Print INFO message with OK."""
rectype = 'info'
message = self.messages['ok']
self.record(rectype, message, **kwargs)
pass
def success(self, **kwargs):
"""Print INFO message with SUCCESS."""
rectype = 'info'
message = self.messages['success']
self.record(rectype, message, **kwargs)
pass
def fail(self, **kwargs):
"""Print INFO message with FAIL."""
rectype = 'info'
message = self.messages['fail']
self.record(rectype, message, **kwargs)
pass
def restart(self):
"""Restart logging. Will open new file."""
self._start_date = dt.datetime.now()
self.__calculate_restart_date()
if self.root.file.status is True:
self.root.file.new()
if self.header.used is True:
self.head()
pass
def send(self, *args, **kwargs):
"""Send email message. Note that SMTP server connection must be
configured.
"""
self.root.email.send(*args, **kwargs)
pass
def set(self, **kwargs):
"""Update values in table. Note that DB connection must be
configured.
"""
self.root.table.write(**kwargs)
pass
def _exit(self):
# Inform about the error.
if self._alarming is True and self._with_error is True:
self.root.email.alarm()
pass
def __calculate_restart_date(self):
"""Calculate the date when logger must be restarted according to
maxdays parameter.
"""
self.__restart_date = (self._start_date
+ dt.timedelta(days=self._maxdays))
pass
def __check_file_stats(self):
"""Check the output file statistics to catch when current file must be
closed and new one must be opened.
"""
if self.root.file.status is True:
if self._maxsize is not False:
if self.root.file.size is not None:
if self.root.file.size > self._maxsize:
self.restart()
return
if self._maxdays is not False:
# Compare dates rather than the day of month alone, which would
# misfire across month boundaries.
if dt.datetime.now().date() >= self.__restart_date.date():
self.restart()
return
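# A minimal end-to-end sketch (names and messages are illustrative, assuming
# the constructor documented above):
#
#   logger = Logger('app', console=True, file=True)
#   logger.head()
#   logger.info('processing started')
#   logger.subhead('load phase')
#   logger.ok()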
| 39.348554 | 79 | 0.592952 | 3,127 | 25,852 | 4.856092 | 0.1244 | 0.041291 | 0.047942 | 0.062693 | 0.394929 | 0.349687 | 0.314587 | 0.276984 | 0.265789 | 0.259862 | 0 | 0.001947 | 0.324501 | 25,852 | 656 | 80 | 39.408537 | 0.867606 | 0.492999 | 0 | 0.224806 | 0 | 0 | 0.032819 | 0.006209 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108527 | false | 0.085271 | 0.054264 | 0.003876 | 0.197674 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
6a79e21ee2f5d7ad67e69bd27f9206807683db56
| 488 |
py
|
Python
|
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | null | null | null |
darling_ansible/python_venv/lib/python3.7/site-packages/oci/object_storage/transfer/constants.py
|
revnav/sandbox
|
f9c8422233d093b76821686b6c249417502cf61d
|
[
"Apache-2.0"
] | 1 |
2020-06-25T03:12:58.000Z
|
2020-06-25T03:12:58.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
MEBIBYTE = 1024 * 1024
STREAMING_DEFAULT_PART_SIZE = 10 * MEBIBYTE
DEFAULT_PART_SIZE = 128 * MEBIBYTE
OBJECT_USE_MULTIPART_SIZE = 128 * MEBIBYTE
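# For reference: MEBIBYTE == 1048576 bytes, so STREAMING_DEFAULT_PART_SIZE is
# 10 MiB (10485760 bytes) and both DEFAULT_PART_SIZE and
# OBJECT_USE_MULTIPART_SIZE are 128 MiB (134217728 bytes).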
| 54.222222 | 245 | 0.772541 | 80 | 488 | 4.6125 | 0.7 | 0.01626 | 0.04336 | 0.054201 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.07381 | 0.139344 | 488 | 8 | 246 | 61 | 0.804762 | 0.686475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a7f701b1440f625bfec8817f0a39a899231c69f
| 105,704 |
py
|
Python
|
tencentcloud/dbbrain/v20210527/models.py
|
lleiyyang/tencentcloud-sdk-python
|
e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b
|
[
"Apache-2.0"
] | 465 |
2018-04-27T09:54:59.000Z
|
2022-03-29T02:18:01.000Z
|
tencentcloud/dbbrain/v20210527/models.py
|
lleiyyang/tencentcloud-sdk-python
|
e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b
|
[
"Apache-2.0"
] | 91 |
2018-04-27T09:48:11.000Z
|
2022-03-12T08:04:04.000Z
|
tencentcloud/dbbrain/v20210527/models.py
|
lleiyyang/tencentcloud-sdk-python
|
e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b
|
[
"Apache-2.0"
] | 232 |
2018-05-02T08:02:46.000Z
|
2022-03-30T08:02:48.000Z
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddUserContactRequest(AbstractModel):
"""AddUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Name: 联系人姓名,由中英文、数字、空格、!@#$%^&*()_+-=()组成,不能以下划线开头,长度在20以内。
:type Name: str
:param ContactInfo: 邮箱地址,支持大小写字母、数字、下划线及@字符, 不能以下划线开头,邮箱地址不可重复。
:type ContactInfo: str
:param Product: 服务产品类型,固定值:"mysql"。
:type Product: str
"""
self.Name = None
self.ContactInfo = None
self.Product = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.ContactInfo = params.get("ContactInfo")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class AddUserContactResponse(AbstractModel):
"""AddUserContact返回参数结构体
"""
def __init__(self):
r"""
:param Id: 添加成功的联系人id。
:type Id: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Id = None
self.RequestId = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.RequestId = params.get("RequestId")
class ContactItem(AbstractModel):
"""联系人contact描述。
"""
def __init__(self):
r"""
:param Id: 联系人id。
:type Id: int
:param Name: 联系人姓名。
:type Name: str
:param Mail: 联系人绑定的邮箱。
:type Mail: str
"""
self.Id = None
self.Name = None
self.Mail = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.Mail = params.get("Mail")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateDBDiagReportTaskRequest(AbstractModel):
"""CreateDBDiagReportTask请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。
:type StartTime: str
:param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。
:type EndTime: str
:param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。
:type SendMailFlag: int
:param ContactPerson: 接收邮件的联系人ID数组。
:type ContactPerson: list of int
:param ContactGroup: 接收邮件的联系组ID数组。
:type ContactGroup: list of int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SendMailFlag = None
self.ContactPerson = None
self.ContactGroup = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SendMailFlag = params.get("SendMailFlag")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateDBDiagReportTaskResponse(AbstractModel):
"""CreateDBDiagReportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。
注意:此字段可能返回 null,表示取不到有效值。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class CreateDBDiagReportUrlRequest(AbstractModel):
"""CreateDBDiagReportUrl请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateDBDiagReportUrlResponse(AbstractModel):
"""CreateDBDiagReportUrl返回参数结构体
"""
def __init__(self):
r"""
:param ReportUrl: 健康报告浏览地址。
:type ReportUrl: str
:param ExpireTime: 健康报告浏览地址到期时间戳(秒)。
:type ExpireTime: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ReportUrl = None
self.ExpireTime = None
self.RequestId = None
def _deserialize(self, params):
self.ReportUrl = params.get("ReportUrl")
self.ExpireTime = params.get("ExpireTime")
self.RequestId = params.get("RequestId")
class CreateMailProfileRequest(AbstractModel):
"""CreateMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
:type ProfileLevel: str
:param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。
:type BindInstanceIds: list of str
"""
self.ProfileInfo = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileType = None
self.Product = None
self.BindInstanceIds = None
def _deserialize(self, params):
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.BindInstanceIds = params.get("BindInstanceIds")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateMailProfileResponse(AbstractModel):
"""CreateMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSchedulerMailProfileRequest(AbstractModel):
"""CreateSchedulerMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param WeekConfiguration: 取值范围1-7,分别代表周一至周日。
:type WeekConfiguration: list of int
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param BindInstanceId: 配置订阅的实例ID。
:type BindInstanceId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.WeekConfiguration = None
self.ProfileInfo = None
self.ProfileName = None
self.BindInstanceId = None
self.Product = None
def _deserialize(self, params):
self.WeekConfiguration = params.get("WeekConfiguration")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileName = params.get("ProfileName")
self.BindInstanceId = params.get("BindInstanceId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSchedulerMailProfileResponse(AbstractModel):
"""CreateSchedulerMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityAuditLogExportTaskRequest(AbstractModel):
"""CreateSecurityAuditLogExportTask请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。
:type StartTime: str
:param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。
:type DangerLevels: list of int
"""
self.SecAuditGroupId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.DangerLevels = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.DangerLevels = params.get("DangerLevels")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class CreateSecurityAuditLogExportTaskResponse(AbstractModel):
"""CreateSecurityAuditLogExportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 日志导出任务Id。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class DeleteSecurityAuditLogExportTasksRequest(AbstractModel):
"""DeleteSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。
:type AsyncRequestIds: list of int non-negative
:param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestIds = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DeleteSecurityAuditLogExportTasksResponse(AbstractModel):
"""DeleteSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAllUserContactRequest(AbstractModel):
"""DescribeAllUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系人名数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserContactResponse(AbstractModel):
"""DescribeAllUserContact返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 联系人的总数量。
:type TotalCount: int
:param Contacts: 联系人的信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Contacts: list of ContactItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Contacts = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Contacts") is not None:
self.Contacts = []
for item in params.get("Contacts"):
obj = ContactItem()
obj._deserialize(item)
self.Contacts.append(obj)
self.RequestId = params.get("RequestId")
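# A sketch of the nested deserialization pattern above (illustrative payload):
# list fields are rebuilt item by item into their model classes.
#
#   resp = DescribeAllUserContactResponse()
#   resp._deserialize({"TotalCount": 1,
#                      "Contacts": [{"Id": 1, "Name": "dba", "Mail": "a@b.c"}],
#                      "RequestId": "req-123"})
#   print(resp.Contacts[0].Name)   # -> dba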
class DescribeAllUserGroupRequest(AbstractModel):
"""DescribeAllUserGroup请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系组名称数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeAllUserGroupResponse(AbstractModel):
"""DescribeAllUserGroup返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 组总数。
:type TotalCount: int
:param Groups: 组信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Groups: list of GroupItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Groups = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Groups") is not None:
self.Groups = []
for item in params.get("Groups"):
obj = GroupItem()
obj._deserialize(item)
self.Groups.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagEventRequest(AbstractModel):
"""DescribeDBDiagEvent请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。
:type EventId: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.EventId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.EventId = params.get("EventId")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagEventResponse(AbstractModel):
"""DescribeDBDiagEvent返回参数结构体
"""
def __init__(self):
r"""
:param DiagItem: 诊断项。
:type DiagItem: str
:param DiagType: 诊断类型。
:type DiagType: str
:param EventId: 事件 ID 。
:type EventId: int
:param Explanation: 诊断事件详情,若无附加解释信息则输出为空。
:type Explanation: str
:param Outline: 诊断概要。
:type Outline: str
:param Problem: 诊断出的问题。
:type Problem: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param StartTime: 开始时间
:type StartTime: str
:param Suggestions: 诊断建议,若无建议则输出为空。
:type Suggestions: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param EndTime: 结束时间。
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiagItem = None
self.DiagType = None
self.EventId = None
self.Explanation = None
self.Outline = None
self.Problem = None
self.Severity = None
self.StartTime = None
self.Suggestions = None
self.Metric = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.DiagType = params.get("DiagType")
self.EventId = params.get("EventId")
self.Explanation = params.get("Explanation")
self.Outline = params.get("Outline")
self.Problem = params.get("Problem")
self.Severity = params.get("Severity")
self.StartTime = params.get("StartTime")
self.Suggestions = params.get("Suggestions")
self.Metric = params.get("Metric")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
class DescribeDBDiagHistoryRequest(AbstractModel):
"""DescribeDBDiagHistory请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagHistoryResponse(AbstractModel):
"""DescribeDBDiagHistory返回参数结构体
"""
def __init__(self):
r"""
:param Events: 事件描述。
:type Events: list of DiagHistoryEventItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Events = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = DiagHistoryEventItem()
obj._deserialize(item)
self.Events.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagReportTasksRequest(AbstractModel):
"""DescribeDBDiagReportTasks请求参数结构体
"""
def __init__(self):
r"""
:param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。
:type InstanceIds: list of str
:param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Sources: list of str
:param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。
:type HealthLevels: str
:param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。
:type TaskStatuses: str
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.StartTime = None
self.EndTime = None
self.InstanceIds = None
self.Sources = None
self.HealthLevels = None
self.TaskStatuses = None
self.Offset = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.InstanceIds = params.get("InstanceIds")
self.Sources = params.get("Sources")
self.HealthLevels = params.get("HealthLevels")
self.TaskStatuses = params.get("TaskStatuses")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBDiagReportTasksResponse(AbstractModel):
"""DescribeDBDiagReportTasks返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 任务总数目。
:type TotalCount: int
:param Tasks: 任务列表。
:type Tasks: list of HealthReportTask
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Tasks = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = HealthReportTask()
obj._deserialize(item)
self.Tasks.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBSpaceStatusRequest(AbstractModel):
"""DescribeDBSpaceStatus请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param RangeDays: 时间段天数,截止日期为当日,默认为7天。
:type RangeDays: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.RangeDays = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.RangeDays = params.get("RangeDays")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDBSpaceStatusResponse(AbstractModel):
"""DescribeDBSpaceStatus返回参数结构体
"""
def __init__(self):
r"""
:param Growth: 磁盘增长量(MB)。
:type Growth: int
:param Remain: 磁盘剩余(MB)。
:type Remain: int
:param Total: 磁盘总量(MB)。
:type Total: int
:param AvailableDays: 预计可用天数。
:type AvailableDays: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Growth = None
self.Remain = None
self.Total = None
self.AvailableDays = None
self.RequestId = None
def _deserialize(self, params):
self.Growth = params.get("Growth")
self.Remain = params.get("Remain")
self.Total = params.get("Total")
self.AvailableDays = params.get("AvailableDays")
self.RequestId = params.get("RequestId")
class DescribeDiagDBInstancesRequest(AbstractModel):
"""DescribeDiagDBInstances请求参数结构体
"""
def __init__(self):
r"""
:param IsSupported: 是否是DBbrain支持的实例,固定传 true。
:type IsSupported: bool
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页参数,偏移量。
:type Offset: int
:param Limit: 分页参数,分页值,最大值为100。
:type Limit: int
:param InstanceNames: 根据实例名称条件查询。
:type InstanceNames: list of str
:param InstanceIds: 根据实例ID条件查询。
:type InstanceIds: list of str
:param Regions: 根据地域条件查询。
:type Regions: list of str
"""
self.IsSupported = None
self.Product = None
self.Offset = None
self.Limit = None
self.InstanceNames = None
self.InstanceIds = None
self.Regions = None
def _deserialize(self, params):
self.IsSupported = params.get("IsSupported")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.InstanceNames = params.get("InstanceNames")
self.InstanceIds = params.get("InstanceIds")
self.Regions = params.get("Regions")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeDiagDBInstancesResponse(AbstractModel):
"""DescribeDiagDBInstances返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 实例总数。
:type TotalCount: int
:param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。
:type DbScanStatus: int
:param Items: 实例相关信息。
:type Items: list of InstanceInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DbScanStatus = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.DbScanStatus = params.get("DbScanStatus")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = InstanceInfo()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeHealthScoreRequest(AbstractModel):
"""DescribeHealthScore请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 需要获取健康得分的实例ID。
:type InstanceId: str
:param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。
:type Time: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Time = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Time = params.get("Time")
self.Product = params.get("Product")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeHealthScoreResponse(AbstractModel):
"""DescribeHealthScore返回参数结构体
"""
def __init__(self):
r"""
:param Data: 健康得分以及异常扣分项。
:type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = HealthScoreInfo()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeMailProfileRequest(AbstractModel):
"""DescribeMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页偏移量。
:type Offset: int
:param Limit: 分页单位,最大支持50。
:type Limit: int
:param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。
:type ProfileName: str
"""
self.ProfileType = None
self.Product = None
self.Offset = None
self.Limit = None
self.ProfileName = None
def _deserialize(self, params):
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.ProfileName = params.get("ProfileName")
memeber_set = set(params.keys())
for name, value in vars(self).items():
if name in memeber_set:
memeber_set.remove(name)
if len(memeber_set) > 0:
warnings.warn("%s fileds are useless." % ",".join(memeber_set))
class DescribeMailProfileResponse(AbstractModel):
"""DescribeMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param ProfileList: 邮件配置详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileList: list of UserProfile
:param TotalCount: 邮件模版总数。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProfileList = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProfileList") is not None:
self.ProfileList = []
for item in params.get("ProfileList"):
obj = UserProfile()
obj._deserialize(item)
self.ProfileList.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeMySqlProcessListRequest(AbstractModel):
"""DescribeMySqlProcessList请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param ID: 线程的ID,用于筛选线程列表。
:type ID: int
:param User: 线程的操作账号名,用于筛选线程列表。
:type User: str
:param Host: 线程的操作主机地址,用于筛选线程列表。
:type Host: str
:param DB: 线程的操作数据库,用于筛选线程列表。
:type DB: str
:param State: 线程的操作状态,用于筛选线程列表。
:type State: str
:param Command: 线程的执行类型,用于筛选线程列表。
:type Command: str
:param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。
:type Time: int
:param Info: 线程的操作语句,用于筛选线程列表。
:type Info: str
:param Limit: 返回数量,默认20。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeMySqlProcessListResponse(AbstractModel):
    """DescribeMySqlProcessList response structure.
    """
    def __init__(self):
        r"""
        :param ProcessList: List of real-time threads.
        :type ProcessList: list of MySqlProcess
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.ProcessList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProcessList") is not None:
self.ProcessList = []
for item in params.get("ProcessList"):
obj = MySqlProcess()
obj._deserialize(item)
self.ProcessList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel):
    """DescribeSecurityAuditLogDownloadUrls response structure.
    """
    def __init__(self):
        r"""
        :param Urls: List of COS URLs of the export results. If the result set is large, it may be split into multiple URLs for download.
        :type Urls: list of str
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Urls = None
self.RequestId = None
def _deserialize(self, params):
self.Urls = params.get("Urls")
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogExportTasksRequest(AbstractModel):
"""DescribeSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param AsyncRequestIds: 日志导出任务Id列表。
:type AsyncRequestIds: list of int non-negative
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
"""
self.SecAuditGroupId = None
self.Product = None
self.AsyncRequestIds = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.Product = params.get("Product")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSecurityAuditLogExportTasksResponse(AbstractModel):
    """DescribeSecurityAuditLogExportTasks response structure.
    """
    def __init__(self):
        r"""
        :param Tasks: List of security audit log export tasks.
        :type Tasks: list of SecLogExportTaskInfo
        :param TotalCount: Total number of security audit log export tasks.
        :type TotalCount: int
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Tasks = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = SecLogExportTaskInfo()
obj._deserialize(item)
self.Tasks.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel):
"""DescribeSlowLogTimeSeriesStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel):
    """DescribeSlowLogTimeSeriesStats response structure.
    """
    def __init__(self):
        r"""
        :param Period: Time interval between adjacent bars, in seconds.
        :type Period: int
        :param TimeSeries: Number of slow logs in each unit time interval.
        :type TimeSeries: list of TimeSlice
        :param SeriesData: Instance CPU utilization monitoring data in each unit time interval.
        :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Period = None
self.TimeSeries = None
self.SeriesData = None
self.RequestId = None
def _deserialize(self, params):
self.Period = params.get("Period")
if params.get("TimeSeries") is not None:
self.TimeSeries = []
for item in params.get("TimeSeries"):
obj = TimeSlice()
obj._deserialize(item)
self.TimeSeries.append(obj)
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
self.RequestId = params.get("RequestId")
class DescribeSlowLogTopSqlsRequest(AbstractModel):
"""DescribeSlowLogTopSqls请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。
:type EndTime: str
:param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。
:type SortBy: str
:param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。
:type OrderBy: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
:param Offset: 偏移量,默认为0。
:type Offset: int
:param SchemaList: 数据库名称数组。
:type SchemaList: list of SchemaItem
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SortBy = None
self.OrderBy = None
self.Limit = None
self.Offset = None
self.SchemaList = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SortBy = params.get("SortBy")
self.OrderBy = params.get("OrderBy")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
if params.get("SchemaList") is not None:
self.SchemaList = []
for item in params.get("SchemaList"):
obj = SchemaItem()
obj._deserialize(item)
self.SchemaList.append(obj)
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogTopSqlsResponse(AbstractModel):
    """DescribeSlowLogTopSqls response structure.
    """
    def __init__(self):
        r"""
        :param TotalCount: Total number of eligible records.
        :type TotalCount: int
        :param Rows: List of top slow SQL statements.
        :type Rows: list of SlowLogTopSqlItem
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TotalCount = None
self.Rows = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Rows") is not None:
self.Rows = []
for item in params.get("Rows"):
obj = SlowLogTopSqlItem()
obj._deserialize(item)
self.Rows.append(obj)
self.RequestId = params.get("RequestId")
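# Illustrative query sketch (an assumption, not part of the generated source; the
# instance ID is a placeholder): fetch the top 10 statements by total latency.
#
#   req = DescribeSlowLogTopSqlsRequest()
#   req._deserialize({"InstanceId": "cdb-c1nl9rpv",
#                     "StartTime": "2019-09-10 12:13:14",
#                     "EndTime": "2019-09-11 10:13:14",
#                     "SortBy": "QueryTime", "OrderBy": "DESC", "Limit": 10})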
class DescribeSlowLogUserHostStatsRequest(AbstractModel):
"""DescribeSlowLogUserHostStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Md5: SOL模板的MD5值
:type Md5: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.Md5 = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogUserHostStatsResponse(AbstractModel):
    """DescribeSlowLogUserHostStats response structure.
    """
    def __init__(self):
        r"""
        :param TotalCount: Number of source addresses.
        :type TotalCount: int
        :param Items: Detailed list of the slow log proportion for each source address.
        :type Items: list of SlowLogHost
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TotalCount = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = SlowLogHost()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel):
    """DescribeTopSpaceSchemaTimeSeries response structure.
    """
    def __init__(self):
        r"""
        :param TopSpaceSchemaTimeSeries: Time series of space usage statistics of the returned top databases.
        :type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TopSpaceSchemaTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemaTimeSeries") is not None:
self.TopSpaceSchemaTimeSeries = []
for item in params.get("TopSpaceSchemaTimeSeries"):
obj = SchemaSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceSchemaTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemasRequest(AbstractModel):
"""DescribeTopSpaceSchemas请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemasResponse(AbstractModel):
    """DescribeTopSpaceSchemas response structure.
    """
    def __init__(self):
        r"""
        :param TopSpaceSchemas: List of space usage statistics of the returned top databases.
        :type TopSpaceSchemas: list of SchemaSpaceData
        :param Timestamp: Timestamp (in seconds) when the database space data was collected.
        :type Timestamp: int
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TopSpaceSchemas = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemas") is not None:
self.TopSpaceSchemas = []
for item in params.get("TopSpaceSchemas"):
obj = SchemaSpaceData()
obj._deserialize(item)
self.TopSpaceSchemas.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceTableTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel):
    """DescribeTopSpaceTableTimeSeries response structure.
    """
    def __init__(self):
        r"""
        :param TopSpaceTableTimeSeries: Time series of space usage statistics of the returned top tables.
        :type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TopSpaceTableTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTableTimeSeries") is not None:
self.TopSpaceTableTimeSeries = []
for item in params.get("TopSpaceTableTimeSeries"):
obj = TableSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceTableTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTablesRequest(AbstractModel):
"""DescribeTopSpaceTables请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTablesResponse(AbstractModel):
    """DescribeTopSpaceTables response structure.
    """
    def __init__(self):
        r"""
        :param TopSpaceTables: List of space usage statistics of the returned top tables.
        :type TopSpaceTables: list of TableSpaceData
        :param Timestamp: Timestamp (in seconds) when the table space data was collected.
        :type Timestamp: int
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.TopSpaceTables = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTables") is not None:
self.TopSpaceTables = []
for item in params.get("TopSpaceTables"):
obj = TableSpaceData()
obj._deserialize(item)
self.TopSpaceTables.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeUserSqlAdviceRequest(AbstractModel):
"""DescribeUserSqlAdvice请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
"""
self.InstanceId = None
self.SqlText = None
self.Schema = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeUserSqlAdviceResponse(AbstractModel):
    """DescribeUserSqlAdvice response structure.
    """
    def __init__(self):
        r"""
        :param Advices: SQL optimization suggestions, parseable as a JSON array; empty when no optimization is needed.
        :type Advices: str
        :param Comments: Remarks on the SQL optimization suggestions, parseable as a String array; empty when no optimization is needed.
        :type Comments: str
        :param SqlText: SQL statement.
        :type SqlText: str
        :param Schema: Database name.
        :type Schema: str
        :param Tables: DDL information of the related tables, parseable as a JSON array.
        :type Tables: str
        :param SqlPlan: SQL execution plan, parseable as JSON; empty when no optimization is needed.
        :type SqlPlan: str
        :param Cost: Cost savings after SQL optimization, parseable as JSON; empty when no optimization is needed.
        :type Cost: str
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Advices = None
self.Comments = None
self.SqlText = None
self.Schema = None
self.Tables = None
self.SqlPlan = None
self.Cost = None
self.RequestId = None
def _deserialize(self, params):
self.Advices = params.get("Advices")
self.Comments = params.get("Comments")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.Tables = params.get("Tables")
self.SqlPlan = params.get("SqlPlan")
self.Cost = params.get("Cost")
self.RequestId = params.get("RequestId")
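# Note on consuming the response above (a sketch, not part of the generated
# source): Advices, Comments, Tables, SqlPlan and Cost arrive as JSON-encoded
# strings, so callers typically decode them after deserialization:
#
#   import json
#   advices = json.loads(resp.Advices) if resp.Advices else []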
class DiagHistoryEventItem(AbstractModel):
"""实例诊断历史事件
"""
def __init__(self):
r"""
:param DiagType: 诊断类型。
:type DiagType: str
:param EndTime: 结束时间。
:type EndTime: str
:param StartTime: 开始时间。
:type StartTime: str
:param EventId: 事件唯一ID 。
:type EventId: int
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param Outline: 诊断概要。
:type Outline: str
:param DiagItem: 诊断项说明。
:type DiagItem: str
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param Region: 地域。
:type Region: str
"""
self.DiagType = None
self.EndTime = None
self.StartTime = None
self.EventId = None
self.Severity = None
self.Outline = None
self.DiagItem = None
self.InstanceId = None
self.Metric = None
self.Region = None
def _deserialize(self, params):
self.DiagType = params.get("DiagType")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.EventId = params.get("EventId")
self.Severity = params.get("Severity")
self.Outline = params.get("Outline")
self.DiagItem = params.get("DiagItem")
self.InstanceId = params.get("InstanceId")
self.Metric = params.get("Metric")
self.Region = params.get("Region")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class EventInfo(AbstractModel):
    """Exception event information.
    """
    def __init__(self):
        r"""
        :param EventId: Event ID.
        :type EventId: int
        :param DiagType: Diagnosis type.
        :type DiagType: str
        :param StartTime: Start time.
        :type StartTime: str
        :param EndTime: End time.
        :type EndTime: str
        :param Outline: Summary.
        :type Outline: str
        :param Severity: Severity, which has 5 levels. Valid values in descending order of impact: 1 (fatal), 2 (severe), 3 (alarm), 4 (notice), 5 (healthy).
        :type Severity: int
        :param ScoreLost: Deducted score.
        :type ScoreLost: int
        :param Metric: Reserved field.
        :type Metric: str
        :param Count: Number of alarms.
        :type Count: int
"""
self.EventId = None
self.DiagType = None
self.StartTime = None
self.EndTime = None
self.Outline = None
self.Severity = None
self.ScoreLost = None
self.Metric = None
self.Count = None
def _deserialize(self, params):
self.EventId = params.get("EventId")
self.DiagType = params.get("DiagType")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Outline = params.get("Outline")
self.Severity = params.get("Severity")
self.ScoreLost = params.get("ScoreLost")
self.Metric = params.get("Metric")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GroupItem(AbstractModel):
    """Group information.
    """
    def __init__(self):
        r"""
        :param Id: Group ID.
        :type Id: int
        :param Name: Group name.
        :type Name: str
        :param MemberCount: Number of group members.
        :type MemberCount: int
"""
self.Id = None
self.Name = None
self.MemberCount = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.MemberCount = params.get("MemberCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthReportTask(AbstractModel):
    """Details of a health report task.
    """
    def __init__(self):
        r"""
        :param AsyncRequestId: Async task request ID.
        :type AsyncRequestId: int
        :param Source: Trigger source of the task. Valid values: "DAILY_INSPECTION" (instance inspection), "SCHEDULED" (scheduled generation), "MANUAL" (manual trigger).
        :type Source: str
        :param Progress: Task completion progress, in %.
        :type Progress: int
        :param CreateTime: Task creation time.
        :type CreateTime: str
        :param StartTime: Task start time.
        :type StartTime: str
        :param EndTime: Task completion time.
        :type EndTime: str
        :param InstanceInfo: Basic information about the instance the task belongs to.
        :type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo`
        :param HealthStatus: Health information in the health report.
        :type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus`
"""
self.AsyncRequestId = None
self.Source = None
self.Progress = None
self.CreateTime = None
self.StartTime = None
self.EndTime = None
self.InstanceInfo = None
self.HealthStatus = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.Source = params.get("Source")
self.Progress = params.get("Progress")
self.CreateTime = params.get("CreateTime")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
if params.get("InstanceInfo") is not None:
self.InstanceInfo = InstanceBasicInfo()
self.InstanceInfo._deserialize(params.get("InstanceInfo"))
if params.get("HealthStatus") is not None:
self.HealthStatus = HealthStatus()
self.HealthStatus._deserialize(params.get("HealthStatus"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthScoreInfo(AbstractModel):
    """Details returned by the health score query.
    """
    def __init__(self):
        r"""
        :param IssueTypes: Exception details.
        :type IssueTypes: list of IssueTypeInfo
        :param EventsTotalCount: Total number of exception events.
        :type EventsTotalCount: int
        :param HealthScore: Health score.
        :type HealthScore: int
        :param HealthLevel: Health level, such as "HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK".
        :type HealthLevel: str
"""
self.IssueTypes = None
self.EventsTotalCount = None
self.HealthScore = None
self.HealthLevel = None
def _deserialize(self, params):
if params.get("IssueTypes") is not None:
self.IssueTypes = []
for item in params.get("IssueTypes"):
obj = IssueTypeInfo()
obj._deserialize(item)
self.IssueTypes.append(obj)
self.EventsTotalCount = params.get("EventsTotalCount")
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthStatus(AbstractModel):
    """Instance health details.
    """
    def __init__(self):
        r"""
        :param HealthScore: Health score, out of 100.
        :type HealthScore: int
        :param HealthLevel: Health level. Valid values: "HEALTH" (healthy), "SUB_HEALTH" (suboptimal), "RISK" (risky), "HIGH_RISK" (high-risk).
        :type HealthLevel: str
        :param ScoreLost: Total deducted score.
        :type ScoreLost: int
        :param ScoreDetails: Deduction details.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ScoreDetails: list of ScoreDetail
"""
self.HealthScore = None
self.HealthLevel = None
self.ScoreLost = None
self.ScoreDetails = None
def _deserialize(self, params):
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
self.ScoreLost = params.get("ScoreLost")
if params.get("ScoreDetails") is not None:
self.ScoreDetails = []
for item in params.get("ScoreDetails"):
obj = ScoreDetail()
obj._deserialize(item)
self.ScoreDetails.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceBasicInfo(AbstractModel):
    """Basic instance information.
    """
    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Vip: Private IP of the instance.
        :type Vip: str
        :param Vport: Private network port of the instance.
        :type Vport: int
        :param Product: Product to which the instance belongs.
        :type Product: str
        :param EngineVersion: Engine version of the instance.
        :type EngineVersion: str
"""
self.InstanceId = None
self.InstanceName = None
self.Vip = None
self.Vport = None
self.Product = None
self.EngineVersion = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Product = params.get("Product")
self.EngineVersion = params.get("EngineVersion")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceConfs(AbstractModel):
    """Instance configuration.
    """
    def __init__(self):
        r"""
        :param DailyInspection: Whether database inspection is enabled. Valid values: Yes, No.
        :type DailyInspection: str
        :param OverviewDisplay: Whether the instance overview is enabled. Valid values: Yes, No.
        :type OverviewDisplay: str
"""
self.DailyInspection = None
self.OverviewDisplay = None
def _deserialize(self, params):
self.DailyInspection = params.get("DailyInspection")
self.OverviewDisplay = params.get("OverviewDisplay")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceInfo(AbstractModel):
    """Object describing instance details returned by the instance list query.
    """
    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param InstanceName: Instance name.
        :type InstanceName: str
        :param Region: Region the instance belongs to.
        :type Region: str
        :param HealthScore: Health score.
        :type HealthScore: int
        :param Product: Product the instance belongs to.
        :type Product: str
        :param EventCount: Number of exception events.
        :type EventCount: int
        :param InstanceType: Instance type. Valid values: 1 (MASTER), 2 (DR), 3 (RO), 4 (SDR).
        :type InstanceType: int
        :param Cpu: Number of cores.
        :type Cpu: int
        :param Memory: Memory, in MB.
        :type Memory: int
        :param Volume: Disk storage, in GB.
        :type Volume: int
        :param EngineVersion: Database version.
        :type EngineVersion: str
        :param Vip: Private IP address.
        :type Vip: str
        :param Vport: Private network port.
        :type Vport: int
        :param Source: Access source.
        :type Source: str
        :param GroupId: Group ID.
        :type GroupId: str
        :param GroupName: Group name.
        :type GroupName: str
        :param Status: Instance status. Valid values: 0 (delivering), 1 (running), 4 (terminating), 5 (isolated).
        :type Status: int
        :param UniqSubnetId: Unified subnet ID.
        :type UniqSubnetId: str
        :param DeployMode: TencentDB (cdb) instance type.
        :type DeployMode: str
        :param InitFlag: TencentDB (cdb) instance initialization flag. Valid values: 0 (not initialized), 1 (initialized).
        :type InitFlag: int
        :param TaskStatus: Task status.
        :type TaskStatus: int
        :param UniqVpcId: Unified VPC ID.
        :type UniqVpcId: str
        :param InstanceConf: Inspection/overview status of the instance.
        :type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
        :param DeadlineTime: Resource expiration time.
        :type DeadlineTime: str
        :param IsSupported: Whether the instance is supported by DBbrain.
        :type IsSupported: bool
        :param SecAuditStatus: Security audit log status of the instance. Valid values: ON (security audit enabled), OFF (security audit not enabled).
        :type SecAuditStatus: str
        :param AuditPolicyStatus: Audit log status of the instance. Valid values: ALL_AUDIT (full audit enabled), RULE_AUDIT (rule-based audit enabled), UNBOUND (audit not enabled).
        :type AuditPolicyStatus: str
        :param AuditRunningStatus: Audit log running status of the instance. Valid values: normal (running), paused (suspended due to overdue payment).
        :type AuditRunningStatus: str
"""
self.InstanceId = None
self.InstanceName = None
self.Region = None
self.HealthScore = None
self.Product = None
self.EventCount = None
self.InstanceType = None
self.Cpu = None
self.Memory = None
self.Volume = None
self.EngineVersion = None
self.Vip = None
self.Vport = None
self.Source = None
self.GroupId = None
self.GroupName = None
self.Status = None
self.UniqSubnetId = None
self.DeployMode = None
self.InitFlag = None
self.TaskStatus = None
self.UniqVpcId = None
self.InstanceConf = None
self.DeadlineTime = None
self.IsSupported = None
self.SecAuditStatus = None
self.AuditPolicyStatus = None
self.AuditRunningStatus = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Region = params.get("Region")
self.HealthScore = params.get("HealthScore")
self.Product = params.get("Product")
self.EventCount = params.get("EventCount")
self.InstanceType = params.get("InstanceType")
self.Cpu = params.get("Cpu")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.EngineVersion = params.get("EngineVersion")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Source = params.get("Source")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.Status = params.get("Status")
self.UniqSubnetId = params.get("UniqSubnetId")
self.DeployMode = params.get("DeployMode")
self.InitFlag = params.get("InitFlag")
self.TaskStatus = params.get("TaskStatus")
self.UniqVpcId = params.get("UniqVpcId")
if params.get("InstanceConf") is not None:
self.InstanceConf = InstanceConfs()
self.InstanceConf._deserialize(params.get("InstanceConf"))
self.DeadlineTime = params.get("DeadlineTime")
self.IsSupported = params.get("IsSupported")
self.SecAuditStatus = params.get("SecAuditStatus")
self.AuditPolicyStatus = params.get("AuditPolicyStatus")
self.AuditRunningStatus = params.get("AuditRunningStatus")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class IssueTypeInfo(AbstractModel):
    """Metric information.
    """
    def __init__(self):
        r"""
        :param IssueType: Metric category. Valid values: AVAILABILITY (availability), MAINTAINABILITY (maintainability), PERFORMANCE (performance), RELIABILITY (reliability).
        :type IssueType: str
        :param Events: Exception events.
        :type Events: list of EventInfo
        :param TotalCount: Total number of exception events.
        :type TotalCount: int
"""
self.IssueType = None
self.Events = None
self.TotalCount = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = EventInfo()
obj._deserialize(item)
self.Events.append(obj)
self.TotalCount = params.get("TotalCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsRequest(AbstractModel):
    """KillMySqlThreads request structure.
    """
    def __init__(self):
        r"""
        :param InstanceId: Instance ID.
        :type InstanceId: str
        :param Stage: Stage of the session-killing task. Valid values: "Prepare" (preparation stage), "Commit" (commit stage).
        :type Stage: str
        :param Threads: List of IDs of the SQL sessions to kill; used in the Prepare stage.
        :type Threads: list of int
        :param SqlExecId: Execution ID; used in the Commit stage.
        :type SqlExecId: str
        :param Product: Service type. Valid values: "mysql" (TencentDB for MySQL), "cynosdb" (TDSQL-C for MySQL). Default value: "mysql".
        :type Product: str
"""
self.InstanceId = None
self.Stage = None
self.Threads = None
self.SqlExecId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Stage = params.get("Stage")
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsResponse(AbstractModel):
    """KillMySqlThreads response structure.
    """
    def __init__(self):
        r"""
        :param Threads: List of IDs of the killed SQL sessions.
        :type Threads: list of int
        :param SqlExecId: Execution ID output by the Prepare-stage task, used in the Commit stage to specify the sessions on which to execute the kill operation.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type SqlExecId: str
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.Threads = None
self.SqlExecId = None
self.RequestId = None
def _deserialize(self, params):
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.RequestId = params.get("RequestId")
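# Illustrative two-stage kill flow implied by the Stage parameter above (a
# sketch, not part of the generated source; all IDs are placeholders):
#
#   1. Prepare: submit the thread IDs and read SqlExecId from the response.
#      prepare = KillMySqlThreadsRequest()
#      prepare._deserialize({"InstanceId": "cdb-c1nl9rpv", "Stage": "Prepare",
#                            "Threads": [1234, 5678]})
#   2. Commit: replay the request with the SqlExecId returned by Prepare.
#      commit = KillMySqlThreadsRequest()
#      commit._deserialize({"InstanceId": "cdb-c1nl9rpv", "Stage": "Commit",
#                           "SqlExecId": "<id from the Prepare response>"})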
class MailConfiguration(AbstractModel):
"""邮件发送配置
"""
def __init__(self):
r"""
:param SendMail: 是否开启邮件发送: 0, 否; 1, 是。
:type SendMail: int
:param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。
:type Region: list of str
:param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。
:type HealthStatus: list of str
:param ContactPerson: 联系人id, 联系人/联系组不能都为空。
:type ContactPerson: list of int
:param ContactGroup: 联系组id, 联系人/联系组不能都为空。
:type ContactGroup: list of int
"""
self.SendMail = None
self.Region = None
self.HealthStatus = None
self.ContactPerson = None
self.ContactGroup = None
def _deserialize(self, params):
self.SendMail = params.get("SendMail")
self.Region = params.get("Region")
self.HealthStatus = params.get("HealthStatus")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfRequest(AbstractModel):
    """ModifyDiagDBInstanceConf request structure.
    """
    def __init__(self):
        r"""
        :param InstanceConfs: Instance configuration, including the inspection and overview switches.
        :type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
        :param Regions: Effective instance region. The value "All" means all regions.
        :type Regions: str
        :param Product: Service type. Valid values: "mysql" (TencentDB for MySQL), "cynosdb" (TDSQL-C for MySQL).
        :type Product: str
        :param InstanceIds: IDs of the instances whose inspection status is to be changed.
        :type InstanceIds: list of str
"""
self.InstanceConfs = None
self.Regions = None
self.Product = None
self.InstanceIds = None
def _deserialize(self, params):
if params.get("InstanceConfs") is not None:
self.InstanceConfs = InstanceConfs()
self.InstanceConfs._deserialize(params.get("InstanceConfs"))
self.Regions = params.get("Regions")
self.Product = params.get("Product")
self.InstanceIds = params.get("InstanceIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfResponse(AbstractModel):
    """ModifyDiagDBInstanceConf response structure.
    """
    def __init__(self):
        r"""
        :param RequestId: Unique request ID, which is returned for each request. RequestId is required to locate a problem.
        :type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class MonitorFloatMetric(AbstractModel):
"""监控数据(浮点型)
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorFloatMetricSeriesData(AbstractModel):
    """Monitoring metric data (float) in each unit time interval.
    """
    def __init__(self):
        r"""
        :param Series: Monitoring metrics.
        :type Series: list of MonitorFloatMetric
        :param Timestamp: Timestamps corresponding to the monitoring metrics.
        :type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorFloatMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetric(AbstractModel):
    """Monitoring data.
    """
    def __init__(self):
        r"""
        :param Metric: Metric name.
        :type Metric: str
        :param Unit: Metric unit.
        :type Unit: str
        :param Values: Metric values.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetricSeriesData(AbstractModel):
    """Monitoring metric data in each unit time interval.
    """
    def __init__(self):
        r"""
        :param Series: Monitoring metrics.
        :type Series: list of MonitorMetric
        :param Timestamp: Timestamps corresponding to the monitoring metrics.
        :type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MySqlProcess(AbstractModel):
    """Relational database thread.
    """
    def __init__(self):
        r"""
        :param ID: Thread ID.
        :type ID: str
        :param User: Operation account name of the thread.
        :type User: str
        :param Host: Operation host address of the thread.
        :type Host: str
        :param DB: Operation database of the thread.
        :type DB: str
        :param State: Operation state of the thread.
        :type State: str
        :param Command: Execution type of the thread.
        :type Command: str
        :param Time: Operation duration of the thread, in seconds.
        :type Time: str
        :param Info: Operation statement of the thread.
        :type Info: str
"""
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
def _deserialize(self, params):
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ProfileInfo(AbstractModel):
    """Information configured by the user.
    """
    def __init__(self):
        r"""
        :param Language: Language, such as "zh".
        :type Language: str
        :param MailConfiguration: Content of the email template.
        :type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration`
"""
self.Language = None
self.MailConfiguration = None
def _deserialize(self, params):
self.Language = params.get("Language")
if params.get("MailConfiguration") is not None:
self.MailConfiguration = MailConfiguration()
self.MailConfiguration._deserialize(params.get("MailConfiguration"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaItem(AbstractModel):
    """SchemaItem array.
    """
    def __init__(self):
        r"""
        :param Schema: Database name.
        :type Schema: str
"""
self.Schema = None
def _deserialize(self, params):
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceData(AbstractModel):
    """Database space usage statistics.
    """
    def __init__(self):
        r"""
        :param TableSchema: Database name.
        :type TableSchema: str
        :param DataLength: Data space, in MB.
        :type DataLength: float
        :param IndexLength: Index space, in MB.
        :type IndexLength: float
        :param DataFree: Fragmented space, in MB.
        :type DataFree: float
        :param TotalLength: Total space used, in MB.
        :type TotalLength: float
        :param FragRatio: Fragmentation rate, in %.
        :type FragRatio: float
        :param TableRows: Number of rows.
        :type TableRows: int
        :param PhysicalFileSize: Total size (in MB) of the independent physical files of all tables in the database.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type PhysicalFileSize: float
"""
self.TableSchema = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceTimeSeries(AbstractModel):
    """Time series of database space usage.
    """
    def __init__(self):
        r"""
        :param TableSchema: Database name.
        :type TableSchema: str
        :param SeriesData: Space metric data in each unit time interval.
        :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
"""
self.TableSchema = None
self.SeriesData = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreDetail(AbstractModel):
    """Deduction details.
    """
    def __init__(self):
        r"""
        :param IssueType: Deduction item category. Valid values: availability, maintainability, performance, reliability.
        :type IssueType: str
        :param ScoreLost: Total deducted score.
        :type ScoreLost: int
        :param ScoreLostMax: Upper limit of the deducted score.
        :type ScoreLostMax: int
        :param Items: List of deduction items.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type Items: list of ScoreItem
"""
self.IssueType = None
self.ScoreLost = None
self.ScoreLostMax = None
self.Items = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
self.ScoreLost = params.get("ScoreLost")
self.ScoreLostMax = params.get("ScoreLostMax")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = ScoreItem()
obj._deserialize(item)
self.Items.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreItem(AbstractModel):
    """Diagnosis deduction item.
    """
    def __init__(self):
        r"""
        :param DiagItem: Name of the exception diagnosis item.
        :type DiagItem: str
        :param IssueType: Diagnosis item category. Valid values: availability, maintainability, performance, reliability.
        :type IssueType: str
        :param TopSeverity: Health level. Valid values: information, notice, alarm, severe, fatal.
        :type TopSeverity: str
        :param Count: Number of occurrences of this exception diagnosis item.
        :type Count: int
        :param ScoreLost: Deducted score.
        :type ScoreLost: int
"""
self.DiagItem = None
self.IssueType = None
self.TopSeverity = None
self.Count = None
self.ScoreLost = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.IssueType = params.get("IssueType")
self.TopSeverity = params.get("TopSeverity")
self.Count = params.get("Count")
self.ScoreLost = params.get("ScoreLost")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SecLogExportTaskInfo(AbstractModel):
    """Security audit log export task information.
    """
    def __init__(self):
        r"""
        :param AsyncRequestId: Async task ID.
        :type AsyncRequestId: int
        :param StartTime: Task start time.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type StartTime: str
        :param EndTime: Task end time.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type EndTime: str
        :param CreateTime: Task creation time.
        :type CreateTime: str
        :param Status: Task status.
        :type Status: str
        :param Progress: Task execution progress.
        :type Progress: int
        :param LogStartTime: Start time of the exported logs.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type LogStartTime: str
        :param LogEndTime: End time of the exported logs.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type LogEndTime: str
        :param TotalSize: Total size of the log files, in KB.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type TotalSize: int
        :param DangerLevels: List of risk levels. Valid values: 0 (no risk), 1 (low risk), 2 (medium risk), 3 (high risk).
        Note: this field may return null, indicating that no valid value can be obtained.
        :type DangerLevels: list of int non-negative
"""
self.AsyncRequestId = None
self.StartTime = None
self.EndTime = None
self.CreateTime = None
self.Status = None
self.Progress = None
self.LogStartTime = None
self.LogEndTime = None
self.TotalSize = None
self.DangerLevels = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.Progress = params.get("Progress")
self.LogStartTime = params.get("LogStartTime")
self.LogEndTime = params.get("LogEndTime")
self.TotalSize = params.get("TotalSize")
self.DangerLevels = params.get("DangerLevels")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogHost(AbstractModel):
    """Details of slow log source addresses.
    """
    def __init__(self):
        r"""
        :param UserHost: Source address.
        :type UserHost: str
        :param Ratio: Proportion (in %) of the slow logs from this source address to the total number of slow logs.
        :type Ratio: float
        :param Count: Number of slow logs from this source address.
        :type Count: int
"""
self.UserHost = None
self.Ratio = None
self.Count = None
def _deserialize(self, params):
self.UserHost = params.get("UserHost")
self.Ratio = params.get("Ratio")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogTopSqlItem(AbstractModel):
    """Top slow SQL statement.
    """
    def __init__(self):
        r"""
        :param LockTime: Total SQL lock wait time, in seconds.
        :type LockTime: float
        :param LockTimeMax: Maximum lock wait time, in seconds.
        :type LockTimeMax: float
        :param LockTimeMin: Minimum lock wait time, in seconds.
        :type LockTimeMin: float
        :param RowsExamined: Total number of scanned rows.
        :type RowsExamined: int
        :param RowsExaminedMax: Maximum number of scanned rows.
        :type RowsExaminedMax: int
        :param RowsExaminedMin: Minimum number of scanned rows.
        :type RowsExaminedMin: int
        :param QueryTime: Total duration, in seconds.
        :type QueryTime: float
        :param QueryTimeMax: Maximum execution time, in seconds.
        :type QueryTimeMax: float
        :param QueryTimeMin: Minimum execution time, in seconds.
        :type QueryTimeMin: float
        :param RowsSent: Total number of returned rows.
        :type RowsSent: int
        :param RowsSentMax: Maximum number of returned rows.
        :type RowsSentMax: int
        :param RowsSentMin: Minimum number of returned rows.
        :type RowsSentMin: int
        :param ExecTimes: Number of executions.
        :type ExecTimes: int
        :param SqlTemplate: SQL template.
        :type SqlTemplate: str
        :param SqlText: SQL statement with parameters (randomly sampled).
        :type SqlText: str
        :param Schema: Database name.
        :type Schema: str
        :param QueryTimeRatio: Proportion of the total duration, in %.
        :type QueryTimeRatio: float
        :param LockTimeRatio: Proportion of the total SQL lock wait time, in %.
        :type LockTimeRatio: float
        :param RowsExaminedRatio: Proportion of the total number of scanned rows, in %.
        :type RowsExaminedRatio: float
        :param RowsSentRatio: Proportion of the total number of returned rows, in %.
        :type RowsSentRatio: float
        :param QueryTimeAvg: Average execution time, in seconds.
        :type QueryTimeAvg: float
        :param RowsSentAvg: Average number of returned rows.
        :type RowsSentAvg: float
        :param LockTimeAvg: Average lock wait time, in seconds.
        :type LockTimeAvg: float
        :param RowsExaminedAvg: Average number of scanned rows.
        :type RowsExaminedAvg: float
        :param Md5: MD5 value of the SQL template.
        :type Md5: str
"""
self.LockTime = None
self.LockTimeMax = None
self.LockTimeMin = None
self.RowsExamined = None
self.RowsExaminedMax = None
self.RowsExaminedMin = None
self.QueryTime = None
self.QueryTimeMax = None
self.QueryTimeMin = None
self.RowsSent = None
self.RowsSentMax = None
self.RowsSentMin = None
self.ExecTimes = None
self.SqlTemplate = None
self.SqlText = None
self.Schema = None
self.QueryTimeRatio = None
self.LockTimeRatio = None
self.RowsExaminedRatio = None
self.RowsSentRatio = None
self.QueryTimeAvg = None
self.RowsSentAvg = None
self.LockTimeAvg = None
self.RowsExaminedAvg = None
self.Md5 = None
def _deserialize(self, params):
self.LockTime = params.get("LockTime")
self.LockTimeMax = params.get("LockTimeMax")
self.LockTimeMin = params.get("LockTimeMin")
self.RowsExamined = params.get("RowsExamined")
self.RowsExaminedMax = params.get("RowsExaminedMax")
self.RowsExaminedMin = params.get("RowsExaminedMin")
self.QueryTime = params.get("QueryTime")
self.QueryTimeMax = params.get("QueryTimeMax")
self.QueryTimeMin = params.get("QueryTimeMin")
self.RowsSent = params.get("RowsSent")
self.RowsSentMax = params.get("RowsSentMax")
self.RowsSentMin = params.get("RowsSentMin")
self.ExecTimes = params.get("ExecTimes")
self.SqlTemplate = params.get("SqlTemplate")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.QueryTimeRatio = params.get("QueryTimeRatio")
self.LockTimeRatio = params.get("LockTimeRatio")
self.RowsExaminedRatio = params.get("RowsExaminedRatio")
self.RowsSentRatio = params.get("RowsSentRatio")
self.QueryTimeAvg = params.get("QueryTimeAvg")
self.RowsSentAvg = params.get("RowsSentAvg")
self.LockTimeAvg = params.get("LockTimeAvg")
self.RowsExaminedAvg = params.get("RowsExaminedAvg")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceData(AbstractModel):
    """Table space usage statistics.
    """
    def __init__(self):
        r"""
        :param TableName: Table name.
        :type TableName: str
        :param TableSchema: Database name.
        :type TableSchema: str
        :param Engine: Storage engine of the table.
        :type Engine: str
        :param DataLength: Data space, in MB.
        :type DataLength: float
        :param IndexLength: Index space, in MB.
        :type IndexLength: float
        :param DataFree: Fragmented space, in MB.
        :type DataFree: float
        :param TotalLength: Total space used, in MB.
        :type TotalLength: float
        :param FragRatio: Fragmentation rate, in %.
        :type FragRatio: float
        :param TableRows: Number of rows.
        :type TableRows: int
        :param PhysicalFileSize: Size (in MB) of the independent physical file of the table.
        :type PhysicalFileSize: float
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceTimeSeries(AbstractModel):
    """Time series of table space usage.
    """
    def __init__(self):
        r"""
        :param TableName: Table name.
        :type TableName: str
        :param TableSchema: Database name.
        :type TableSchema: str
        :param Engine: Storage engine of the table.
        :type Engine: str
        :param SeriesData: Space metric data in each unit time interval.
        :type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData`
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.SeriesData = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorFloatMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TimeSlice(AbstractModel):
    """Slow log statistics in each unit time interval.
    """
    def __init__(self):
        r"""
        :param Count: Total count.
        :type Count: int
        :param Timestamp: Start time of the statistics.
        :type Timestamp: int
"""
self.Count = None
self.Timestamp = None
def _deserialize(self, params):
self.Count = params.get("Count")
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserProfile(AbstractModel):
    """User configuration, including the email configuration.
    """
    def __init__(self):
        r"""
        :param ProfileId: Configuration ID.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ProfileId: str
        :param ProfileType: Configuration type. Valid values: "dbScan_mail_configuration" (email configuration of the database inspection report), "scheduler_mail_configuration" (email configuration of the regularly generated report).
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ProfileType: str
        :param ProfileLevel: Configuration level. Valid values: "User" (user level), "Instance" (instance level). The database inspection email configuration is at the user level, while the regularly generated email configuration is at the instance level.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ProfileLevel: str
        :param ProfileName: Configuration name.
        Note: this field may return null, indicating that no valid value can be obtained.
        :type ProfileName: str
        :param ProfileInfo: Configuration details.
        :type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
"""
self.ProfileId = None
self.ProfileType = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileInfo = None
def _deserialize(self, params):
self.ProfileId = params.get("ProfileId")
self.ProfileType = params.get("ProfileType")
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
| 31.310427 | 195 | 0.597149 | 10,779 | 105,704 | 5.781705 | 0.088505 | 0.064264 | 0.015532 | 0.016945 | 0.658077 | 0.609153 | 0.559009 | 0.541391 | 0.510069 | 0.499318 | 0 | 0.007231 | 0.293518 | 105,704 | 3,376 | 196 | 31.310427 | 0.827299 | 0.307793 | 0 | 0.727163 | 0 | 0 | 0.080074 | 0.001422 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105769 | false | 0 | 0.001202 | 0 | 0.159856 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a836399736ccfbfdcec602215566bd6e9ae598c
| 2,201 |
py
|
Python
|
melisa/utils/snowflake.py
|
MelisaDev/melisa
|
53fee10d8c1bf4dd716bc90096c16f096e11bfbf
|
[
"MIT"
] | 5 |
2022-03-11T19:51:28.000Z
|
2022-03-13T16:28:58.000Z
|
melisa/utils/snowflake.py
|
jungledev1/melisa
|
835e4b644e50b5038599ecbd1bfa510a0d3200e9
|
[
"MIT"
] | 2 |
2022-03-19T18:09:39.000Z
|
2022-03-23T12:18:49.000Z
|
melisa/utils/snowflake.py
|
jungledev1/melisa
|
835e4b644e50b5038599ecbd1bfa510a0d3200e9
|
[
"MIT"
] | 1 |
2022-03-23T07:30:04.000Z
|
2022-03-23T07:30:04.000Z
|
# Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
class Snowflake(int):
"""
Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs).
These IDs are guaranteed to be unique across all of Discord,
except in some unique scenarios in which child objects share their parent's ID.
Because Snowflake IDs are up to 64 bits in size (e.g. a uint64),
they are always returned as strings in the HTTP API
to prevent integer overflows in some languages.
See Gateway ETF/JSON for more information regarding Gateway encoding.
Read more here: https://discord.com/developers/docs/reference#snowflakes
"""
_MAX_VALUE: int = 9223372036854775807
_MIN_VALUE: int = 0
def __init__(self, _):
super().__init__()
if self < self._MIN_VALUE:
raise ValueError("snowflake value should be greater than or equal to 0.")
if self > self._MAX_VALUE:
raise ValueError(
"snowflake value should be less than or equal to 9223372036854775807."
)
@classmethod
def __factory__(cls, string: str) -> Snowflake:
return cls.from_string(string)
@classmethod
def from_string(cls, string: str):
"""Initialize a new Snowflake from a string.
Parameters
----------
string: :class:`str`
The snowflake as a string.
"""
return Snowflake(int(string))
@property
def timestamp(self) -> int:
"""
Milliseconds since Discord Epoch, the first second of 2015 or 1420070400000.
"""
return self >> 22
@property
def worker_id(self) -> int:
"""Internal worker ID"""
return (self >> 17) % 16
@property
def process_id(self) -> int:
"""Internal process ID"""
return (self >> 12) % 16
@property
def increment(self) -> int:
"""For every ID that is generated on that process, this number is incremented"""
return self % 2048
@property
    def unix(self) -> int:
        """Milliseconds since the Unix epoch (Discord epoch offset applied)."""
        return self.timestamp + 1420070400000
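A quick usage sketch for the class above; the ID is the example snowflake from the Discord developer docs linked in the docstring, and the import path assumes the file location shown in the repository metadata:

from melisa.utils.snowflake import Snowflake

sf = Snowflake.from_string("175928847299117063")
print(sf.timestamp)                               # ms since the Discord epoch (sf >> 22)
print(sf.unix)                                    # ms since the Unix epoch
print(sf.worker_id, sf.process_id, sf.increment)  # decoded internal fields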
| 30.150685 | 92 | 0.63562 | 272 | 2,201 | 5.036765 | 0.518382 | 0.040146 | 0.014599 | 0.042336 | 0.061314 | 0.061314 | 0.061314 | 0 | 0 | 0 | 0 | 0.058081 | 0.280327 | 2,201 | 72 | 93 | 30.569444 | 0.806818 | 0.431168 | 0 | 0.212121 | 0 | 0 | 0.107747 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.242424 | false | 0 | 0.030303 | 0.060606 | 0.575758 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6a89b2893b587e6d66f6aa207ca89999bce84710
| 846 |
py
|
Python
|
utils/config.py
|
jtr109/Alpha2kindle
|
a411d05cafa9036a732eeb75fa13f68963f254e3
|
[
"MIT"
] | null | null | null |
utils/config.py
|
jtr109/Alpha2kindle
|
a411d05cafa9036a732eeb75fa13f68963f254e3
|
[
"MIT"
] | null | null | null |
utils/config.py
|
jtr109/Alpha2kindle
|
a411d05cafa9036a732eeb75fa13f68963f254e3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
class BaseConf(object):
    # Browser-like request headers shared by all configurations.
    HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/55.0.2883.95 "
"Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,*/*;"
"q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
"Cache-Control": "max-age=0",
}
class TestConf(BaseConf):
    # Redis connection URL; the password is read from the REDIS_PWD environment variable.
    REDIS_URL = "redis://:{password}@{hostname}:{port}/{db_number}".format(
password=os.environ.get("REDIS_PWD"),
hostname='127.0.0.1',
port=6379,
db_number=0
)
CURCONF = TestConf
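A minimal sketch of how this configuration might be consumed; the requests and redis dependencies and the call sites are assumptions, since the source shows only the config module itself:

import redis
import requests

from utils.config import CURCONF

# Fetch a page with the shared browser-like headers.
resp = requests.get("https://example.com", headers=CURCONF.HEADERS)

# Connect to Redis using the configured URL.
client = redis.Redis.from_url(CURCONF.REDIS_URL)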
| 27.290323 | 75 | 0.51773 | 110 | 846 | 3.927273 | 0.663636 | 0.023148 | 0.013889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082631 | 0.299054 | 846 | 30 | 76 | 28.2 | 0.645868 | 0.024823 | 0 | 0 | 0 | 0.045455 | 0.470231 | 0.185905 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.090909 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
6a8e7fcaf4ca3d67de4aab013987d7db788188b5
| 252 |
py
|
Python
|
pyqtgraph/examples/template.py
|
secantsquared/pyqtgraph
|
3ef7f5b91639543e43bcd66a84290fb9bc18fc5c
|
[
"MIT"
] | null | null | null |
pyqtgraph/examples/template.py
|
secantsquared/pyqtgraph
|
3ef7f5b91639543e43bcd66a84290fb9bc18fc5c
|
[
"MIT"
] | null | null | null |
pyqtgraph/examples/template.py
|
secantsquared/pyqtgraph
|
3ef7f5b91639543e43bcd66a84290fb9bc18fc5c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Description of example
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, mkQApp
import numpy as np
app = mkQApp()

# Create widgets and windows here, for example:
# win = pg.GraphicsLayoutWidget(show=True)
# win.setWindowTitle('pyqtgraph example: ____')
if __name__ == '__main__':
pg.exec()
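A filled-in version of the template as a sketch; the sine-wave data and the window title are illustrative, not from the source:

# -*- coding: utf-8 -*-
"""
Plot a sine wave using the template structure above.
"""
import pyqtgraph as pg
from pyqtgraph.Qt import mkQApp
import numpy as np

app = mkQApp()

win = pg.GraphicsLayoutWidget(show=True)
win.setWindowTitle('pyqtgraph example: sine wave')
plot = win.addPlot(title="y = sin(x)")
x = np.linspace(0, 2 * np.pi, 200)
plot.plot(x, np.sin(x))

if __name__ == '__main__':
    pg.exec()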
| 15.75 | 47 | 0.68254 | 32 | 252 | 5 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004808 | 0.174603 | 252 | 15 | 48 | 16.8 | 0.764423 | 0.361111 | 0 | 0 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
6a8fddf8511ca7d429d8644119f475536d5dae17
| 2,486 |
py
|
Python
|
main.py
|
ThomasDLi/simple-photo-editor
|
f8b3f1025155e2542b93b94c12d607b9b5e45731
|
[
"MIT"
] | 1 |
2021-05-21T19:21:26.000Z
|
2021-05-21T19:21:26.000Z
|
main.py
|
ThomasDLi/simple-photo-editor
|
f8b3f1025155e2542b93b94c12d607b9b5e45731
|
[
"MIT"
] | null | null | null |
main.py
|
ThomasDLi/simple-photo-editor
|
f8b3f1025155e2542b93b94c12d607b9b5e45731
|
[
"MIT"
] | null | null | null |
from PIL import Image, ImageEnhance
user_account_name = "Thomas.Li26"
def main():
    mode = input("Specify image editing mode. Type DEEPFRY, STRETCH, BRIGHTNESS, SHARPEN, or INVERT: ")
    if mode == "DEEPFRY":
        DEEPFRY()
    elif mode == "STRETCH":
        STRETCH()
    elif mode == "INVERT":
        INVERT()
    elif mode == "BRIGHTNESS":
        BRIGHTNESS()
    elif mode == "SHARPEN":
        SHARPEN()
def DEEPFRY():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
factor = float(input("Specify deepfry amount (0-100): "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def STRETCH():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
factor = int(input("Specify width: "))
factor2 = int(input("Specify height: "))
im_output = im.resize((factor,factor2))
im_output.save('more-contrast-image.png')
im_output.show()
def INVERT():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
    enhancer = ImageEnhance.Contrast(im)
    # A contrast factor of -1 reflects pixel values around the image's mean
    # gray level, which produces the inverted look.
    im_output = enhancer.enhance(-1)
im_output.save('more-contrast-image.png')
im_output.show()
def BRIGHTNESS():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Brightness(im)
factor = float(input("Specify brightness amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def SHARPEN():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Sharpness(im)
factor = float(input("Specify sharpening amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
if __name__ == "__main__":
main()
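The five editing functions repeat the same open/save/show scaffolding; one way to collapse the duplication is a table-driven dispatch. A sketch under that assumption (the helper names open_download, ENHANCERS, and run are invented here; STRETCH and INVERT are special-cased because they do not take a single enhancement factor):

from PIL import Image, ImageEnhance

def open_download(user_account_name: str) -> Image.Image:
    """Prompt for a file in the Downloads folder and open it."""
    img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
    return Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))

# Mode -> (enhancer class, prompt for the factor).
ENHANCERS = {
    "DEEPFRY": (ImageEnhance.Contrast, "Specify deepfry amount (0-100): "),
    "BRIGHTNESS": (ImageEnhance.Brightness, "Specify brightness amount: "),
    "SHARPEN": (ImageEnhance.Sharpness, "Specify sharpening amount: "),
}

def run(mode: str, user_account_name: str = "Thomas.Li26") -> None:
    im = open_download(user_account_name)
    if mode == "INVERT":
        im_output = ImageEnhance.Contrast(im).enhance(-1)
    elif mode == "STRETCH":
        width = int(input("Specify width: "))
        height = int(input("Specify height: "))
        im_output = im.resize((width, height))
    else:
        enhancer_cls, prompt = ENHANCERS[mode]
        im_output = enhancer_cls(im).enhance(float(input(prompt)))
    im_output.save('more-contrast-image.png')
    im_output.show()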
| 38.84375 | 104 | 0.650442 | 326 | 2,486 | 4.852761 | 0.190184 | 0.075853 | 0.063211 | 0.053729 | 0.710493 | 0.664349 | 0.664349 | 0.664349 | 0.664349 | 0.664349 | 0 | 0.004555 | 0.205149 | 2,486 | 63 | 105 | 39.460317 | 0.796053 | 0 | 0 | 0.454545 | 0 | 0 | 0.37598 | 0.096987 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.018182 | 0 | 0.127273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |