hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6b3e1154af6f1eb866c2c34cdc822a0ff3902ab9
| 2,191 |
py
|
Python
|
sorting/python/max_heap.py
|
zhou7rui/algorithm
|
9b5500ac3d8bdfd223bf9aec55e68675f2df7c59
|
[
"MIT"
] | 6 |
2017-08-31T07:13:34.000Z
|
2018-09-10T08:54:43.000Z
|
sorting/python/max_heap.py
|
zhou7rui/algorithm
|
9b5500ac3d8bdfd223bf9aec55e68675f2df7c59
|
[
"MIT"
] | null | null | null |
sorting/python/max_heap.py
|
zhou7rui/algorithm
|
9b5500ac3d8bdfd223bf9aec55e68675f2df7c59
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*
'''
最大堆实现
98
/ \
96 84
/ \ / \
92 82 78 47
/ \ / \ / \ / \
33 26 51 85 50 15 44 60
/ \ / \ / \ / \ / \ / \ / \ / \
40 51 98 51 7 17 94 82 32 21 64 60 7 44 63 63
'''
import random
class Maxheap(object):
def __init__(self,cpacity,arr = None):
self.data = [None] * (cpacity + 1)
self.cpacity = cpacity
if arr is None:
self.count = 0
else:
for i in range(0,cpacity):
self.data[i + 1]= arr[i]
self.count = cpacity
for i in range(self.count / 2, 0, -1):
self.__shifDown(i)
def size(self):
return self.count
def isEmpty(self):
return self.count == 0
def __shiftUp(self,k):
while k > 1 and self.data[k] > self.data[int(k / 2)]:
self.data[k],self.data[int(k / 2)] = self.data[int(k / 2)], self.data[k]
k =int(k/2)
def insert(self,data):
self.data[self.count + 1] = data
self.count += 1
self.__shiftUp(self.count)
def __shifDown(self,k):
while k * 2 <= self.count:
j = k * 2
if self.count >= j + 1 and self.data[j + 1] > self.data[j]:
j += 1
if self.data[k] > self.data[j]:
break
self.data[k], self.data[j] = self.data[j],self.data[k]
k = j
def extractMax(self):
ret = self.data[1]
self.data[1], self.data[self.count] = self.data[self.count], self.data[1]
self.count -= 1
self.__shifDown(1)
return ret
if __name__ == '__main__':
N = 31
M = 100
heap = Maxheap(N)
for i in range(0,N):
k = random.randint(1, M)
heap.insert(k)
# arr = [random.randint(1,M) for i in range(N)]
# heap = Maxheap(len(arr),arr)
print(heap.size())
print(heap.data)
print(heap.extractMax())
| 24.076923 | 84 | 0.426289 | 281 | 2,191 | 3.245552 | 0.263345 | 0.201754 | 0.059211 | 0.048246 | 0.222588 | 0.157895 | 0.072368 | 0.072368 | 0.057018 | 0.057018 | 0 | 0.076735 | 0.440895 | 2,191 | 90 | 85 | 24.344444 | 0.667755 | 0.219534 | 0 | 0 | 0 | 0 | 0.004869 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.14 | false | 0 | 0.02 | 0.04 | 0.24 | 0.06 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6b3e3c2d633954d06881dc1103a976a7248201f2
| 585 |
py
|
Python
|
ink2canvas/svg/Use.py
|
greipfrut/pdftohtml5canvas
|
bd4b829a5fd02b503e6b32c268b265daa92e92e5
|
[
"MIT"
] | 4 |
2016-05-06T21:29:39.000Z
|
2020-02-25T08:47:48.000Z
|
ink2canvas/svg/Use.py
|
letw/pdftohtml5canvas
|
bd4b829a5fd02b503e6b32c268b265daa92e92e5
|
[
"MIT"
] | null | null | null |
ink2canvas/svg/Use.py
|
letw/pdftohtml5canvas
|
bd4b829a5fd02b503e6b32c268b265daa92e92e5
|
[
"MIT"
] | null | null | null |
from ink2canvas.svg.AbstractShape import AbstractShape
class Use(AbstractShape):
def drawClone(self):
drawables = self.rootTree.getDrawable()
OriginName = self.getCloneId()
OriginObject = self.rootTree.searchElementById(OriginName,drawables)
OriginObject.runDraw()
def draw(self, isClip=False):
if self.hasTransform():
transMatrix = self.getTransform()
self.canvasContext.transform(*transMatrix)
self.drawClone()
def getCloneId(self):
return self.attr("href","xlink")[1:]
| 32.5 | 76 | 0.647863 | 53 | 585 | 7.150943 | 0.603774 | 0.063325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004545 | 0.247863 | 585 | 18 | 77 | 32.5 | 0.856818 | 0 | 0 | 0 | 0 | 0 | 0.015358 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.071429 | 0.071429 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6b454d373a4daf57bd5eb97d08752d3322beb78a
| 6,146 |
py
|
Python
|
bcgs/disqus_objects.py
|
aeturnum/bcgs
|
e5ae4c9f4cdd45b47615f00581dcc3792c281ea3
|
[
"MIT"
] | null | null | null |
bcgs/disqus_objects.py
|
aeturnum/bcgs
|
e5ae4c9f4cdd45b47615f00581dcc3792c281ea3
|
[
"MIT"
] | null | null | null |
bcgs/disqus_objects.py
|
aeturnum/bcgs
|
e5ae4c9f4cdd45b47615f00581dcc3792c281ea3
|
[
"MIT"
] | null | null | null |
import requests
import aiohttp
from constants import API_KEY
class User(object):
def __init__(self, author_info):
# "author": {
# "about": "",
# "avatar": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "isCustom": false,
# "large": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar92.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg",
# "small": {
# "cache": "//a.disquscdn.com/1519942534/images/noavatar32.png",
# "permalink": "https://disqus.com/api/users/avatars/felix1999.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "5472588",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": true,
# "joinedAt": "2010-11-20T04:45:33",
# "location": "",
# "name": "felix1999",
# "profileUrl": "https://disqus.com/by/felix1999/",
# "signedUrl": "",
# "url": "",
# "username": "felix1999"
# },
self._basic_info = author_info
self._detailed_info = None
async def load(self):
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
user_info = await session.get(
'https://disqus.com/api/3.0/users/details.json',
params={'user': self.id, 'api_key': API_KEY}
)
detail_json = await user_info.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
def _get_detailed_info(self):
# https://disqus.com/api/3.0/users/details.json?user=137780765&api_key=E8Uh5l5fHZ6gD8U3KycjAIAk46f68Zw7C6eW8WSjZvCLXebZ7p0r1yrYDrLilk2F
# {
# "code": 0,
# "response": {
# "about": "",
# "avatar": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "isCustom": true,
# "large": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar92.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# },
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg",
# "small": {
# "cache": "https://c.disquscdn.com/uploads/users/13778/765/avatar32.jpg?1433896551",
# "permalink": "https://disqus.com/api/users/avatars/disqus_FqhLpDGmTT.jpg"
# }
# },
# "disable3rdPartyTrackers": false,
# "id": "137780765",
# "isAnonymous": false,
# "isPowerContributor": false,
# "isPrimary": true,
# "isPrivate": false,
# "joinedAt": "2015-01-02T18:40:14",
# "location": "",
# "name": "Bob",
# "numFollowers": 2,
# "numFollowing": 0,
# "numForumsFollowing": 0,
# "numLikesReceived": 8967,
# "numPosts": 4147,
# "profileUrl": "https://disqus.com/by/disqus_FqhLpDGmTT/",
# "rep": 3.5297520000000002,
# "reputation": 3.5297520000000002,
# "reputationLabel": "High",
# "signedUrl": "",
# "url": "",
# "username": "disqus_FqhLpDGmTT"
# }
# }
print("WARNING: auto-loading user in async version of code!!!!")
details = requests.get(
'https://disqus.com/api/3.0/users/details.json',
{'user': self.id, 'api_key': API_KEY}
)
detail_json = details.json()
if detail_json['code'] != 0:
print(f'Problem with getting user details from user {self.id}')
print(detail_json)
self._detailed_info = detail_json['response']
@property
def anonymous(self):
return 'id' not in self._basic_info
@property
def private(self):
return self.anonymous or self._basic_info.get('isPrivate')
@property
def id(self):
if self.private:
return 'Private'
return self._basic_info.get('id', 'Anonymous')
@property
def name(self):
return self._basic_info.get('name')
@property
def username(self):
return self._basic_info.get('username')
@property
def location(self):
return self._basic_info.get('location')
@property
def joined_at(self):
return self._basic_info.get('joinedAt')
@property
def profile_url(self):
return self._basic_info.get('profileUrl')
@property
def total_posts(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numPosts')
@property
def total_likes(self):
if self._detailed_info is None:
self._get_detailed_info()
return self._detailed_info.get('numLikesReceived')
@property
def user_info_row(self):
return [
self.id,
self.name,
self.username,
self.total_posts,
self.total_likes,
self.location,
self.joined_at,
self.profile_url
]
| 36.802395 | 143 | 0.493817 | 548 | 6,146 | 5.394161 | 0.24635 | 0.040934 | 0.052097 | 0.051759 | 0.515223 | 0.490189 | 0.434709 | 0.393437 | 0.362652 | 0.310893 | 0 | 0.064012 | 0.374715 | 6,146 | 166 | 144 | 37.024096 | 0.705178 | 0.432314 | 0 | 0.324675 | 0 | 0 | 0.113583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.168831 | false | 0 | 0.038961 | 0.103896 | 0.376623 | 0.064935 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0
| 1 |
6b496440b1b757ff1f65cdc922e139b550fcb6ef
| 473 |
py
|
Python
|
setup.py
|
aagaard/dbservice
|
47daadab307e6744ef151dd4e0aacff27dcda881
|
[
"MIT"
] | 1 |
2020-04-27T16:30:50.000Z
|
2020-04-27T16:30:50.000Z
|
setup.py
|
aagaard/dbservice
|
47daadab307e6744ef151dd4e0aacff27dcda881
|
[
"MIT"
] | null | null | null |
setup.py
|
aagaard/dbservice
|
47daadab307e6744ef151dd4e0aacff27dcda881
|
[
"MIT"
] | 1 |
2021-01-13T02:16:56.000Z
|
2021-01-13T02:16:56.000Z
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
"""
Setup for the dbservice
"""
from setuptools import setup, find_packages
setup(
name='dbservice',
version='0.9',
description="Database service for storing meter data",
author="Søren Aagaard Mikkelsen",
author_email='smik@eng.au.dk',
url='https://github.com/dbservice/dbservice',
packages=find_packages(),
package_data={'': ['static/*.*', 'templates/*.*']},
scripts=['manage.py'],
)
| 22.52381 | 58 | 0.646934 | 56 | 473 | 5.392857 | 0.803571 | 0.07947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010101 | 0.162791 | 473 | 20 | 59 | 23.65 | 0.752525 | 0.145877 | 0 | 0 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.083333 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
6b4af341d1bd006f2df5874fa788b8866cb5c77d
| 800 |
py
|
Python
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 1 |
2020-01-22T13:11:23.000Z
|
2020-01-22T13:11:23.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | 12 |
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
venv/lib/python3.6/site-packages/ansible_collections/junipernetworks/junos/plugins/module_utils/network/junos/argspec/facts/facts.py
|
usegalaxy-no/usegalaxy
|
75dad095769fe918eb39677f2c887e681a747f3a
|
[
"MIT"
] | null | null | null |
#
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The arg spec for the junos facts module.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
class FactsArgs(object):
""" The arg spec for the junos facts module
"""
def __init__(self, **kwargs):
pass
argument_spec = {
"gather_subset": dict(
default=["!config"], type="list", elements="str"
),
"config_format": dict(
default="text", choices=["xml", "text", "set", "json"]
),
"gather_network_resources": dict(type="list", elements="str"),
"available_network_resources": {"type": "bool", "default": False},
}
| 25.806452 | 74 | 0.60625 | 95 | 800 | 4.884211 | 0.705263 | 0.025862 | 0.043103 | 0.056034 | 0.137931 | 0.137931 | 0.137931 | 0.137931 | 0 | 0 | 0 | 0.014706 | 0.235 | 800 | 30 | 75 | 26.666667 | 0.743464 | 0.2775 | 0 | 0.133333 | 0 | 0 | 0.233929 | 0.091071 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0.066667 | 0.066667 | 0 | 0.266667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
860a91391db83eb979d9849bfe427e0dbb8bf3eb
| 1,171 |
py
|
Python
|
helpers.py
|
owenjones/CaBot
|
dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc
|
[
"MIT"
] | 3 |
2020-03-26T11:43:40.000Z
|
2021-12-27T18:26:06.000Z
|
helpers.py
|
owenjones/CaBot
|
dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc
|
[
"MIT"
] | 2 |
2021-05-14T01:31:12.000Z
|
2021-08-23T16:07:44.000Z
|
helpers.py
|
owenjones/CaBot
|
dd47c077b21cbcf52c0ffd2e30b47fb736a41ebc
|
[
"MIT"
] | 1 |
2020-04-22T19:06:43.000Z
|
2020-04-22T19:06:43.000Z
|
from server import roles
def hasRole(member, roleID):
role = member.guild.get_role(roleID)
return role in member.roles
def gainedRole(before, after, roleID):
role = before.guild.get_role(roleID)
return (role not in before.roles) and (role in after.roles)
def isExplorer(ctx):
return hasRole(ctx.author, roles["explorer"])
def isNetwork(ctx):
return hasRole(ctx.author, roles["network"])
def isLeader(ctx):
return hasRole(ctx.author, roles["leader"])
def isAdmin(ctx):
return hasRole(ctx.author, roles["admin"])
def isBot(ctx):
return hasRole(ctx.author, roles["bot"])
class Colours:
DEFAULT = 0
AQUA = 1752220
GREEN = 3066993
BLUE = 3447003
PURPLE = 10181046
GOLD = 15844367
ORANGE = 15105570
RED = 15158332
GREY = 9807270
DARKER_GREY = 8359053
NAVY = 3426654
DARK_AQUA = 1146986
DARK_GREEN = 2067276
DARK_BLUE = 2123412
DARK_PURPLE = 7419530
DARK_GOLD = 12745742
DARK_ORANGE = 11027200
DARK_RED = 10038562
DARK_GREY = 9936031
LIGHT_GREY = 12370112
DARK_NAVY = 2899536
LUMINOUS_VIVID_PINK = 16580705
DARK_VIVID_PINK = 12320855
| 20.189655 | 63 | 0.679761 | 149 | 1,171 | 5.228188 | 0.47651 | 0.057766 | 0.102696 | 0.121951 | 0.264442 | 0.264442 | 0 | 0 | 0 | 0 | 0 | 0.18313 | 0.230572 | 1,171 | 57 | 64 | 20.54386 | 0.681465 | 0 | 0 | 0 | 0 | 0 | 0.024765 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170732 | false | 0 | 0.02439 | 0.121951 | 0.95122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0
| 1 |
860eaaee93a0cd4aceb0ebc7da1f6e2b65f05589
| 224 |
py
|
Python
|
boa3_test/test_sc/event_test/EventNep5Transfer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25 |
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/event_test/EventNep5Transfer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419 |
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/event_test/EventNep5Transfer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15 |
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from boa3.builtin import public
from boa3.builtin.contract import Nep5TransferEvent
transfer = Nep5TransferEvent
@public
def Main(from_addr: bytes, to_addr: bytes, amount: int):
transfer(from_addr, to_addr, amount)
| 18.666667 | 56 | 0.785714 | 30 | 224 | 5.733333 | 0.5 | 0.093023 | 0.174419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020725 | 0.138393 | 224 | 11 | 57 | 20.363636 | 0.870466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
86161f7e9f969066db82c2f68d6e2be07cfb7ad1
| 3,694 |
py
|
Python
|
src/falconpy/_endpoint/_filevantage.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
src/falconpy/_endpoint/_filevantage.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
src/falconpy/_endpoint/_filevantage.py
|
kra-ts/falconpy
|
c7c4ed93cb3b56cdfd86757f573fde57e4ccf857
|
[
"Unlicense"
] | null | null | null |
"""Internal API endpoint constant library.
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
_filevantage_endpoints = [
[
"getChanges",
"GET",
"/filevantage/entities/changes/v2",
"Retrieve information on changes",
"filevantage",
[
{
"type": "array",
"items": {
"type": "string"
},
"collectionFormat": "multi",
"description": "Comma separated values of change ids",
"name": "ids",
"in": "query",
"required": True
}
]
],
[
"queryChanges",
"GET",
"/filevantage/queries/changes/v2",
"Returns one or more change IDs",
"filevantage",
[
{
"minimum": 0,
"type": "integer",
"description": "The first change index to return in the response. "
"If not provided it will default to '0'. "
"Use with the `limit` parameter to manage pagination of results.",
"name": "offset",
"in": "query"
},
{
"type": "integer",
"description": "The maximum number of changes to return in the response "
"(default: 100; max: 500). "
"Use with the `offset` parameter to manage pagination of results",
"name": "limit",
"in": "query"
},
{
"type": "string",
"description": "Sort changes using options like:\n\n"
"- `action_timestamp` (timestamp of the change occurrence) \n\n "
"Sort either `asc` (ascending) or `desc` (descending). "
"For example: `action_timestamp|asc`.\n"
"The full list of allowed sorting options can be reviewed in our API documentation.",
"name": "sort",
"in": "query"
},
{
"type": "string",
"description": "Filter changes using a query in Falcon Query Language (FQL). \n\n"
"Common filter options include:\n\n - `host.host_name`\n - `action_timestamp`\n\n "
"The full list of allowed filter parameters can be reviewed in our API documentation.",
"name": "filter",
"in": "query"
}
]
]
]
| 35.180952 | 95 | 0.600704 | 426 | 3,694 | 4.997653 | 0.469484 | 0.01644 | 0.0155 | 0.023485 | 0.139032 | 0.093001 | 0.073274 | 0.035698 | 0 | 0 | 0 | 0.005631 | 0.278831 | 3,694 | 104 | 96 | 35.519231 | 0.793544 | 0.457228 | 0 | 0.181818 | 0 | 0.015152 | 0.624124 | 0.055055 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
861a31bb111c594972aeb70c462a963cf1fefdb9
| 5,215 |
py
|
Python
|
pysnmp/HH3C-PPPOE-SERVER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11 |
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/HH3C-PPPOE-SERVER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75 |
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/HH3C-PPPOE-SERVER-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10 |
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module HH3C-PPPOE-SERVER-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-PPPOE-SERVER-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Integer32, IpAddress, NotificationType, Unsigned32, iso, MibIdentifier, Counter64, Counter32, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, ModuleIdentity, Bits, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Integer32", "IpAddress", "NotificationType", "Unsigned32", "iso", "MibIdentifier", "Counter64", "Counter32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "ModuleIdentity", "Bits", "TimeTicks")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
hh3cPPPoEServer = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 102))
hh3cPPPoEServer.setRevisions(('2009-05-06 00:00',))
if mibBuilder.loadTexts: hh3cPPPoEServer.setLastUpdated('200905060000Z')
if mibBuilder.loadTexts: hh3cPPPoEServer.setOrganization('Hangzhou H3C Technologies Co., Ltd.')
hh3cPPPoEServerObject = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1))
hh3cPPPoEServerMaxSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerMaxSessions.setStatus('current')
hh3cPPPoEServerCurrSessions = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerCurrSessions.setStatus('current')
hh3cPPPoEServerAuthRequests = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthRequests.setStatus('current')
hh3cPPPoEServerAuthSuccesses = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthSuccesses.setStatus('current')
hh3cPPPoEServerAuthFailures = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hh3cPPPoEServerAuthFailures.setStatus('current')
hh3cPPPoESAbnormOffsThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsThreshold.setStatus('current')
hh3cPPPoESAbnormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerThreshold.setStatus('current')
hh3cPPPoESNormOffPerThreshold = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 102, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 100))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerThreshold.setStatus('current')
hh3cPPPoEServerTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2))
hh3cPPPoeServerTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0))
hh3cPPPoESAbnormOffsAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 1))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffsAlarm.setStatus('current')
hh3cPPPoESAbnormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 2))
if mibBuilder.loadTexts: hh3cPPPoESAbnormOffPerAlarm.setStatus('current')
hh3cPPPoESNormOffPerAlarm = NotificationType((1, 3, 6, 1, 4, 1, 25506, 2, 102, 2, 0, 3))
if mibBuilder.loadTexts: hh3cPPPoESNormOffPerAlarm.setStatus('current')
mibBuilder.exportSymbols("HH3C-PPPOE-SERVER-MIB", hh3cPPPoEServerMaxSessions=hh3cPPPoEServerMaxSessions, hh3cPPPoEServerObject=hh3cPPPoEServerObject, hh3cPPPoeServerTrapPrefix=hh3cPPPoeServerTrapPrefix, hh3cPPPoEServerAuthFailures=hh3cPPPoEServerAuthFailures, hh3cPPPoEServer=hh3cPPPoEServer, PYSNMP_MODULE_ID=hh3cPPPoEServer, hh3cPPPoESAbnormOffsAlarm=hh3cPPPoESAbnormOffsAlarm, hh3cPPPoEServerAuthRequests=hh3cPPPoEServerAuthRequests, hh3cPPPoEServerAuthSuccesses=hh3cPPPoEServerAuthSuccesses, hh3cPPPoESNormOffPerThreshold=hh3cPPPoESNormOffPerThreshold, hh3cPPPoEServerCurrSessions=hh3cPPPoEServerCurrSessions, hh3cPPPoEServerTraps=hh3cPPPoEServerTraps, hh3cPPPoESAbnormOffPerThreshold=hh3cPPPoESAbnormOffPerThreshold, hh3cPPPoESAbnormOffPerAlarm=hh3cPPPoESAbnormOffPerAlarm, hh3cPPPoESAbnormOffsThreshold=hh3cPPPoESAbnormOffsThreshold, hh3cPPPoESNormOffPerAlarm=hh3cPPPoESNormOffPerAlarm)
| 115.888889 | 892 | 0.797124 | 519 | 5,215 | 8.00578 | 0.263969 | 0.007702 | 0.01083 | 0.01444 | 0.351865 | 0.282551 | 0.222383 | 0.222383 | 0.222383 | 0.218773 | 0 | 0.091964 | 0.074209 | 5,215 | 44 | 893 | 118.522727 | 0.768641 | 0.06443 | 0 | 0 | 0 | 0 | 0.141889 | 0.013347 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.189189 | 0 | 0.189189 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
861a472cf4ef7f924185a3fe1ea6569338502257
| 2,041 |
py
|
Python
|
Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py
|
suhaili99/python-share
|
6c65faaff722b8bd9e381650a6b277f56d1ae4c9
|
[
"MIT"
] | 4 |
2019-10-21T11:00:55.000Z
|
2020-10-22T16:11:21.000Z
|
Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py
|
suhaili99/python-share
|
6c65faaff722b8bd9e381650a6b277f56d1ae4c9
|
[
"MIT"
] | 1 |
2019-12-17T05:20:26.000Z
|
2019-12-17T05:20:26.000Z
|
Pyshare2019/02 - if + Nesteed if/Nesteed-IF.py
|
suhaili99/python-share
|
6c65faaff722b8bd9e381650a6b277f56d1ae4c9
|
[
"MIT"
] | 9 |
2019-10-20T05:48:03.000Z
|
2020-11-17T14:08:14.000Z
|
name = input("masukkan nama pembeli = ")
alamat= input("Alamat = ")
NoTelp = input("No Telp = ")
print("\n")
print("=================INFORMASI HARGA MOBIL DEALER JAYA ABADI===============")
print("Pilih Jenis Mobil :")
print("\t 1.Daihatsu ")
print("\t 2.Honda ")
print("\t 3.Toyota ")
print("")
pilihan = int(input("Pilih jenis mobil yang ingin dibeli : "))
print("")
if (pilihan==1):
print("<<<<<<<< Macam macam mobil pada Daihatsu >>>>>>>>>")
print("\ta.Grand New Xenia")
print("\tb.All New Terios")
print("\tc.New Ayla")
Pilih1 = input("Mana yang ingin anda pilih ?? = ")
if(Pilih1 == "a"):
print("Harga mobil Grand New Xenia adalah 183 juta ")
elif(Pilih1== "b"):
print("Harga mobil All New Terios adalah 215 juta")
elif(Pilih1== "c"):
print("Harga mobil New Ayla adalah 110 juta")
else:
print("Tidak terdefinisi")
elif (pilihan==2):
print("<<<<<<<< Macam macam mobil pada Honda >>>>>>>>>")
print("\ta.Honda Brio Satya S")
print("\tb.Honda Jazz ")
print("\tb.Honda Mobilio ")
pilih2 = input("Mana yang ingin anda pilih??")
if(pilih2=="a"):
print("Harga mobil HOnda Brio Satya S adalah 131 juta")
elif(pilih2=="b"):
print("Harga mobil Honda Jazz adalah 232 juta")
elif(pilih2=="c"):
print("Harga mobil Honda mobilio adalah 189 juta")
else:
print("Tidak terdefinisi")
elif (pilihan==3):
print("<<<<<<<< Macam macam mobil pada Toyota>>>>>>>>?")
print("\ta.Alphard")
print("\tb.Camry")
print("\tc.Fortuner")
pilih3 = input("Mana yang ingin anda pilih??")
if (pilih3=="a"):
print("Harga mobil Alphard adalah 870 juta")
elif (pilih3=="b"):
print("Harga mobil Camry adalah 560 Juta")
elif (pilih3=="c"):
print("Harga mobil Fortuner adalah 492 Juta")
| 34.59322 | 80 | 0.529152 | 241 | 2,041 | 4.481328 | 0.307054 | 0.092593 | 0.125 | 0.055556 | 0.221296 | 0.15463 | 0.15463 | 0 | 0 | 0 | 0 | 0.031228 | 0.293974 | 2,041 | 58 | 81 | 35.189655 | 0.718251 | 0 | 0 | 0.115385 | 0 | 0 | 0.476237 | 0.012739 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.596154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
861d74d55db578d9eef6b283f432f055362e839e
| 975 |
py
|
Python
|
examples/given_data.py
|
GuoJingyao/cornac
|
e7529990ec1dfa586c4af3de98e4b3e00a786578
|
[
"Apache-2.0"
] | null | null | null |
examples/given_data.py
|
GuoJingyao/cornac
|
e7529990ec1dfa586c4af3de98e4b3e00a786578
|
[
"Apache-2.0"
] | null | null | null |
examples/given_data.py
|
GuoJingyao/cornac
|
e7529990ec1dfa586c4af3de98e4b3e00a786578
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Example to train and evaluate a model with given data
@author: Quoc-Tuan Truong <tuantq.vnu@gmail.com>
"""
from cornac.data import Reader
from cornac.eval_methods import BaseMethod
from cornac.models import MF
from cornac.metrics import MAE, RMSE
from cornac.utils import cache
# Fetch the MovieLens 100K predefined train/test split (cached locally).
ml_reader = Reader()
train_data = ml_reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.base'))
test_data = ml_reader.read(cache(url='http://files.grouplens.org/datasets/movielens/ml-100k/u1.test'))

# Matrix-factorization model to be evaluated.
mf = MF(k=10, max_iter=25, learning_rate=0.01, lambda_reg=0.02,
        use_bias=True, early_stop=True, verbose=True)

# Evaluate on the provided split; unknown users/items are kept in scope.
eval_method = BaseMethod.from_splits(train_data=train_data, test_data=test_data,
                                     exclude_unknowns=False, verbose=True)
result = eval_method.evaluate(model=mf, metrics=[MAE(), RMSE()], user_based=True)
print(result)
| 33.62069 | 100 | 0.73641 | 146 | 975 | 4.80137 | 0.541096 | 0.071327 | 0.039943 | 0.054208 | 0.194009 | 0.194009 | 0.194009 | 0.194009 | 0.194009 | 0.194009 | 0 | 0.026222 | 0.139487 | 975 | 28 | 101 | 34.821429 | 0.809297 | 0.2 | 0 | 0 | 0 | 0 | 0.158442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.357143 | 0 | 0.357143 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
861f5e4cfdc98de2a394371bb5b02dd397322979
| 203 |
py
|
Python
|
Python/Fibonacci.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 1 |
2018-12-19T11:42:09.000Z
|
2018-12-19T11:42:09.000Z
|
Python/Fibonacci.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 1 |
2019-10-25T09:19:21.000Z
|
2019-10-25T09:19:21.000Z
|
Python/Fibonacci.py
|
kennethsequeira/Hello-world
|
464227bc7d9778a4a2a4044fe415a629003ea77f
|
[
"MIT"
] | 7 |
2019-09-11T07:17:32.000Z
|
2019-09-25T12:23:52.000Z
|
"""Print the first n Fibonacci numbers, with n read from stdin."""
# Fixes vs. original: removed the misleading "#Doesn't work." note and the
# unused `import time` (entire file is visible, nothing else uses it), and
# moved the logic into a testable function behind a __main__ guard.


def fibonacci_sequence(n):
    """Return the first *n* Fibonacci numbers as a list (1, 1, 2, 3, 5, ...).

    Returns an empty list for n <= 0.
    """
    if n <= 0:
        return []
    seq = [1, 1]
    while len(seq) < n:
        seq.append(seq[-1] + seq[-2])
    # The two-element seed over-shoots when n == 1, so trim to length n.
    return seq[:n]


if __name__ == "__main__":
    # Keep the original script's output format: space-separated, no newline.
    count = int(input())
    for value in fibonacci_sequence(count):
        print(value, end=' ')
| 20.3 | 52 | 0.605911 | 30 | 203 | 4.1 | 0.666667 | 0.162602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025 | 0.211823 | 203 | 9 | 53 | 22.555556 | 0.74375 | 0.064039 | 0 | 0 | 0 | 0 | 0.005556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86205fe9ef8a0c045201301f18357ead5b9c92fc
| 6,081 |
py
|
Python
|
py/_log/log.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 |
2018-03-14T06:45:40.000Z
|
2018-06-08T07:46:02.000Z
|
py/_log/log.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 |
2019-03-23T08:23:21.000Z
|
2019-03-23T08:23:21.000Z
|
py/_log/log.py
|
EnjoyLifeFund/py36pkgs
|
0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2
|
[
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 |
2017-11-07T18:05:19.000Z
|
2017-11-14T18:06:55.000Z
|
"""
basic logging functionality based on a producer/consumer scheme.
XXX implement this API: (maybe put it into slogger.py?)
log = Logger(
info=py.log.STDOUT,
debug=py.log.STDOUT,
command=None)
log.info("hello", "world")
log.command("hello", "world")
log = Logger(info=Logger(something=...),
debug=py.log.STDOUT,
command=None)
"""
import py, sys
class Message(object):
    """One log record: a keyword path plus the payload arguments."""
    def __init__(self, keywords, args):
        self.keywords = keywords
        self.args = args
    def content(self):
        """Render the payload arguments as a space-separated string."""
        return " ".join(str(arg) for arg in self.args)
    def prefix(self):
        """Render the keyword path as a '[a:b] ' style prefix."""
        return "[%s] " % ":".join(self.keywords)
    def __str__(self):
        # full rendering: prefix followed by the payload
        return self.prefix() + self.content()
class Producer(object):
    """ (deprecated) Log producer API which sends messages to be logged
    to a 'consumer' object, which then prints them to stdout,
    stderr, files, etc. Used extensively by PyPy-1.1.
    """
    Message = Message # to allow later customization
    keywords2consumer = {}
    def __init__(self, keywords, keywordmapper=None, **kw):
        # Accept either a whitespace-separated keyword string or an
        # already-split sequence; normalize to a tuple either way.
        if hasattr(keywords, 'split'):
            keywords = tuple(keywords.split())
        self._keywords = keywords
        # Fall back to the module-wide mapper when none is supplied.
        if keywordmapper is None:
            keywordmapper = default_keywordmapper
        self._keywordmapper = keywordmapper
    def __repr__(self):
        return "<py.log.Producer %s>" % ":".join(self._keywords)
    def __getattr__(self, name):
        # Attribute access extends the keyword path (e.g. log.debug.subsys).
        # Names containing '_' are refused so dunder/internal lookups fail fast.
        if '_' in name:
            raise AttributeError(name)
        producer = self.__class__(self._keywords + (name,))
        # Cache the derived producer so the next lookup bypasses __getattr__.
        setattr(self, name, producer)
        return producer
    def __call__(self, *args):
        """ write a message to the appropriate consumer(s) """
        func = self._keywordmapper.getconsumer(self._keywords)
        # A consumer of None means logging is disabled for this keyword path.
        if func is not None:
            func(self.Message(self._keywords, args))
class KeywordMapper:
    """Maps keyword tuples to the consumer that receives their messages."""
    def __init__(self):
        self.keywords2consumer = {}
    def getstate(self):
        """Return a snapshot copy of the current mapping."""
        return self.keywords2consumer.copy()
    def setstate(self, state):
        """Replace the whole mapping with *state* (as built by getstate)."""
        self.keywords2consumer.clear()
        self.keywords2consumer.update(state)
    def getconsumer(self, keywords):
        """ return a consumer matching the given keywords.

        Walks the keyword path from the most specific prefix down to
        the least specific one and returns the first registered entry
        (which may be None, meaning "disabled"), falling back to
        py.log.default.
        """
        for length in range(len(keywords), 0, -1):
            prefix = keywords[:length]
            if prefix in self.keywords2consumer:
                return self.keywords2consumer[prefix]
        return self.keywords2consumer.get('default', default_consumer)
    def setconsumer(self, keywords, consumer):
        """ set a consumer for a set of keywords. """
        # normalize the key to a tuple of keyword strings
        if isinstance(keywords, str):
            keywords = tuple(filter(None, keywords.split()))
        elif hasattr(keywords, '_keywords'):
            keywords = keywords._keywords
        elif not isinstance(keywords, tuple):
            raise TypeError("key %r is not a string or tuple" % (keywords,))
        # non-callable consumers must at least be file-like; wrap them
        if consumer is not None and not py.builtin.callable(consumer):
            if not hasattr(consumer, 'write'):
                raise TypeError(
                    "%r should be None, callable or file-like" % (consumer,))
            consumer = File(consumer)
        self.keywords2consumer[keywords] = consumer
def default_consumer(msg):
    """ the default consumer, writes the message to sys.stderr.

    NOTE(review): the historical docstring claimed stdout/print, but the
    code has always written to stderr; the text now matches the behavior.
    """
    sys.stderr.write(str(msg)+"\n")
# Process-wide mapper used by Producer when no explicit mapper is given.
default_keywordmapper = KeywordMapper()
# Module-level convenience wrappers operating on the shared default mapper.
def setconsumer(keywords, consumer):
    # register *consumer* for *keywords* on the process-wide mapper
    default_keywordmapper.setconsumer(keywords, consumer)
def setstate(state):
    # replace the process-wide mapping wholesale (see KeywordMapper.setstate)
    default_keywordmapper.setstate(state)
def getstate():
    # snapshot of the process-wide mapping (see KeywordMapper.getstate)
    return default_keywordmapper.getstate()
#
# Consumers
#
class File(object):
    """Log consumer that appends each message, newline-terminated, to a
    wrapped file-like object, flushing after every write when possible."""
    def __init__(self, f):
        # Anything exposing a write() method qualifies as a sink.
        assert hasattr(f, 'write')
        self._file = f
    def __call__(self, msg):
        """Write one message to the underlying file."""
        sink = self._file
        sink.write("%s\n" % (msg,))
        flush = getattr(sink, 'flush', None)
        if flush is not None:
            flush()
class Path(object):
    """ log consumer that opens and writes to a Path

    filename: target file (anything str() accepts).
    append: append to an existing file instead of truncating.
    delayed_create: postpone opening the file until the first message.
    buffering: when true, skip the flush after every message.
    """
    def __init__(self, filename, append=False,
                 delayed_create=False, buffering=False):
        self._append = append
        self._filename = str(filename)
        self._buffering = buffering
        if not delayed_create:
            self._openfile()
    def _openfile(self):
        # conditional expression replaces the fragile `x and 'a' or 'w'` idiom
        mode = 'a' if self._append else 'w'
        self._file = open(self._filename, mode)
    def __call__(self, msg):
        """ write a message to the log """
        # lazily create the file when delayed_create was requested
        if not hasattr(self, "_file"):
            self._openfile()
        self._file.write(str(msg) + "\n")
        if not self._buffering:
            self._file.flush()
def STDOUT(msg):
    """Consumer that emits each message, newline-terminated, on sys.stdout."""
    sys.stdout.write("%s\n" % (msg,))
def STDERR(msg):
    """Consumer that emits each message, newline-terminated, on sys.stderr."""
    sys.stderr.write("%s\n" % (msg,))
class Syslog:
    """Consumer that forwards messages to the syslog daemon."""
    def __init__(self, priority=None):
        # LOG_* attributes are attached to this class after its definition
        # by the module-level loop below, hence the lazy default lookup.
        self.priority = self.LOG_INFO if priority is None else priority
    def __call__(self, msg):
        """Send one message to syslog at the configured priority."""
        py.std.syslog.syslog(self.priority, str(msg))
# Attach LOG_EMERG .. LOG_DEBUG priority constants from the platform's
# syslog module onto Syslog.  AttributeError is swallowed so the module
# still imports on platforms without syslog (e.g. Windows).
for _prio in "EMERG ALERT CRIT ERR WARNING NOTICE INFO DEBUG".split():
    _prio = "LOG_" + _prio
    try:
        setattr(Syslog, _prio, getattr(py.std.syslog, _prio))
    except AttributeError:
        pass
| 32.518717 | 79 | 0.587568 | 682 | 6,081 | 5.093842 | 0.256598 | 0.037997 | 0.018998 | 0.017271 | 0.107369 | 0.090674 | 0.048071 | 0.035406 | 0.035406 | 0.035406 | 0 | 0.002831 | 0.302911 | 6,081 | 186 | 80 | 32.693548 | 0.816702 | 0.233021 | 0 | 0.132743 | 0 | 0 | 0.04698 | 0 | 0 | 0 | 0 | 0 | 0.00885 | 1 | 0.230089 | false | 0.00885 | 0.00885 | 0.053097 | 0.389381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86207cdb07326bc532b6b5b79d11a692b3f498c4
| 1,696 |
py
|
Python
|
test/test_all_contacts.py
|
Sergggio/python_training
|
6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4
|
[
"Apache-2.0"
] | null | null | null |
test/test_all_contacts.py
|
Sergggio/python_training
|
6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4
|
[
"Apache-2.0"
] | null | null | null |
test/test_all_contacts.py
|
Sergggio/python_training
|
6dfdbed9a503cf9a6810b31c57bdde76b15e4ec4
|
[
"Apache-2.0"
] | null | null | null |
import re
from model.contact import Contact
def test_all_contacts(app, db):
    """Contacts and phone listings on the home page must match the DB."""
    db_contacts = db.get_contact_list()
    # phones in the DB, normalized exactly like the home page renders them
    expected_phones = [merge_phones_like_on_home_page(phone)
                       for phone in db.phones_from_db()]
    # email comparison was scaffolded but never enabled in the original
    ui_contacts = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
    ui_phones = [con.all_phones_from_home_page for con in ui_contacts]
    assert expected_phones == ui_phones
    assert db_contacts == ui_contacts
def clear(s):
    """Strip parentheses, spaces and dashes from the phone string *s*."""
    return s.translate(str.maketrans('', '', '() -'))
def remove_spaces(s):
    """Collapse runs of spaces to one space and strip trailing whitespace."""
    collapsed = re.sub(' +', ' ', s)
    return collapsed.rstrip()
def merge_phones_like_on_home_page(contact):
    """Join a contact's non-empty, cleaned phone numbers with newlines,
    mirroring how the home page renders them."""
    raw = [contact.home_phone, contact.mobile_phone,
           contact.work_phone, contact.secondary_phone]
    cleaned = [clear(number) for number in raw if number is not None]
    return "\n".join(number for number in cleaned if number != "")
def merge_email_like_on_home_page(contact):
    """Join a contact's non-empty, space-normalized email addresses with
    newlines, mirroring how the home page renders them."""
    raw = [contact.email, contact.email2, contact.email3]
    normalized = [remove_spaces(address) for address in raw
                  if address is not None]
    return "\n".join(address for address in normalized if address != "")
| 38.545455 | 94 | 0.635024 | 234 | 1,696 | 4.209402 | 0.235043 | 0.113706 | 0.121827 | 0.056853 | 0.35736 | 0.294416 | 0.257868 | 0.257868 | 0.257868 | 0.194924 | 0 | 0.001608 | 0.266509 | 1,696 | 43 | 95 | 39.44186 | 0.790193 | 0.154481 | 0 | 0.142857 | 0 | 0 | 0.009097 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.178571 | false | 0 | 0.071429 | 0.142857 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0
| 1 |
862dc531f725b524bb6846cb090205fc7468f382
| 1,166 |
py
|
Python
|
src/backup/template/PositionalArgumentTemplate.py
|
ytyaru0/Python.TemplateFileMaker.20180314204216
|
4849f982acea5d86b711c5dec4cc046016ab1031
|
[
"CC0-1.0"
] | null | null | null |
src/backup/template/PositionalArgumentTemplate.py
|
ytyaru0/Python.TemplateFileMaker.20180314204216
|
4849f982acea5d86b711c5dec4cc046016ab1031
|
[
"CC0-1.0"
] | null | null | null |
src/backup/template/PositionalArgumentTemplate.py
|
ytyaru0/Python.TemplateFileMaker.20180314204216
|
4849f982acea5d86b711c5dec4cc046016ab1031
|
[
"CC0-1.0"
] | null | null | null |
from string import Template
import re
class PositionalArgumentTemplate(Template):
    """string.Template variant whose placeholders are positional indices
    (``${0}``, ``${1}``, ...) instead of identifier names."""
    # Default idpattern for reference; the (?i)/(?-i) flags in it toggle
    # case-insensitive matching on and off within the pattern.
    idpattern_default = Template.idpattern # (?-i:[_a-zA-Z][_a-zA-Z0-9]*)
    idpattern = '([0-9]+)'
    def find_place_holders(self, template: str):
        """Print every placeholder match found in *template* (debug aid)."""
        for match in self.pattern.finditer(template):
            print(match, type(match))
            print(match[0])
if __name__ == '__main__':
    # Demo: inspect the customized Template subclass and substitute ${0}.
    template_str = '${0} is Aug.'
    t = PositionalArgumentTemplate(template_str)
    print(template_str)
    print(dir(t))
    print(t.delimiter)
    print(t.idpattern)
    print(type(t.idpattern))
    print(t.flags)
    print(t.pattern)
    # keyword name '0' maps onto the positional placeholder ${0}
    print(t.substitute(**{'0':'V'}))
    t.find_place_holders(template_str)
| 31.513514 | 73 | 0.587479 | 145 | 1,166 | 4.593103 | 0.344828 | 0.072072 | 0.066066 | 0.024024 | 0.075075 | 0.075075 | 0 | 0 | 0 | 0 | 0 | 0.007946 | 0.244425 | 1,166 | 36 | 74 | 32.388889 | 0.748014 | 0.316467 | 0 | 0 | 0 | 0 | 0.038314 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.285714 | 0.47619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
862e0a0793ac26ff1693be29a952ce4f785121be
| 1,020 |
py
|
Python
|
cla-backend/cla/tests/unit/test_company.py
|
kdhaigud/easycla
|
f913f8dbf658acf4711b601f9312ca5663a4efe8
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
cla-backend/cla/tests/unit/test_company.py
|
kdhaigud/easycla
|
f913f8dbf658acf4711b601f9312ca5663a4efe8
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
cla-backend/cla/tests/unit/test_company.py
|
kdhaigud/easycla
|
f913f8dbf658acf4711b601f9312ca5663a4efe8
|
[
"Apache-2.0",
"CC-BY-4.0",
"MIT"
] | null | null | null |
# Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import json
import os
import requests
import uuid
import hug
import pytest
from falcon import HTTP_200, HTTP_409
import cla
from cla import routes
# Credentials/endpoint come from the environment; both are None when unset.
ID_TOKEN = os.environ.get('ID_TOKEN')
API_URL = os.environ.get('API_URL')
def test_create_company_duplicate():
    """
    Test creating duplicate company names.

    First creates a company over HTTP, then posts the same name again via
    the in-process hug test client and expects a 409 Conflict.
    """
    # (removed a committed `import pdb; pdb.set_trace()` debugging breakpoint)
    url = f'{API_URL}/v1/company'
    company_name = 'test_company_name'
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name,
    }
    headers = {
        'Authorization': f'Bearer {ID_TOKEN}'
    }
    response = requests.post(url, data=data, headers=headers)
    # requests.Response exposes the code as the int `status_code`; the old
    # `response.status` attribute does not exist and raised AttributeError.
    # (falcon's HTTP_200 is the string '200 OK', so compare against 200.)
    assert response.status_code == 200
    # add duplicate company
    data = {
        'company_id': uuid.uuid4(),
        'company_name': company_name
    }
    req = hug.test.post(routes, url, data=data, headers=headers)
    assert req.status == HTTP_409
| 23.72093 | 73 | 0.673529 | 133 | 1,020 | 4.992481 | 0.428571 | 0.099398 | 0.036145 | 0.051205 | 0.225904 | 0.225904 | 0.13253 | 0.13253 | 0.13253 | 0 | 0 | 0.018916 | 0.222549 | 1,020 | 42 | 74 | 24.285714 | 0.818411 | 0.157843 | 0 | 0.133333 | 0 | 0 | 0.15 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.033333 | false | 0 | 0.333333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
86346fa63b7971b7ad956846f8bc8dcc94175283
| 2,679 |
py
|
Python
|
server/cauth/views.py
|
mashaka/TravelHelper
|
8a216dd13c253e138f241187dee46e6e53281a7b
|
[
"MIT"
] | null | null | null |
server/cauth/views.py
|
mashaka/TravelHelper
|
8a216dd13c253e138f241187dee46e6e53281a7b
|
[
"MIT"
] | 3 |
2020-02-11T23:38:20.000Z
|
2021-06-10T19:10:53.000Z
|
server/cauth/views.py
|
mashaka/TravelHelper
|
8a216dd13c253e138f241187dee46e6e53281a7b
|
[
"MIT"
] | 1 |
2018-09-19T11:19:48.000Z
|
2018-09-19T11:19:48.000Z
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AdminPasswordChangeForm, PasswordChangeForm, UserCreationForm
from django.contrib.auth import update_session_auth_hash, login, authenticate
from django.contrib import messages
from django.shortcuts import render, redirect
from social_django.models import UserSocialAuth
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, redirect
from rest_framework.authtoken.models import Token
from app.methods import prepare_user
def get_token(request):
    """Redirect into the mobile app, carrying a DRF auth token for the
    current user, or to the app's error deep-link when no user is set."""
    if not request.user:
        url = "travel://error"
    else:
        user = request.user
        prepare_user(user)
        # reuse an existing token when one is already issued
        token, _ = Token.objects.get_or_create(user=user)
        url = "travel://?token=" + token.key + '&id=' + str(user.id)
    # 302 with an explicit Location header pointing at the app scheme
    response = HttpResponse(url, status=302)
    response['Location'] = url
    return response
@login_required
def get_facebook_token(request):
    """Return the raw extra_data of the user's linked Facebook account
    (404 when no Facebook social auth exists for the user)."""
    social_account = get_object_or_404(
        UserSocialAuth, user=request.user, provider='facebook')
    return HttpResponse(str(social_account.extra_data))
def signup(request):
    """Render the account sign-up page."""
    template = 'signup.html'
    return render(request, template)
@login_required
def home(request):
    """Render the landing page for authenticated users."""
    template = 'home.html'
    return render(request, template)
@login_required
def settings(request):
    """Render the social-account settings page.

    Only the Facebook link is surfaced by the template; disconnecting it is
    allowed only while another login method remains (another social auth or
    a usable password).
    """
    user = request.user
    # NOTE(review): the original also queried 'github' and 'twitter' logins
    # but never used them (the context below is explicit); those dead
    # lookups were removed.
    try:
        facebook_login = user.social_auth.get(provider='facebook')
    except UserSocialAuth.DoesNotExist:
        facebook_login = None
    can_disconnect = user.social_auth.count() > 1 or user.has_usable_password()
    return render(request, 'settings.html', {
        'facebook_login': facebook_login,
        'can_disconnect': can_disconnect
    })
@login_required
def password(request):
    """Show and process the password change form.

    Users with a usable password get PasswordChangeForm (old password
    required); social-only accounts get AdminPasswordChangeForm.
    """
    form_class = (PasswordChangeForm if request.user.has_usable_password()
                  else AdminPasswordChangeForm)
    if request.method == 'POST':
        form = form_class(request.user, request.POST)
        if not form.is_valid():
            messages.error(request, 'Please correct the error below.')
        else:
            form.save()
            # keep the session alive after the password change
            update_session_auth_hash(request, form.user)
            messages.success(request, 'Your password was successfully updated!')
            return redirect('password')
    else:
        form = form_class(request.user)
    return render(request, 'password.html', {'form': form})
| 31.892857 | 99 | 0.709966 | 312 | 2,679 | 5.942308 | 0.294872 | 0.04315 | 0.036677 | 0.040453 | 0.081985 | 0.048544 | 0 | 0 | 0 | 0 | 0 | 0.00464 | 0.195595 | 2,679 | 83 | 100 | 32.277108 | 0.855684 | 0 | 0 | 0.231884 | 0 | 0 | 0.086226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0.15942 | 0.15942 | 0.028986 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
863b23444fda9cb581afbddd6338c59075cfc887
| 1,793 |
py
|
Python
|
tests/test_responder.py
|
craigderington/responder-persons-api
|
d2270d2f761c5dd3dbe253113d410f3e37d4d217
|
[
"Apache-2.0"
] | null | null | null |
tests/test_responder.py
|
craigderington/responder-persons-api
|
d2270d2f761c5dd3dbe253113d410f3e37d4d217
|
[
"Apache-2.0"
] | null | null | null |
tests/test_responder.py
|
craigderington/responder-persons-api
|
d2270d2f761c5dd3dbe253113d410f3e37d4d217
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pytest
import app as service
import yaml
import responder
from starlette.responses import PlainTextResponse
@pytest.fixture
def api():
    """The responder API instance shared by all tests in this module."""
    return service.api
def test_hello_world(api):
    """GET on the index route returns the greeting."""
    response = api.requests.get("/api/v1.0/index")
    assert response.text == "Hello, World!"
def test_basic_route(api):
    """A registered route serves the expected body."""
    @api.route("/api/v1.0/index")
    def index(req, resp):
        resp.text = "Hello, World!"

    # The original only registered the route and asserted nothing, so the
    # test could never fail; actually exercise the route.
    r = api.requests.get(api.url_for(index))
    assert r.text == "Hello, World!"
def test_requests_session(api):
    """The API exposes both a session factory and a test requests client."""
    assert api.session()
    assert api.requests
def test_json_media(api):
    """A dict assigned to resp.media is served as JSON by default."""
    payload = {"life": 42}

    @api.route("/")
    def media(req, resp):
        resp.media = payload

    response = api.requests.get("http://;/")
    assert "json" in response.headers["Content-Type"]
    assert response.json() == payload
def test_yaml_media(api):
    """An ``Accept: yaml`` header yields a YAML-encoded response body."""
    dump = {"life": 42}

    @api.route("/")
    def media(req, resp):
        resp.media = dump

    r = api.requests.get("http://;/", headers={"Accept": "yaml"})
    assert "yaml" in r.headers["Content-Type"]
    # yaml.load() without an explicit Loader is deprecated (PyYAML >= 5.1)
    # and unsafe on untrusted input; safe_load suffices for plain data.
    assert yaml.safe_load(r.content) == dump
def test_background(api):
    @api.route("/")
    def route(req, resp):
        # run a slow task via the framework's background runner so the
        # HTTP response itself is not delayed by the sleep
        @api.background.task
        def task():
            import time
            time.sleep(3)
        task()
        # NOTE(review): this sets an attribute on the api object rather than
        # resp.text; presumably the response body was intended -- confirm.
        api.text = "ok"
    r = api.requests.get(api.url_for(route))
    assert r.ok
def test_500_error(api):
    """An exception raised in a view goes through the registered handler."""
    def catcher(req, exc):
        return PlainTextResponse("Suppressed error", 500)

    api.app.add_exception_handler(ValueError, catcher)

    @api.route("/api/v1.0/index")
    def view(req, resp):
        raise ValueError

    response = api.requests.get(api.url_for(view))
    assert not response.ok
    assert response.content == b'Suppressed error'
def test_404_error(api):
    """Requesting an unregistered path yields 404."""
    response = api.requests.get("/api/v1.0/foo")
    assert response.status_code == responder.API.status_codes.HTTP_404
| 19.703297 | 65 | 0.621305 | 253 | 1,793 | 4.316206 | 0.284585 | 0.051282 | 0.065934 | 0.082418 | 0.349817 | 0.311355 | 0.261905 | 0.177656 | 0.1337 | 0.1337 | 0 | 0.018813 | 0.229225 | 1,793 | 90 | 66 | 19.922222 | 0.771346 | 0.00725 | 0 | 0.189655 | 0 | 0 | 0.106299 | 0 | 0 | 0 | 0 | 0 | 0.189655 | 1 | 0.275862 | false | 0 | 0.103448 | 0.034483 | 0.413793 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8648090bbe37bd69072d860284239d2be5f5a913
| 469 |
py
|
Python
|
code-wars/010.moving-zeros-to-the-end.py
|
code-knayam/DataStructureAlgorithms
|
8425911633d4d343c58798a123175289ed0df1fe
|
[
"MIT"
] | null | null | null |
code-wars/010.moving-zeros-to-the-end.py
|
code-knayam/DataStructureAlgorithms
|
8425911633d4d343c58798a123175289ed0df1fe
|
[
"MIT"
] | null | null | null |
code-wars/010.moving-zeros-to-the-end.py
|
code-knayam/DataStructureAlgorithms
|
8425911633d4d343c58798a123175289ed0df1fe
|
[
"MIT"
] | null | null | null |
# Write an algorithm that takes an array and moves all of the zeros to the end, preserving the order of the other elements.
def move_zeros(array):
    """Return a new list with all zeros moved to the end, other elements
    keeping their relative order.

    Booleans stay in place even though ``False == 0`` / ``True == 1``.
    Fixes vs. original: no longer empties the input list in place
    (``array.pop(0)`` destroyed the caller's data) and runs in O(n)
    instead of O(n^2) from repeated pop(0)/insert.
    """
    def _is_movable_zero(item):
        # booleans compare equal to 0 but must not be treated as zeros
        return item == 0 and not isinstance(item, bool)

    kept = [item for item in array if not _is_movable_zero(item)]
    zeros = [item for item in array if _is_movable_zero(item)]
    return kept + zeros
| 31.266667 | 123 | 0.603412 | 70 | 469 | 3.914286 | 0.557143 | 0.116788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015674 | 0.319829 | 469 | 15 | 124 | 31.266667 | 0.84326 | 0.287846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
864a44b2fa4b1d6dbb15ace15ef151c81922788f
| 928 |
py
|
Python
|
__main__.py
|
miezebieze/scott-launcher
|
a03597d0883af075128d1ea4ea53e7b5132807b1
|
[
"MIT"
] | 1 |
2020-06-12T20:49:47.000Z
|
2020-06-12T20:49:47.000Z
|
__main__.py
|
miezebieze/scott-launcher
|
a03597d0883af075128d1ea4ea53e7b5132807b1
|
[
"MIT"
] | null | null | null |
__main__.py
|
miezebieze/scott-launcher
|
a03597d0883af075128d1ea4ea53e7b5132807b1
|
[
"MIT"
] | null | null | null |
from enum import Enum
from window import Window
# The eight compass directions used to index the selector grid.
D = Enum ('Directions','N NE E SE S SW W NW')
# Grid coordinates (in selector-size units) of each direction's hotspot.
selector_map = {
        D.NW: [0.5,0.5], D.N: [1.5,0], D.NE: [2.5,0.5],
        D.W: [0,1.5], D.E: [3,1.5],
        D.SW: [0.5,2.5], D.S: [1.5,3], D.SE: [2.5,2.5],
        }
selector_size = 100
# The window is a 4x4 grid of selector-sized cells.
window_size = selector_size*4
window = Window (window_size,window_size,selector_map,selector_size,selector_size)
# set actions here
from functools import partial
def say(something):
    """Print *something* as a quoted line spoken by the user."""
    print('Me: "' + something + '"')
# One action per direction; partial() binds the phrase now so it is spoken
# only when that direction is actually selected.
window.actions[D.NW] = partial (say,'northwast')
window.actions[D.N] = partial (say,'north')
window.actions[D.NE] = partial (say,'neorthest')
window.actions[D.W] = partial (say,'western')
window.actions[D.E] = partial (say,'easy')
window.actions[D.SW] = partial (say,'suess whest')
window.actions[D.S] = partial (say,'sissy')
window.actions[D.SE] = partial (say,'seoul')
# Enter the launcher's event loop.
window.go ()
| 29 | 82 | 0.626078 | 158 | 928 | 3.620253 | 0.28481 | 0.181818 | 0.195804 | 0.013986 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041721 | 0.173491 | 928 | 31 | 83 | 29.935484 | 0.704042 | 0.017241 | 0 | 0 | 0 | 0 | 0.098901 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.130435 | 0 | 0.173913 | 0.043478 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
865535238f10c51c669114bcf29b3699dd34b1e8
| 559 |
py
|
Python
|
examples/django/hello_world/wsgi.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | null | null | null |
examples/django/hello_world/wsgi.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | null | null | null |
examples/django/hello_world/wsgi.py
|
liuyu81/SnapSearch-Client-Python
|
41857806c2b26f0537de2dcc23a145107a4ecd04
|
[
"MIT"
] | 1 |
2018-03-04T20:24:14.000Z
|
2018-03-04T20:24:14.000Z
|
import os
# The settings module must be configured before the WSGI app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_world.settings")
# django WSGI application
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# load SnapSearch API credentials (placeholders -- fill in real values)
api_email = "<email>"
api_key = "<key>"
# initialize the interceptor
from SnapSearch import Client, Detector, Interceptor
interceptor = Interceptor(Client(api_email, api_key), Detector())
# deploy the interceptor: wrap the Django app so detected bot/crawler
# requests are routed through SnapSearch
from SnapSearch.wsgi import InterceptorMiddleware
application = InterceptorMiddleware(application, interceptor)
| 27.95 | 71 | 0.815742 | 65 | 559 | 6.846154 | 0.415385 | 0.101124 | 0.080899 | 0.125843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.103757 | 559 | 19 | 72 | 29.421053 | 0.888224 | 0.187835 | 0 | 0 | 0 | 0 | 0.120267 | 0.048998 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
865b44ebd78e20ddd28ec532ea20204eaa6a07dc
| 848 |
py
|
Python
|
examples/run_merger.py
|
needlehaystack/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 3 |
2019-10-03T22:15:21.000Z
|
2022-02-08T09:05:41.000Z
|
examples/run_merger.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 1 |
2021-04-30T21:08:47.000Z
|
2021-04-30T21:08:47.000Z
|
examples/run_merger.py
|
cungtv/needlestack
|
e00529a2a7c2d85059936a85f54dfb55e515b6ef
|
[
"Apache-2.0"
] | 2 |
2019-08-02T19:13:09.000Z
|
2019-10-25T01:47:17.000Z
|
import logging
from grpc_health.v1 import health_pb2, health_pb2_grpc
from grpc_health.v1.health import HealthServicer
from needlestack.apis import servicers_pb2_grpc
from needlestack.servicers import factory
from needlestack.servicers.merger import MergerServicer
from examples import configs
# Kazoo (ZooKeeper client) is chatty at INFO; keep only warnings and up.
logging.getLogger("kazoo").setLevel("WARN")
def main():
    """Start a gRPC server hosting the Merger service plus health checks."""
    config = configs.LocalDockerConfig()
    server = factory.create_server(config)
    # register this node with the ZooKeeper cluster before serving
    manager = factory.create_zookeeper_cluster_manager(config)
    manager.startup()
    servicers_pb2_grpc.add_MergerServicer_to_server(MergerServicer(config, manager), server)
    # standard gRPC health-checking protocol; report the Merger as SERVING
    health = HealthServicer()
    health_pb2_grpc.add_HealthServicer_to_server(health, server)
    health.set("Merger", health_pb2.HealthCheckResponse.SERVING)
    # blocks, serving requests until shutdown
    factory.serve(server)
if __name__ == "__main__":
    main()
| 25.69697 | 92 | 0.792453 | 101 | 848 | 6.356436 | 0.376238 | 0.056075 | 0.043614 | 0.049844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010811 | 0.127358 | 848 | 32 | 93 | 26.5 | 0.856757 | 0 | 0 | 0 | 0 | 0 | 0.027123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.35 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
865b48e5b6d60c2c5b81fb4b0a827e80f5502ece
| 4,482 |
py
|
Python
|
engine_wrapper.py
|
lidevelopers/Lishogi-Bot-1
|
5e669870930fe497e323324f36ccdbf5b04d26d3
|
[
"MIT"
] | null | null | null |
engine_wrapper.py
|
lidevelopers/Lishogi-Bot-1
|
5e669870930fe497e323324f36ccdbf5b04d26d3
|
[
"MIT"
] | 2 |
2021-06-28T11:09:19.000Z
|
2021-06-30T16:59:13.000Z
|
engine_wrapper.py
|
lidevelopers/Lishogi-Bot-1
|
5e669870930fe497e323324f36ccdbf5b04d26d3
|
[
"MIT"
] | 9 |
2021-06-28T08:06:08.000Z
|
2021-10-06T05:01:57.000Z
|
import os
import shogi
import backoff
import subprocess
from util import *
import logging
logger = logging.getLogger(__name__)
import engine_ctrl
@backoff.on_exception(backoff.expo, BaseException, max_time=120)
def create_engine(config, board):
    """Build a USIEngine from the ``engine`` section of *config*.

    Retried with exponential backoff for up to 120s, since the engine
    binary may not be immediately available.
    """
    cfg = config["engine"]
    engine_path = os.path.realpath(os.path.join(cfg["dir"], cfg["name"]))
    engine_type = cfg.get("protocol")  # NOTE(review): read but never used here
    engine_options = cfg.get("engine_options")
    commands = [engine_path]
    if engine_options:
        # engine options are passed as --key=value command-line flags
        for k, v in engine_options.items():
            commands.append("--{}={}".format(k, v))
    silence_stderr = cfg.get("silence_stderr", False)
    return USIEngine(board, commands, cfg.get("usi_options", {}), cfg.get("go_commands", {}), silence_stderr)
class EngineWrapper:
    """Abstract interface for engine backends.

    Concrete subclasses (see USIEngine below) implement the search and
    bookkeeping hooks; most base methods are intentionally no-ops.
    """
    def __init__(self, board, commands, options=None, silence_stderr=False):
        pass
    def search_for(self, board, movetime):
        # search for a move within a fixed movetime
        pass
    def first_search(self, board, movetime):
        pass
    def search(self, game, board, btime, wtime, binc, winc):
        # search under clock constraints (remaining times + increments)
        pass
    def print_stats(self):
        pass
    def get_opponent_info(self, game):
        pass
    def name(self):
        # engine-reported name (populated by the subclass's handshake)
        return self.engine.name
    def report_game_result(self, game, board):
        pass
    def quit(self):
        # terminate the underlying engine process
        self.engine.kill_process()
    def print_handler_stats(self):
        pass
    def get_handler_stats(self):
        pass
class USIEngine(EngineWrapper):
def __init__(self, board, commands, options, go_commands={}, silence_stderr=False):
commands = commands[0] if len(commands) == 1 else commands
self.go_commands = go_commands
self.engine = engine_ctrl.Engine(commands)
self.engine.usi()
if options:
for name, value in options.items():
self.engine.setoption(name, value)
self.engine.isready()
def first_search(self, board, movetime):
best_move, _ = self.engine.go(board.sfen(), "", movetime=movetime)
return best_move
def search_with_ponder(self, game, board, btime, wtime, binc, winc, byo, ponder=False):
moves = [m.usi() for m in list(board.move_stack)]
cmds = self.go_commands
if len(cmds) > 0:
best_move, ponder_move = self.engine.go(
game.initial_fen,
moves,
nodes=cmds.get("nodes"),
depth=cmds.get("depth"),
movetime=cmds.get("movetime"),
ponder=ponder
)
else:
best_move, ponder_move = self.engine.go(
game.initial_fen,
moves,
btime=btime,
wtime=wtime,
binc=binc,
winc=winc,
byo=byo,
ponder=ponder
)
return (best_move, ponder_move)
def search(self, game, board, btime, wtime, binc, winc):
cmds = self.go_commands
moves = [m.usi() for m in list(board.move_stack)]
best_move, _ = self.engine.go(
game.initial_fen,
moves,
btime=btime,
wtime=wtime,
binc=binc,
winc=winc,
depth=cmds.get("depth"),
nodes=cmds.get("nodes"),
movetime=cmds.get("movetime")
)
return best_move
def stop(self):
self.engine.kill_process()
def print_stats(self, stats=None):
if stats is None:
stats = ['score', 'depth', 'nodes', 'nps']
info = self.engine.info
for stat in stats:
if stat in info:
logger.info("{}: {}".format(stat, info[stat]))
def get_stats(self, stats=None):
if stats is None:
stats = ['score', 'depth', 'nodes', 'nps']
info = self.engine.info
stats_str = []
for stat in stats:
if stat in info:
stats_str.append("{}: {}".format(stat, info[stat]))
return stats_str
def get_opponent_info(self, game):
name = game.opponent.name
if name:
rating = game.opponent.rating if game.opponent.rating is not None else "none"
title = game.opponent.title if game.opponent.title else "none"
player_type = "computer" if title == "BOT" else "human"
def report_game_result(self, game, board):
    """Feed the final position to the engine protocol.

    The ``game`` parameter is unused but kept for interface parity.
    """
    protocol = self.engine.protocol
    protocol._position(board)
| 29.486842 | 109 | 0.566934 | 528 | 4,482 | 4.660985 | 0.206439 | 0.056887 | 0.026412 | 0.026006 | 0.422999 | 0.369768 | 0.323446 | 0.222267 | 0.201138 | 0.168631 | 0 | 0.00197 | 0.320616 | 4,482 | 151 | 110 | 29.682119 | 0.80624 | 0 | 0 | 0.471074 | 0 | 0 | 0.041499 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173554 | false | 0.07438 | 0.057851 | 0.008264 | 0.297521 | 0.024793 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
865e9017c35669feb5f2b679820ab813bc9d8b73
| 533 |
py
|
Python
|
scene_action2.py
|
encela95dus/ios_pythonista_examples
|
e136cdcb05126f0f9b9f6fb6365870876b419619
|
[
"MIT"
] | 36 |
2019-01-12T04:17:49.000Z
|
2022-03-31T05:33:29.000Z
|
scene_action2.py
|
Backup-eric645/ios_pythonista_examples
|
e136cdcb05126f0f9b9f6fb6365870876b419619
|
[
"MIT"
] | null | null | null |
scene_action2.py
|
Backup-eric645/ios_pythonista_examples
|
e136cdcb05126f0f9b9f6fb6365870876b419619
|
[
"MIT"
] | 15 |
2018-12-30T21:18:05.000Z
|
2022-01-30T13:17:07.000Z
|
import scene
class MyScene(scene.Scene):
    """Scene showing one 'A' label that slides right after a touch ends."""
    def setup(self):
        # Label starts near the left edge; motion is gated by start_flag.
        self.label_node = scene.LabelNode('A',
            position=(100,400), parent=self)
        self.start_flag = False
    def update(self):
        # Advance 2 points per frame until x reaches 340, then stop.
        if not self.start_flag:
            return
        x, y = self.label_node.position
        if x < 340:
            self.label_node.position = (x + 2, y)
        else:
            self.start_flag = False
    def touch_ended(self, touch):
        self.start_flag = True
scene.run(MyScene())
| 25.380952 | 52 | 0.525328 | 64 | 533 | 4.25 | 0.453125 | 0.132353 | 0.191176 | 0.132353 | 0.154412 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029851 | 0.371482 | 533 | 20 | 53 | 26.65 | 0.78209 | 0 | 0 | 0.125 | 0 | 0 | 0.001876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1875 | false | 0 | 0.0625 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
865f59e775e337c6b42c37791b8b1b83a1c4fa34
| 2,522 |
py
|
Python
|
pysh/bash_vm/shell_command.py
|
JordanKoeller/Pysch
|
6775db00e6d551328ce49a50a5987223a9e9a9c3
|
[
"MIT"
] | null | null | null |
pysh/bash_vm/shell_command.py
|
JordanKoeller/Pysch
|
6775db00e6d551328ce49a50a5987223a9e9a9c3
|
[
"MIT"
] | null | null | null |
pysh/bash_vm/shell_command.py
|
JordanKoeller/Pysch
|
6775db00e6d551328ce49a50a5987223a9e9a9c3
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import subprocess
import os
from typing import List, Dict, Iterator, Optional, Tuple
class ShellCommand:
    """A shell command executed through ``bash -c``."""

    def __init__(self, cmd: str):
        # bash -c gives the string full shell semantics (pipes, globs, ...).
        # Fix: the original wrapped cmd in a pointless f-string f'{cmd}'.
        self.run_args = ["bash", "-c", cmd]
        # self.run_args: List[str] = [executable, *args]

    def exec(self, **extra_environ: str) -> ShellCommandOutput:
        """Run the command and capture stdout.

        :param extra_environ: variables merged over ``os.environ`` for this run
        :returns: ShellCommandOutput wrapping decoded stdout and the exit code
        """
        # Fix: ``**(extra_environ if extra_environ else {})`` was redundant;
        # unpacking an empty dict is already a no-op.
        result = subprocess.run(self.run_args,
                                stdout=subprocess.PIPE,
                                env={**os.environ, **extra_environ})
        print("Finished shell command")
        return ShellCommandOutput(str(result.stdout, 'utf-8'), result.returncode)
class ShellCommandOutput:
    """Captured stdout and exit code of a finished shell command."""

    def __init__(self, output_body: str, code: int):
        self._code = code
        self._value = output_body

    @property
    def succeeded(self) -> bool:
        """True when the command exited with status 0."""
        return self._code == 0

    @property
    def code(self) -> int:
        return self._code

    @property
    def value(self) -> str:
        return self._value

    def lines(self) -> List[ShellCommandOutput]:
        """Split the output into one ShellCommandOutput per non-empty line."""
        return [
            ShellCommandOutput(substr, self.code)
            for substr in self.value.splitlines()
            if substr
        ]

    def __iter__(self) -> Iterator[str]:
        return iter(self._split_tokens())

    def __str__(self) -> str:
        return f'<STDOUT value={self.value} code={self.code} >'

    def _split_tokens(self) -> List[str]:
        """Tokenize the output shell-style.

        Whitespace separates tokens; a matching pair of single or double
        quotes groups one token. Raises ValueError when the other quote
        style appears inside a quoted token.
        """
        ret = []
        in_quotes = None
        accumulator: List[str] = []
        for char in self.value:
            if _whitespace(char) and not in_quotes and accumulator:
                ret.append(''.join(accumulator))
                accumulator = []
            elif in_quotes is None and _quotes(char):
                in_quotes = char
            elif in_quotes and in_quotes == char:
                in_quotes = None
                if accumulator:
                    ret.append(''.join(accumulator))
                    accumulator = []
            elif in_quotes and _quotes(char):
                raise ValueError(
                    f"Found unmatched quote characters in string {self.value}")
            else:
                accumulator.append(char)
        # BUG FIX: flush the trailing token. Previously, input that did not
        # end in whitespace or a closing quote silently dropped its last
        # token (e.g. "a b" tokenized to ['a']).
        if accumulator:
            ret.append(''.join(accumulator))
        return ret
def _quotes(c: str) -> bool:
return c in ['"', "'"]
def _whitespace(c: str) -> bool:
return str.isspace(c)
| 28.022222 | 81 | 0.527756 | 257 | 2,522 | 4.984436 | 0.307393 | 0.049961 | 0.025761 | 0.037471 | 0.090554 | 0.090554 | 0.090554 | 0.090554 | 0.090554 | 0 | 0 | 0.001261 | 0.371134 | 2,522 | 89 | 82 | 28.337079 | 0.806431 | 0.018239 | 0 | 0.134328 | 0 | 0 | 0.056589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.179104 | false | 0 | 0.059701 | 0.119403 | 0.41791 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 |
0
| 1 |
866017a177effed366d9a7810ad090cd23a963da
| 1,163 |
py
|
Python
|
ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py
|
PierreExeter/custom_gym_envs
|
2b6a1c16a4198c8d9fa64f10fe09a041826ac81a
|
[
"MIT"
] | 1 |
2020-09-25T01:51:58.000Z
|
2020-09-25T01:51:58.000Z
|
ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py
|
PierreExeter/custom_gym_envs
|
2b6a1c16a4198c8d9fa64f10fe09a041826ac81a
|
[
"MIT"
] | null | null | null |
ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py
|
PierreExeter/custom_gym_envs
|
2b6a1c16a4198c8d9fa64f10fe09a041826ac81a
|
[
"MIT"
] | 1 |
2021-07-16T02:55:59.000Z
|
2021-07-16T02:55:59.000Z
|
#!/usr/bin/env python
# Random-agent demo: drives the turtlebot2 maze environment (openai_ros)
# with uniformly random actions and prints every transition.
import gym
import rospy
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
# initialise environment
# The environment name is read from the ROS parameter server (set by launch files).
rospy.init_node('turtlebot2_maze_random', anonymous=True, log_level=rospy.WARN)
task_and_robot_environment_name = rospy.get_param('/turtlebot2/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
# Describe the spaces before running.
print("Environment: ", env)
print("Action space: ", env.action_space)
# print(env.action_space.high)
# print(env.action_space.low)
print("Observation space: ", env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
# Run 20 episodes of at most 100 random steps each.
for episode in range(20):
    env.reset()
    for t in range(100):
        # Sample a uniformly random action and step the simulation.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)
        print("episode: ", episode)
        print("timestep: ", t)
        print("obs: ", obs)
        print("action:", action)
        print("reward: ", reward)
        print("done: ", done)
        print("info: ", info)
        if done:
            print("Episode {} finished after {} timesteps".format(episode, t+1))
            break
env.close()
| 27.690476 | 96 | 0.687016 | 149 | 1,163 | 5.154362 | 0.395973 | 0.071615 | 0.072917 | 0.089844 | 0.105469 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008466 | 0.187446 | 1,163 | 42 | 97 | 27.690476 | 0.804233 | 0.085985 | 0 | 0 | 0 | 0 | 0.188679 | 0.061321 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.481481 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
866731500bf9de7d963d33a61b133cfd0fb18eda
| 1,727 |
py
|
Python
|
examples/src/python/join_streamlet_topology.py
|
aaronstjohn/incubator-heron
|
bdc35f8d23296472983956a477ea38da54d16b2b
|
[
"Apache-2.0"
] | 2 |
2016-07-04T07:10:31.000Z
|
2018-03-28T16:59:02.000Z
|
examples/src/python/join_streamlet_topology.py
|
aaronstjohn/incubator-heron
|
bdc35f8d23296472983956a477ea38da54d16b2b
|
[
"Apache-2.0"
] | 1 |
2019-05-08T22:30:16.000Z
|
2019-05-08T22:30:16.000Z
|
examples/src/python/join_streamlet_topology.py
|
aaronstjohn/incubator-heron
|
bdc35f8d23296472983956a477ea38da54d16b2b
|
[
"Apache-2.0"
] | 1 |
2017-06-05T17:55:45.000Z
|
2017-06-05T17:55:45.000Z
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''join_streamlet_topology.py: module is an example of how to use the join operator'''
import sys
from heronpy.streamlet.builder import Builder
from heronpy.streamlet.runner import Runner
from heronpy.streamlet.config import Config
from heronpy.streamlet.windowconfig import WindowConfig
from heronpy.connectors.mock.arraylooper import ArrayLooper
# pylint: disable=superfluous-parens
if __name__ == '__main__':
    # The topology name must be the single command-line argument.
    if len(sys.argv) != 2:
        print("Topology's name is not specified")
        sys.exit(1)
    graph = Builder()
    # Two mock sources replaying pairs that share "key1" so the join matches.
    left_stream = graph.new_source(
        ArrayLooper([["key1", "a"], ["key1", "b"]], sleep=1))
    right_stream = graph.new_source(
        ArrayLooper([["key1", "c"], ["key1", "d"]], sleep=1))
    # Join within a sliding window (length 2, interval 1); log concatenations.
    left_stream.join(right_stream,
                     WindowConfig.create_sliding_window(2, 1),
                     lambda x, y: x + y).log()
    topology_runner = Runner()
    conf = Config()
    topology_runner.run(sys.argv[1], conf, graph)
| 35.979167 | 93 | 0.735379 | 251 | 1,727 | 4.988048 | 0.505976 | 0.047923 | 0.063898 | 0.025559 | 0.049521 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013774 | 0.159236 | 1,727 | 47 | 94 | 36.744681 | 0.848485 | 0.536769 | 0 | 0 | 0 | 0 | 0.07732 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.352941 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
86695a9d77f6427fc910ad0a37e4e6d95359ee20
| 1,918 |
py
|
Python
|
tests/test.py
|
Nekmo/spice
|
717a2cc24ad969e1caec2aabeffc30a796c6ec91
|
[
"MIT"
] | null | null | null |
tests/test.py
|
Nekmo/spice
|
717a2cc24ad969e1caec2aabeffc30a796c6ec91
|
[
"MIT"
] | null | null | null |
tests/test.py
|
Nekmo/spice
|
717a2cc24ad969e1caec2aabeffc30a796c6ec91
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import sys, os
from time import sleep
sys.path.insert(0, '/home/may/Dropbox/Programming/spice/')
import spice_api as spice
def main():
    """Smoke-test the spice_api client: search, lookup, updates and list stats."""
    # Credentials are loaded from a local 'auth' file.
    creds = spice.load_auth_from_file('auth')
    print(creds)
    # Title search, then a direct lookup by id.
    results = spice.search('Re:Zero Kara Hajimeru Isekai Seikatsu', spice.get_medium('anime'), creds)
    print(results[0].title)
    souma = spice.search_id(1, spice.get_medium('manga'), creds)
    print(souma.raw_data)
    print(souma.title)
    print(souma.chapters)
    print(souma.volumes)
    # Build a blank anime payload and populate a few fields (not pushed).
    re_zero_data = spice.get_blank(spice.get_medium('anime'))
    re_zero_data.episodes = 0
    re_zero_data.status = spice.get_status('reading')
    re_zero_data.score = 8
    re_zero_data.tags = ['this the first time a show that made me cringe']
    # Build and push a manga progress update for entry id 45757.
    shokugeki_data = spice.get_blank(spice.get_medium('manga'))
    shokugeki_data.chapters = 13
    shokugeki_data.volumes = 1
    shokugeki_data.status = 1
    shokugeki_data.score = 8
    spice.update(shokugeki_data, 45757, spice.get_medium('manga'), creds)
    # Fetch a user's anime list and exercise every statistics helper.
    anime_list = spice.get_list(spice.get_medium('ANIME'), 'Utagai-', creds)
    print(anime_list.avg_score())
    print(anime_list.median_score())
    print(anime_list.mode_score())
    print(anime_list.extremes())
    print(anime_list.p_stddev())
    print(anime_list.p_var())
    print(anime_list.get_num_status(1))
    print(anime_list.get_total())
    print(anime_list.get_days())
    print(anime_list.exists(11734))
    print(len(anime_list.get_ids()))
    print(len(anime_list.get_titles()))
    print(anime_list.get_status(1))
    print(anime_list.get_score(10))
    print(anime_list.exists_as_status(11734, 1))
    print(anime_list.score_diff())
    # Compare against a second user's list.
    anime_list2 = spice.get_list(spice.get_medium('ANIME'), 'Pickleplatter', creds)
    print("Similarity coefficient: {}".format(anime_list.compatibility(anime_list2)))
# Run the smoke test only when executed as a script.
if __name__ == '__main__':
    main()
| 34.25 | 101 | 0.717414 | 279 | 1,918 | 4.65233 | 0.333333 | 0.124807 | 0.151002 | 0.065485 | 0.200308 | 0.132512 | 0.095532 | 0 | 0 | 0 | 0 | 0.020258 | 0.150678 | 1,918 | 55 | 102 | 34.872727 | 0.77655 | 0 | 0 | 0 | 0 | 0 | 0.114181 | 0.01877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0.104167 | 0 | 0.125 | 0.479167 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
86798d0504dd04df9298eafb92e49de14fb4653a
| 3,804 |
py
|
Python
|
cloudferry/actions/prechecks/check_vmax_prerequisites.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 6 |
2017-04-20T00:49:49.000Z
|
2020-12-20T16:27:10.000Z
|
cloudferry/actions/prechecks/check_vmax_prerequisites.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 3 |
2017-04-08T15:47:16.000Z
|
2017-05-18T17:40:59.000Z
|
cloudferry/actions/prechecks/check_vmax_prerequisites.py
|
SVilgelm/CloudFerry
|
4459c0d21ba7ccffe51176932197b352e426ba63
|
[
"Apache-2.0"
] | 8 |
2017-04-07T23:42:36.000Z
|
2021-08-10T11:05:10.000Z
|
# Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import logging
from cloudferry.lib.base import exception
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import local
from cloudferry.lib.utils import remote_runner
LOG = logging.getLogger(__name__)
class CheckVMAXPrerequisites(action.Action):
    """This verifies prerequisites required for NFS to VMAX iSCSI cinder
    volume migration"""

    def _iscsiadm_is_installed_locally(self):
        """Abort the migration if the local iscsiadm tool is missing."""
        LOG.info("Checking if iscsiadm tool is installed")
        try:
            # Probe by running iscsiadm; stderr/stdout discarded by the shell.
            local.run('iscsiadm --help &>/dev/null')
        except local.LocalExecutionFailed:
            msg = ("iscsiadm is not available on the local host. Please "
                   "install iscsiadm tool on the node you running on or "
                   "choose other cinder backend for migration. iscsiadm is "
                   "mandatory for migrations with EMC VMAX cinder backend")
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)

    def _check_local_sudo_password_set(self):
        """Abort if running as non-root without passwordless sudo or a
        configured `local_sudo_password`."""
        current_user = getpass.getuser()
        if current_user != 'root' and \
                self.cfg.migrate.local_sudo_password is None:
            try:
                # Cheap command just to test that sudo works without a password.
                local.sudo('ls')
            except local.LocalExecutionFailed:
                msg = ("CloudFerry is running as '{user}' user, but "
                       "passwordless sudo does not seem to be configured on "
                       "current host. Please either specify password in "
                       "`local_sudo_password` config option, or run "
                       "CloudFerry as root user.").format(user=current_user)
                LOG.error(msg)
                raise exception.AbortMigrationError(msg)

    def _ssh_connectivity_between_controllers(self):
        """Abort unless the source controller can ssh into the destination."""
        src_host = self.cfg.src.ssh_host
        src_user = self.cfg.src.ssh_user
        dst_host = self.cfg.dst.ssh_host
        dst_user = self.cfg.dst.ssh_user
        LOG.info("Checking ssh connectivity between '%s' and '%s'",
                 src_host, dst_host)
        rr = remote_runner.RemoteRunner(src_host, src_user)
        # Skip host-key verification: hosts may be freshly provisioned.
        ssh_opts = ('-o UserKnownHostsFile=/dev/null '
                    '-o StrictHostKeyChecking=no')
        cmd = "ssh {opts} {user}@{host} 'echo ok'".format(opts=ssh_opts,
                                                          user=dst_user,
                                                          host=dst_host)
        try:
            # Run the ssh probe FROM the source host, not from this machine.
            rr.run(cmd)
        except remote_runner.RemoteExecutionError:
            msg = ("No ssh connectivity between source host '{src_host}' and "
                   "destination host '{dst_host}'. Make sure you have keys "
                   "and correct configuration on these nodes. To verify run "
                   "'{ssh_cmd}' from '{src_host}' node")
            msg = msg.format(src_host=src_host, dst_host=dst_host, ssh_cmd=cmd)
            LOG.error(msg)
            raise exception.AbortMigrationError(msg)

    def run(self, **kwargs):
        """Run all prerequisite checks; no-op for non-VMAX backends."""
        if self.cfg.dst_storage.backend != 'iscsi-vmax':
            return
        self._iscsiadm_is_installed_locally()
        self._ssh_connectivity_between_controllers()
        self._check_local_sudo_password_set()
| 41.347826 | 79 | 0.624869 | 457 | 3,804 | 5.056893 | 0.380744 | 0.021203 | 0.023799 | 0.02077 | 0.168758 | 0.064907 | 0.064907 | 0.064907 | 0 | 0 | 0 | 0.002985 | 0.295478 | 3,804 | 91 | 80 | 41.802198 | 0.859328 | 0.166404 | 0 | 0.177419 | 0 | 0 | 0.268974 | 0.023182 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0.129032 | 0.096774 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
8681dc9beb7ce1fcfe008337221dc6feb16aedb5
| 1,888 |
py
|
Python
|
conan/tools/env/virtualrunenv.py
|
dscole/conan
|
ff7b8e6703e8407773968517d68424b9ec59aa30
|
[
"MIT"
] | null | null | null |
conan/tools/env/virtualrunenv.py
|
dscole/conan
|
ff7b8e6703e8407773968517d68424b9ec59aa30
|
[
"MIT"
] | 1 |
2019-06-07T03:02:02.000Z
|
2019-06-07T03:02:02.000Z
|
conan/tools/env/virtualrunenv.py
|
dscole/conan
|
ff7b8e6703e8407773968517d68424b9ec59aa30
|
[
"MIT"
] | 1 |
2021-08-20T19:47:51.000Z
|
2021-08-20T19:47:51.000Z
|
from conan.tools.env import Environment
def runenv_from_cpp_info(conanfile, cpp_info):
    """ return an Environment deducing the runtime information from a cpp_info
    """
    env = Environment(conanfile)
    # This happens when the dependency is a private one = BINARY_SKIP
    if cpp_info is None:
        return env
    # Map each runtime search-path variable to the cpp_info paths feeding it.
    # cpp_info.exes is not defined yet, so bin_paths drives PATH.
    # lib_paths feed both the Linux and the macOS dynamic-loader variables;
    # if it is a build_require this will be the build-os, otherwise the host-os.
    var_paths = (
        ("PATH", cpp_info.bin_paths),
        ("LD_LIBRARY_PATH", cpp_info.lib_paths),
        ("DYLD_LIBRARY_PATH", cpp_info.lib_paths),
        ("DYLD_FRAMEWORK_PATH", cpp_info.framework_paths),
    )
    for var, paths in var_paths:
        if paths:
            env.prepend_path(var, paths)
    return env
class VirtualRunEnv:
    """ captures the conanfile environment that is defined from its
    dependencies, and also from profiles
    """

    def __init__(self, conanfile):
        self._conanfile = conanfile

    def environment(self):
        """ collects the runtime information from dependencies. For normal libraries should be
        very occasional
        """
        # FIXME: Missing profile info
        # FIXME: Cache value?
        runenv = Environment(self._conanfile)
        deps = self._conanfile.dependencies
        # Host requirements first, then test requirements, preserving order.
        for requirements in (deps.host, deps.test):
            for _, dep in requirements.items():
                if dep.runenv_info:
                    runenv.compose_env(dep.runenv_info)
                runenv.compose_env(
                    runenv_from_cpp_info(self._conanfile, dep.cpp_info))
        return runenv

    def generate(self, auto_activate=False):
        """Write the run-environment script, but only when it is non-empty."""
        env = self.environment()
        if env:
            env.save_script("conanrunenv", auto_activate=auto_activate)
| 37.76 | 94 | 0.697034 | 253 | 1,888 | 4.928854 | 0.351779 | 0.078589 | 0.028869 | 0.064154 | 0.158781 | 0.158781 | 0.056135 | 0.056135 | 0 | 0 | 0 | 0 | 0.227225 | 1,888 | 49 | 95 | 38.530612 | 0.854695 | 0.26589 | 0 | 0.068966 | 0 | 0 | 0.049364 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 1 | 0.137931 | false | 0 | 0.034483 | 0 | 0.310345 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
868928ba8707be83d375694167add597b520a225
| 616 |
py
|
Python
|
SmartCache/sim/Utilities/setup.py
|
Cloud-PG/smart-cache
|
467987abece3fd4830fd615288046359761229f8
|
[
"Apache-2.0"
] | 1 |
2019-10-13T09:05:24.000Z
|
2019-10-13T09:05:24.000Z
|
SmartCache/sim/Utilities/setup.py
|
Cloud-PG/smart-cache
|
467987abece3fd4830fd615288046359761229f8
|
[
"Apache-2.0"
] | null | null | null |
SmartCache/sim/Utilities/setup.py
|
Cloud-PG/smart-cache
|
467987abece3fd4830fd615288046359761229f8
|
[
"Apache-2.0"
] | 1 |
2019-05-16T11:53:38.000Z
|
2019-05-16T11:53:38.000Z
|
from distutils.core import setup

setup(
    name='utils',
    version='1.0.0',
    author='Mirco Tracolli',
    author_email='mirco.tracolli@pg.infn.it',
    packages=[
        'utils',
    ],
    scripts=[],
    url='https://github.com/Cloud-PG/smart-cache',
    license='Apache 2.0 License',
    description='Utils for the SmartCache project',
    long_description="To do...",
    # NOTE(review): install_requires is a setuptools keyword; plain distutils
    # ignores it — confirm the build actually uses setuptools.
    install_requires=open("requirements.txt").read(),
    # BUG FIX: the keyword is `classifiers` (plural); the original passed
    # `classifier`, which setup() treats as an unknown option and ignores.
    classifiers=[
        "Operating System :: POSIX :: Linux",
        "License :: OSI Approved :: Apache 2.0 License",
        "Programming Language :: Python :: 3 :: Only"
    ]
)
| 26.782609 | 56 | 0.61039 | 71 | 616 | 5.253521 | 0.774648 | 0.069705 | 0.042895 | 0.080429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016807 | 0.227273 | 616 | 22 | 57 | 28 | 0.766807 | 0 | 0 | 0 | 0 | 0 | 0.469156 | 0.040584 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
868f63854096baf68c5ff8cc2009603138d30b30
| 5,195 |
py
|
Python
|
delphiIDE.py
|
JeisonJHA/Plugins-Development
|
cccb58908eed6114c569e53d5710e70b8d53f5c5
|
[
"MIT"
] | null | null | null |
delphiIDE.py
|
JeisonJHA/Plugins-Development
|
cccb58908eed6114c569e53d5710e70b8d53f5c5
|
[
"MIT"
] | null | null | null |
delphiIDE.py
|
JeisonJHA/Plugins-Development
|
cccb58908eed6114c569e53d5710e70b8d53f5c5
|
[
"MIT"
] | null | null | null |
import sublime_plugin
class MethodDeclaration(object):
    """Plain data holder describing a Delphi method declaration.

    Attributes set here:
        methodclass:        owning ClassDeclaration, or None
        has_implementation: True when an implementation section was found
        has_interface:      True when an interface declaration was found

    Attributes assigned later by callers:
        methodname, methodregion, visibility, params

    The original wrapped every attribute in an identical property/setter
    pair with no validation; plain attributes expose the same interface
    (same names, same read/write behavior) without the boilerplate. As
    before, reading a caller-assigned attribute before it is set raises
    AttributeError.
    """
    def __init__(self):
        self.methodclass = None
        self.has_implementation = False
        self.has_interface = False
class ClassDeclaration(object):
    """Plain data holder describing a Delphi class declaration.

    Callers assign: classname, classregion, privateregion, protectedregion,
    publicregion, publishedregion.

    The original defined an identical no-validation property/setter pair
    for each attribute; plain dynamic attributes give the same external
    interface. There is deliberately no __init__: exactly as before,
    reading an attribute before it has been assigned raises AttributeError.
    """
    pass
class DelphiIdeCommand(sublime_plugin.TextCommand):
    """Sublime Text command that dispatches to a sub-command named by its
    `teste` argument.

    Example keybinding:
    // { "keys": ["ctrl+shift+x"], "command": "delphi_ide", "args": {"teste": "delphimethodnav"}}
    """
    # view.window().run_command('show_panel',
    # args={"panel": 'output.find_results', "toggle": True})
    def run(self, edit, teste):
        """Look up the method named *teste* on this command and invoke it."""
        print('teste[0]:%s' % teste)
        method = None
        try:
            method = getattr(self, teste)
        except AttributeError:
            raise NotImplementedError("Class `{}` does not implement `{}`".
                                      format(self.__class__.__name__,
                                             teste))
        method()

    def delphimethodnav(self):
        print('vai doido')

    def getMethodInformation(self):
        """Build a MethodDeclaration for the method under the cursor.

        Returns None when the cursor is not inside a Delphi method
        implementation.
        """
        view = self.view
        cursor_region = view.sel()[0]
        cursor_pt = view.sel()[0].begin()
        if not view.match_selector(cursor_pt,
                                   'function.implementation.delphi'):
            # exit because it is not in a method
            return None

        def params(region):
            # Parameter-name regions inside the method's parameter list.
            params_region = view.find_by_selector(
                'meta.function.parameters.delphi')
            param_name_region = view.find_by_selector(
                'variable.parameter.function.delphi')
            params_region_filt = [
                s for s in params_region if region.contains(s)]
            params_region_filt = [
                s for s in param_name_region if
                params_region_filt[0].contains(s)]
            return params_region_filt

        def paramsFromRegion(region):
            # FIX: narrowed from a bare `except:` — a method without a
            # parameter list makes params() raise IndexError; anything else
            # should not be silently swallowed (and bare except also caught
            # KeyboardInterrupt/SystemExit).
            try:
                params_region_filt = params(region)
                return [view.substr(x) for x in params_region_filt]
            except IndexError:
                return []

        def getFunctionName():
            functionname = view.find_by_selector('entity.name.function')
            functionnamefiltered = [
                n for n in functionname if method.methodregion[0].contains(n)]
            return view.substr(functionnamefiltered[0])

        method = MethodDeclaration()
        selector = view.find_by_selector
        method.methodregion = [r for r in selector('meta.function.delphi')
                               if cursor_region.intersects(r)]
        method.methodname = getFunctionName()
        # BUG FIX: paramsFromRegion is a local function defined above, not a
        # method; the original called self.paramsFromRegion(...), which
        # raised AttributeError on every invocation.
        method.params = paramsFromRegion(method.methodregion[0])
        return method

    def getClassInformation(self):
        pass
| 26.237374 | 99 | 0.609047 | 504 | 5,195 | 6.099206 | 0.212302 | 0.046519 | 0.059206 | 0.023422 | 0.030579 | 0.014964 | 0.014964 | 0 | 0 | 0 | 0 | 0.001937 | 0.304524 | 5,195 | 197 | 100 | 26.370558 | 0.848879 | 0.071992 | 0 | 0.124088 | 0 | 0 | 0.039375 | 0.019792 | 0 | 0 | 0 | 0 | 0 | 1 | 0.248175 | false | 0.007299 | 0.007299 | 0.094891 | 0.416058 | 0.014599 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86ab8849571d80e31e545baaa8fc3a7e45faa001
| 6,176 |
py
|
Python
|
tests/test_agent/test_manhole.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
tests/test_agent/test_manhole.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
tests/test_agent/test_manhole.py
|
guidow/pyfarm-agent
|
bb5d464f9f6549a3db3529a93e3d9f388b365586
|
[
"Apache-2.0"
] | null | null | null |
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Oliver Palmer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
from pprint import pprint
from random import randint
from StringIO import StringIO
from textwrap import dedent
try:
from unittest.mock import patch
except ImportError: # pragma: no cover
from mock import patch
from twisted.internet.protocol import ServerFactory
from twisted.cred.portal import Portal
from twisted.conch.telnet import (
ITelnetProtocol, TelnetBootstrapProtocol, TelnetTransport)
from pyfarm.agent.testutil import TestCase
from pyfarm.agent.manhole import (
LoggingManhole, TransportProtocolFactory, TelnetRealm,
manhole_factory, show)
Peer = namedtuple("Peer", ("host", "port"))
class FakeLoggingManhole(LoggingManhole):
QUIT = False
GET_PEER_CALLS = 0
class terminal(object):
RIGHT_ARROW, LEFT_ARROW = None, None
class transport(object):
@classmethod
def getPeer(cls):
FakeLoggingManhole.GET_PEER_CALLS += 1
return Peer(os.urandom(12).encode("hex"), randint(1024, 65535))
def handle_QUIT(self):
self.QUIT = True
class TestManholeBase(TestCase):
def setUp(self):
TelnetRealm.NAMESPACE = None
FakeLoggingManhole.GET_PEER_CALLS = 0
FakeLoggingManhole.QUIT = False
class TestManholeFactory(TestManholeBase):
def test_assertions(self):
with self.assertRaises(AssertionError):
manhole_factory(None, "", "")
with self.assertRaises(AssertionError):
manhole_factory({}, None, "")
with self.assertRaises(AssertionError):
manhole_factory({}, "", None)
def test_instance_one(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
password = os.urandom(32).encode("hex")
manhole_factory(namespace, username, password)
with self.assertRaises(AssertionError):
manhole_factory(namespace, username, password)
def test_instance(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
password = os.urandom(32).encode("hex")
manhole = manhole_factory(namespace, username, password)
self.assertEqual(namespace, {"bob": None})
self.assertEqual(
TelnetRealm.NAMESPACE,
{"bob": None, "pp": pprint, "show": show})
self.assertIsInstance(manhole, ServerFactory)
self.assertIsInstance(manhole.protocol, TransportProtocolFactory)
self.assertIsInstance(manhole.protocol.portal, Portal)
# There could be multiple password checkers, check for the one
# we know we should have added.
for _, instance in manhole.protocol.portal.checkers.items():
found = False
for user, passwd in instance.users.items():
if user == username and passwd == password:
found = True
if found:
break
else:
self.fail("Failed to find correct username and password.")
def test_request_avatar(self):
realm = TelnetRealm()
avatar = realm.requestAvatar(None, ITelnetProtocol)
self.assertEqual(len(avatar), 3)
self.assertIs(avatar[0], ITelnetProtocol)
self.assertIsInstance(avatar[1], TelnetBootstrapProtocol)
self.assertTrue(callable(avatar[2]))
def test_request_avatar_error(self):
realm = TelnetRealm()
with self.assertRaises(NotImplementedError):
realm.requestAvatar(None, None)
def test_protocol_factory(self):
factory = TransportProtocolFactory(None)
transport = factory()
self.assertIsInstance(transport, TelnetTransport)
class TestManholeShow(TestManholeBase):
def test_uses_namespace(self):
namespace = {"bob": None}
username = os.urandom(32).encode("hex")
password = os.urandom(32).encode("hex")
manhole_factory(namespace, username, password)
output = StringIO()
with patch("sys.stdout", output):
show()
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(output, "objects: ['bob', 'pp', 'show']")
def test_custom_object(self):
class Foobar(object):
a, b, c, d, e = True, 1, "yes", {}, 0.0
output = StringIO()
with patch("sys.stdout", output):
show(Foobar)
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(
output,
dedent("""
data attributes of <class 'tests.test_agent.test_manhole.Foobar'>
a : True
b : 1
c : yes
d : {} (0 elements)
e : 0.0
""").strip())
def test_wrap_long_line(self):
class Foobar(object):
a = " " * 90
output = StringIO()
with patch("sys.stdout", output):
show(Foobar)
output.seek(0)
output = output.getvalue().strip()
self.assertEqual(
output,
dedent("""
data attributes of <class 'tests.test_agent.test_manhole.Foobar'>
a : ' """ +
""" '...
""").strip())
class TestLoggingManhole(TestManholeBase):
    """Tests for the logging-enabled manhole protocol."""

    def test_line_received(self):
        """Receiving the 'exit' line sets the QUIT flag on the manhole."""
        fake_manhole = FakeLoggingManhole()
        fake_manhole.lineReceived("exit")
        self.assertTrue(fake_manhole.QUIT)
| 32.505263 | 79 | 0.615771 | 647 | 6,176 | 5.812983 | 0.33694 | 0.018612 | 0.017549 | 0.02712 | 0.279713 | 0.250997 | 0.238235 | 0.238235 | 0.227067 | 0.227067 | 0 | 0.011322 | 0.284974 | 6,176 | 189 | 80 | 32.677249 | 0.840353 | 0.114475 | 0 | 0.316176 | 0 | 0 | 0.106221 | 0.014485 | 0 | 0 | 0 | 0 | 0.147059 | 1 | 0.095588 | false | 0.073529 | 0.102941 | 0 | 0.286765 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
86abdce88613d6ee71e638ae7487297146c3e7a8
| 338 |
py
|
Python
|
func-button/klSigmode.py
|
xcgoo/uiKLine
|
80683401d7dc66262ae645db4c2780d6e71be551
|
[
"MIT"
] | 232 |
2017-10-11T09:19:03.000Z
|
2022-03-09T01:34:49.000Z
|
func-button/klSigmode.py
|
DON-2020-LEE/uiKLine-2
|
fd1d0dca5fd6b1542af4b10c110e39361b29d378
|
[
"MIT"
] | 8 |
2017-12-09T09:10:15.000Z
|
2021-04-22T03:35:26.000Z
|
func-button/klSigmode.py
|
DON-2020-LEE/uiKLine-2
|
fd1d0dca5fd6b1542af4b10c110e39361b29d378
|
[
"MIT"
] | 132 |
2017-10-11T09:16:29.000Z
|
2022-02-09T10:37:57.000Z
|
# coding: utf-8
"""
插入所有需要的库,和函数
"""
#----------------------------------------------------------------------
def klSigmode(self):
    """Toggle the plotted signal set between all deals and open deals.

    In 'deal' mode, switch the canvas to the open-position signals and
    enter 'dealOpen' mode; otherwise switch back to the full deal
    signals and return to 'deal' mode.
    """
    if self.mode == 'deal':
        signals, next_mode = self.signalsOpen, 'dealOpen'
    else:
        signals, next_mode = self.signals, 'deal'
    self.canvas.updateSig(signals)
    self.mode = next_mode
| 21.125 | 71 | 0.446746 | 30 | 338 | 5.033333 | 0.6 | 0.15894 | 0.15894 | 0.304636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003759 | 0.213018 | 338 | 15 | 72 | 22.533333 | 0.56391 | 0.301775 | 0 | 0 | 0 | 0 | 0.072398 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86ae868b0b9598e5f2e99607cce26d99b3a34dc3
| 4,147 |
py
|
Python
|
vantage6/server/resource/recover.py
|
jaspersnel/vantage6-server
|
88ad40d23cc36eaba57c170929f7ccdd0011720a
|
[
"Apache-2.0"
] | 2 |
2020-10-19T08:59:08.000Z
|
2022-03-07T10:30:21.000Z
|
vantage6/server/resource/recover.py
|
jaspersnel/vantage6-server
|
88ad40d23cc36eaba57c170929f7ccdd0011720a
|
[
"Apache-2.0"
] | 67 |
2020-04-15T09:43:31.000Z
|
2022-03-18T08:29:17.000Z
|
vantage6/server/resource/recover.py
|
jaspersnel/vantage6-server
|
88ad40d23cc36eaba57c170929f7ccdd0011720a
|
[
"Apache-2.0"
] | 2 |
2021-01-21T15:09:26.000Z
|
2021-04-19T14:58:10.000Z
|
# -*- coding: utf-8 -*-
import logging
import datetime
from flask import request, render_template
from flask_jwt_extended import (
create_access_token,
decode_token
)
from jwt.exceptions import DecodeError
from flasgger import swag_from
from http import HTTPStatus
from pathlib import Path
from sqlalchemy.orm.exc import NoResultFound
from vantage6.common import logger_name
from vantage6.server import db
from vantage6.server.resource import (
ServicesResources
)
# Module-scoped logger; module_name is also reused by setup() below to
# build the URL path for this module's endpoints.
module_name = logger_name(__name__)
log = logging.getLogger(module_name)
def setup(api, api_base, services):
    """Register the password-recovery endpoints under *api_base*.

    Adds POST-only resources for resetting a password with a recovery
    token (/reset) and for requesting a recovery token (/lost).
    """
    path = "/".join([api_base, module_name])
    log.info(f'Setting up "{path}" and subdirectories')

    # (resource class, path suffix, endpoint name)
    endpoints = (
        (ResetPassword, '/reset', "reset_password"),
        (RecoverPassword, '/lost', 'recover_password'),
    )
    for resource, suffix, endpoint_name in endpoints:
        api.add_resource(
            resource,
            path + suffix,
            endpoint=endpoint_name,
            methods=('POST',),
            resource_class_kwargs=services
        )
# ------------------------------------------------------------------------------
# Resources / API's
# ------------------------------------------------------------------------------
class ResetPassword(ServicesResources):
    """User can use a recovery token to reset their password."""

    @swag_from(str(Path(r"swagger/post_reset_password.yaml")),
               endpoint='reset_password')
    def post(self):
        """Set a new password using the recovery token from the email.

        Expects a JSON body with 'reset_token' and 'password'; returns
        400 on a missing field or an undecodable token.
        """
        # Fixed: original docstring had four quotes and described the
        # recover endpoint instead of this reset endpoint.
        body = request.get_json()
        reset_token = body.get("reset_token")
        password = body.get("password")

        if not reset_token or not password:
            return {"msg": "reset token and/or password is missing!"}, \
                HTTPStatus.BAD_REQUEST

        # Obtain the user id embedded in the JWT recovery token.
        try:
            user_id = decode_token(reset_token)['identity'].get('id')
        except DecodeError:
            return {"msg": "Invalid recovery token!"}, HTTPStatus.BAD_REQUEST

        log.debug(user_id)
        user = db.User.get(user_id)

        # set password
        user.set_password(password)
        user.save()

        # BUGFIX: "Successfull" typo corrected in the log message.
        log.info(f"Successful password reset for '{user.username}'")
        return {"msg": "password successfully been reset!"}, \
            HTTPStatus.OK
class RecoverPassword(ServicesResources):
    """Send a mail containing a recovery token."""

    @swag_from(str(Path(r"swagger/post_recover_password.yaml")),
               endpoint='recover_password')
    def post(self):
        """Generate a recovery token from a username or email and mail it."""
        # Default response is deliberately vague so this endpoint cannot be
        # used to probe which accounts exist.
        # BUGFIX: grammar in the user-facing message ("is in our database").
        ret = {"msg": "If the username or email is in our database you "
                      "will soon receive an email"}

        # obtain username/email from the request body
        body = request.get_json()
        username = body.get("username")
        email = body.get("email")
        if not (email or username):
            return {"msg": "No username or email provided!"}, \
                HTTPStatus.BAD_REQUEST

        # Find the user in the database; if absent, stop without
        # revealing that the account is unknown.
        try:
            if username:
                user = db.User.get_by_username(username)
            else:
                user = db.User.get_by_email(email)
        except NoResultFound:
            return ret

        log.info(f"Password reset requested for '{user.username}'")

        # Generate a short-lived (1 hour) token that can reset the password.
        expires = datetime.timedelta(hours=1)
        reset_token = create_access_token(
            {"id": str(user.id)}, expires_delta=expires
        )

        self.mail.send_email(
            "password reset",
            sender="support@vantage6.ai",
            recipients=[user.email],
            text_body=render_template("mail/reset_password_token.txt",
                                      token=reset_token),
            html_body=render_template("mail/reset_password_token.html",
                                      token=reset_token)
        )
        return ret
| 30.718519 | 80 | 0.590306 | 459 | 4,147 | 5.187364 | 0.346405 | 0.033599 | 0.01008 | 0.01638 | 0.107518 | 0.094918 | 0.094918 | 0 | 0 | 0 | 0 | 0.002001 | 0.276827 | 4,147 | 134 | 81 | 30.947761 | 0.791931 | 0.15674 | 0 | 0.175824 | 0 | 0 | 0.180375 | 0.036075 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032967 | false | 0.21978 | 0.131868 | 0 | 0.252747 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
86bd70e7874de4b570ed32325f28d65eaa058486
| 4,544 |
py
|
Python
|
mandoline/line_segment3d.py
|
Spiritdude/mandoline-py
|
702cd1f9264c7d5d814600ff919406387fd86185
|
[
"BSD-2-Clause"
] | 5 |
2021-09-16T10:41:44.000Z
|
2021-11-04T14:45:24.000Z
|
mandoline/line_segment3d.py
|
Spiritdude/mandoline-py
|
702cd1f9264c7d5d814600ff919406387fd86185
|
[
"BSD-2-Clause"
] | null | null | null |
mandoline/line_segment3d.py
|
Spiritdude/mandoline-py
|
702cd1f9264c7d5d814600ff919406387fd86185
|
[
"BSD-2-Clause"
] | null | null | null |
class LineSegment3D(object):
    """A 3D line segment defined by two endpoints, kept in sorted order."""

    def __init__(self, p1, p2):
        """Initialize with two endpoints; the smaller one becomes p1."""
        if p1 > p2:
            p1, p2 = (p2, p1)
        self.p1 = p1
        self.p2 = p2
        self.count = 1  # reference count maintained by LineSegment3DCache

    def __len__(self):
        """A line segment always has exactly two endpoints."""
        return 2

    def __iter__(self):
        """Iterator generator for endpoints."""
        yield self.p1
        yield self.p2

    def __getitem__(self, idx):
        """Return endpoint 0 or 1; raise LookupError for any other index."""
        if idx == 0:
            return self.p1
        if idx == 1:
            return self.p2
        raise LookupError()

    def __hash__(self):
        """Hash on the ordered endpoint pair."""
        return hash((self.p1, self.p2))

    def __lt__(self, p):
        """Order segments lexicographically by their endpoint pairs."""
        # BUGFIX: the original body was `return self < p`, which recursed
        # into __lt__ forever. Compare the endpoint tuples instead,
        # matching the ordering implied by __cmp__.
        return (self.p1, self.p2) < (p.p1, p.p2)

    def __cmp__(self, p):
        """Compare points for sort ordering (Python 2 legacy protocol)."""
        # NOTE(review): relies on the endpoint type providing __cmp__;
        # only meaningful under Python 2.
        val = self[0].__cmp__(p[0])
        if val != 0:
            return val
        return self[1].__cmp__(p[1])

    def __format__(self, fmt):
        """Provides .format() support ('a' = bracketed, 's' = space-separated)."""
        pfx = ""
        sep = " - "
        sfx = ""
        if "a" in fmt:
            pfx = "["
            sep = ", "
            sfx = "]"
        elif "s" in fmt:
            pfx = ""
            sep = " "
            sfx = ""
        p1 = self.p1.__format__(fmt)
        p2 = self.p2.__format__(fmt)
        return pfx + p1 + sep + p2 + sfx

    def __repr__(self):
        """Standard string representation."""
        return "<LineSegment3D: {0}>".format(self)

    def __str__(self):
        """Returns a human readable coordinate string."""
        return "{0:a}".format(self)

    def translate(self, offset):
        """Translate both endpoints by the given 3-vector."""
        # BUGFIX: the original assigned generator expressions, which break
        # any subsequent indexing/iteration; materialize tuples instead.
        self.p1 = tuple(self.p1[a] + offset[a] for a in range(3))
        self.p2 = tuple(self.p2[a] + offset[a] for a in range(3))

    def scale(self, scale):
        """Scale both endpoints component-wise by the given 3-vector."""
        # (docstring fixed: previously a copy-paste of translate's)
        self.p1 = tuple(self.p1[a] * scale[a] for a in range(3))
        self.p2 = tuple(self.p2[a] * scale[a] for a in range(3))

    def length(self):
        """Returns the length of the line."""
        # NOTE(review): assumes the endpoint type provides distFromPoint.
        return self.p1.distFromPoint(self.p2)
class LineSegment3DCache(object):
    """Cache class for 3D Line Segments, indexed by endpoint pair."""

    def __init__(self):
        """Initialize as an empty cache."""
        self.endhash = {}  # endpoint -> list of segments touching it
        self.seghash = {}  # (p1, p2) -> LineSegment3D

    def _add_endpoint(self, p, seg):
        """Remember that this segment has a given endpoint."""
        if p not in self.endhash:
            self.endhash[p] = []
        self.endhash[p].append(seg)

    def rehash(self):
        """Reset the hashes after edge vertices have changed."""
        oldseghash = self.seghash
        self.seghash = {
            (v[0], v[1]): v
            for v in oldseghash.values()
        }
        # BUGFIX: the original rebuilt endhash as {segment: segment-list},
        # so endpoint_segments() could never find a point again after a
        # rehash. Rebuild it as {endpoint: [segments]} from the refreshed
        # segments instead.
        self.endhash = {}
        for seg in self.seghash.values():
            self._add_endpoint(seg.p1, seg)
            self._add_endpoint(seg.p2, seg)

    def translate(self, offset):
        """Translate vertices of all edges."""
        for v in self.seghash.values():
            v.translate(offset)
        self.rehash()

    def scale(self, scale):
        """Scale vertices of all edges."""
        for v in self.seghash.values():
            v.scale(scale)
        self.rehash()

    def endpoint_segments(self, p):
        """Get the list of edges that end at point p (empty if none)."""
        if p not in self.endhash:
            return []
        return self.endhash[p]

    def get(self, p1, p2):
        """Given 2 endpoints, return the cached LineSegment3D inst, if any."""
        key = (p1, p2) if p1 < p2 else (p2, p1)
        if key not in self.seghash:
            return None
        return self.seghash[key]

    def add(self, p1, p2):
        """Given 2 endpoints, return the (new or cached) LineSegment3D inst."""
        key = (p1, p2) if p1 < p2 else (p2, p1)
        if key in self.seghash:
            seg = self.seghash[key]
            seg.count += 1  # track how many times this edge was added
            return seg
        seg = LineSegment3D(p1, p2)
        self.seghash[key] = seg
        self._add_endpoint(p1, seg)
        self._add_endpoint(p2, seg)
        return seg

    def __iter__(self):
        """Creates an iterator for the line segments in the cache."""
        for pt in self.seghash.values():
            yield pt

    def __len__(self):
        """Number of cached segments."""
        return len(self.seghash)
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| 28.223602 | 79 | 0.53015 | 573 | 4,544 | 4.073298 | 0.230366 | 0.033419 | 0.027849 | 0.011997 | 0.218081 | 0.183376 | 0.167095 | 0.164524 | 0.116538 | 0.116538 | 0 | 0.028485 | 0.34331 | 4,544 | 160 | 80 | 28.4 | 0.753686 | 0.224692 | 0 | 0.205607 | 0 | 0 | 0.010291 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214953 | false | 0 | 0 | 0.009346 | 0.401869 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86d39cbeb38ed832359d8101e1462aeccc15eee8
| 1,400 |
py
|
Python
|
src/knownnodes.py
|
skeevey/PyBitmessage
|
196d688b138393d1d540df3322844dfe7e7c02ba
|
[
"MIT"
] | 1 |
2018-04-25T08:08:47.000Z
|
2018-04-25T08:08:47.000Z
|
src/knownnodes.py
|
skeevey/PyBitmessage
|
196d688b138393d1d540df3322844dfe7e7c02ba
|
[
"MIT"
] | null | null | null |
src/knownnodes.py
|
skeevey/PyBitmessage
|
196d688b138393d1d540df3322844dfe7e7c02ba
|
[
"MIT"
] | 1 |
2018-04-25T08:08:48.000Z
|
2018-04-25T08:08:48.000Z
|
import pickle
import threading
from bmconfigparser import BMConfigParser
import state
# Guards all access to the shared knownNodes table below.
knownNodesLock = threading.Lock()
# stream id -> {peer: node info}; populated elsewhere at runtime.
knownNodes = {}
# How many entries trimKnownNodes() removes per call.
knownNodesTrimAmount = 2000
def saveKnownNodes(dirName=None):
    """Pickle the knownNodes table to <dirName>knownnodes.dat.

    Falls back to the application data directory when no directory is
    given. Serialization happens under the shared lock.
    """
    target_dir = state.appdata if dirName is None else dirName
    with knownNodesLock:
        with open(target_dir + 'knownnodes.dat', 'wb') as output:
            pickle.dump(knownNodes, output)
def increaseRating(peer):
    """Raise *peer*'s rating by 0.1 in every stream, capped at +1."""
    increaseAmount = 0.1
    maxRating = 1
    with knownNodesLock:
        for stream in knownNodes.keys():
            try:
                rating = knownNodes[stream][peer]["rating"]
            except KeyError:
                # Peer not present in this stream -- nothing to update.
                continue
            knownNodes[stream][peer]["rating"] = min(
                rating + increaseAmount, maxRating)
def decreaseRating(peer):
    """Lower *peer*'s rating by 0.1 in every stream, floored at -1."""
    decreaseAmount = 0.1
    minRating = -1
    with knownNodesLock:
        for stream in knownNodes.keys():
            try:
                rating = knownNodes[stream][peer]["rating"]
            except KeyError:
                # Peer not present in this stream -- nothing to update.
                continue
            knownNodes[stream][peer]["rating"] = max(
                rating - decreaseAmount, minRating)
def trimKnownNodes(recAddrStream = 1):
    """Drop the oldest entries from a stream once it exceeds 'maxnodes'."""
    # Nothing to do while the stream is still below the configured cap.
    if len(knownNodes[recAddrStream]) < BMConfigParser().get("knownnodes", "maxnodes"):
        return
    with knownNodesLock:
        # NOTE(review): sorted() iterates the stream dict's keys and then
        # indexes them with x['lastseen'] -- this assumes the keys
        # themselves are mappings carrying a 'lastseen' field; verify
        # against how knownNodes is populated.
        oldestList = sorted(knownNodes[recAddrStream], key=lambda x: x['lastseen'])[:knownNodesTrimAmount]
        for oldest in oldestList:
            del knownNodes[recAddrStream][oldest]
| 30.434783 | 120 | 0.648571 | 134 | 1,400 | 6.776119 | 0.440299 | 0.079295 | 0.088106 | 0.114537 | 0.160793 | 0.160793 | 0.160793 | 0.160793 | 0.160793 | 0.160793 | 0 | 0.010496 | 0.251429 | 1,400 | 45 | 121 | 31.111111 | 0.855916 | 0 | 0 | 0.315789 | 0 | 0 | 0.047143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0.052632 | 0.105263 | 0 | 0.236842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
86dbf2f275a336e7d12bde00e6cd729b126ef190
| 1,883 |
py
|
Python
|
packages/facilities/diagnostics/py/custom_checkbox.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 2 |
2021-01-15T13:27:19.000Z
|
2021-08-04T08:40:52.000Z
|
packages/facilities/diagnostics/py/custom_checkbox.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | null | null | null |
packages/facilities/diagnostics/py/custom_checkbox.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 5 |
2018-05-01T10:39:31.000Z
|
2022-03-25T03:02:35.000Z
|
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/python
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
class Checkbox():
    """An interactive checkbox rendered on its own matplotlib axes.

    Clicking the box (when enabled) toggles its state and invokes the
    registered callback with (name, ticked).
    """

    def __init__(self, name, position, default=False, label=None, rsize=0.6, enabled=True):
        self.name = name  # unique ID passed to the callback
        # Display label defaults to the checkbox name.
        self.label = name if label == None else label
        self.callback = None
        self.enabled = enabled
        self.ticked = default
        # position is a tuple (x, y, w, h) in figure coordinates.
        self.ax = plt.axes(position)
        self.ax.axis('off')
        self.canvas = self.ax.figure.canvas
        # Draw the label text to the left of the box, if any.
        if len(self.label):
            self.text = self.ax.text(
                -0.15, 0.5, self.label,
                horizontalalignment='right', verticalalignment='center')
        # Square tick box, vertically centered with a little spacing.
        box_origin = (0, (1.0 - rsize) / 2)
        self.ax.add_patch(Rectangle(box_origin, rsize, rsize, fill=True))
        # Hook up mouse handling and paint the initial state.
        self.canvas.mpl_connect('button_release_event', self._handle_event)
        self.redraw()

    def __repr__(self):
        suffix = '' if self.enabled else ' (disabled)'
        return 'checkbox:' + self.name + '=' + str(self.ticked) + suffix

    def on_changed(self, cb):
        """Register *cb*(name, ticked) to be called after each toggle."""
        self.callback = cb

    def _handle_event(self, e):
        """Mouse-release handler: toggle when the click lands on our axes."""
        # TODO: exclude spacing margin for inaxes calculation
        if not self.enabled or e.inaxes != self.ax:
            return
        self.ticked = not self.ticked
        self.redraw()
        if self.callback != None:
            self.callback(self.name, self.ticked)

    def redraw(self):
        """Repaint the box: grey when disabled, blue/yellow by tick state."""
        if not self.enabled:
            facecolor = 'grey'
        elif self.ticked:
            facecolor = 'blue'
        else:
            facecolor = 'lightgoldenrodyellow'
        self.ax.patches[0].set_facecolor(facecolor)
        self.ax.figure.canvas.draw()
| 33.625 | 117 | 0.601699 | 243 | 1,883 | 4.588477 | 0.45679 | 0.043049 | 0.0287 | 0.035874 | 0.039462 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013284 | 0.280404 | 1,883 | 55 | 118 | 34.236364 | 0.809594 | 0.164631 | 0 | 0.051282 | 0 | 0 | 0.053205 | 0 | 0 | 0 | 0 | 0.018182 | 0 | 1 | 0.128205 | false | 0 | 0.051282 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86dfb9b0ac538e587eb0952c661e061a843edff2
| 1,544 |
py
|
Python
|
src/sol/handle_metaplex.py
|
terra-dashboard/staketaxcsv
|
5793105488bf799c61aee64a45f44e9ae8fef397
|
[
"MIT"
] | 140 |
2021-12-11T23:37:46.000Z
|
2022-03-29T23:04:36.000Z
|
src/sol/handle_metaplex.py
|
terra-dashboard/staketaxcsv
|
5793105488bf799c61aee64a45f44e9ae8fef397
|
[
"MIT"
] | 80 |
2021-12-17T15:13:47.000Z
|
2022-03-31T13:33:53.000Z
|
src/sol/handle_metaplex.py
|
terra-dashboard/staketaxcsv
|
5793105488bf799c61aee64a45f44e9ae8fef397
|
[
"MIT"
] | 52 |
2021-12-12T00:37:17.000Z
|
2022-03-29T23:25:09.000Z
|
from common.make_tx import make_swap_tx
from sol.handle_simple import handle_unknown_detect_transfers
def handle_metaplex(exporter, txinfo):
    """Export a metaplex transaction: one-in/one-out becomes a swap row."""
    transfers_in, transfers_out, _ = txinfo.transfers_net

    # Anything other than exactly one transfer each way is unrecognized.
    if len(transfers_in) != 1 or len(transfers_out) != 1:
        handle_unknown_detect_transfers(exporter, txinfo)
        return

    sent_amount, sent_currency, _, _ = transfers_out[0]
    received_amount, received_currency, _, _ = transfers_in[0]
    exporter.ingest_row(
        make_swap_tx(txinfo, sent_amount, sent_currency,
                     received_amount, received_currency))
def is_nft_mint(txinfo):
    """Return True if this transaction looks like an NFT mint.

    A mint shows a "MintTo" instruction with exactly one outbound
    transfer, and either no inbound transfer or a single inbound
    transfer of amount 1 (the minted token itself).
    """
    transfers_in, transfers_out, _ = txinfo.transfers_net

    if "MintTo" not in txinfo.log_instructions or len(transfers_out) != 1:
        return False
    if len(transfers_in) == 0:
        return True
    return len(transfers_in) == 1 and transfers_in[0][0] == 1
def handle_nft_mint(exporter, txinfo):
    """Export an NFT mint: a one-in/one-out mint is recorded as a swap."""
    transfers_in, transfers_out, _ = txinfo.transfers_net

    if len(transfers_in) != 1 or len(transfers_out) != 1:
        # Unrecognized shape -- fall back to the generic handler.
        handle_unknown_detect_transfers(exporter, txinfo)
        return

    sent_amount, sent_currency, _, _ = transfers_out[0]
    received_amount, received_currency, _, _ = transfers_in[0]
    exporter.ingest_row(
        make_swap_tx(txinfo, sent_amount, sent_currency,
                     received_amount, received_currency))
| 34.311111 | 98 | 0.709845 | 200 | 1,544 | 5.095 | 0.205 | 0.107949 | 0.088322 | 0.062807 | 0.752699 | 0.748773 | 0.61629 | 0.61629 | 0.551521 | 0.551521 | 0 | 0.012265 | 0.207902 | 1,544 | 44 | 99 | 35.090909 | 0.820932 | 0 | 0 | 0.5625 | 0 | 0 | 0.007772 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.0625 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86e150137bde5dca549d0321cdc857bd542bc500
| 3,878 |
py
|
Python
|
cinemasci/cis/__init__.py
|
cinemascience/cinemasc
|
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
|
[
"BSD-3-Clause"
] | null | null | null |
cinemasci/cis/__init__.py
|
cinemascience/cinemasc
|
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
|
[
"BSD-3-Clause"
] | 3 |
2020-04-22T16:26:44.000Z
|
2020-04-22T16:30:12.000Z
|
cinemasci/cis/__init__.py
|
cinemascience/cinemasc
|
5b00a0c2e3c886f65cfbf1f59e914fc458d7068b
|
[
"BSD-3-Clause"
] | 1 |
2020-03-06T21:21:19.000Z
|
2020-03-06T21:21:19.000Z
|
from . import imageview
from . import cisview
from . import renderer
from . import convert
class cis:
"""Composible Image Set Class
The data structure to hold properties of a Composible Image Set.
"""
def __init__(self, filename):
""" The constructor. """
self.fname = filename
self.classname = "COMPOSABLE_IMAGE_SET"
self.dims = [0,0]
self.flags = "CONSTANT_CHANNELS"
self.version = "1.0"
self.parameterlist = []
self.parametertable = None
self.variables = {}
self.images = {}
self.colormaps = {}
def debug_print(self):
""" Debug print statement for CIS properties. """
print("printing cis")
print(" fname: {}".format(self.fname))
print(" classname: {}".format(self.classname))
print(" dims: {}".format(self.dims))
print(" flags: {}".format(self.flags))
print(" version: {}".format(self.version))
print(" colormaps: ")
for m in self.colormaps:
print(m)
for i in self.get_images():
print(" image: {}".format(self.get_image(i).name))
for l in self.get_image(i).get_layers():
print(" layer: {}".format(self.get_image(i).get_layer(l).name))
print("\n")
def get_image(self, key):
""" Returns an image given its key. """
result = False
if key in self.images:
result = self.images[key]
return result
def get_images(self):
""" Returns all images. """
for i in self.images:
yield i
def get_image_names(self):
""" Returns list of image names. """
return list(self.images.keys())
def set_parameter_table(self, table):
""" Set parameter table using a deep copy. """
self.parametertable = table.copy(deep=True)
def add_parameter(self, name, type):
""" Add a parameter to the list of parameters for the CIS. """
# check for duplicates
self.parameterlist.append([name, type])
def add_variable(self, name, type, min, max):
""" Add a variable to the set of variables. """
# check for duplicates
self.variables[name] = {'type':type, 'min':min, 'max':max}
def add_image(self, name):
""" Add an image to the set of images in the CIS. """
# check for duplicates
self.images[name] = image.image(name)
return self.images[name]
def get_variables(self):
""" Return all variables. """
for i in self.variables:
yield i
def get_variable(self, name):
""" Return a variable. """
variable = None
if name in self.variables:
variable = self.variables[name]
return variable
def get_image(self,name):
""" Return an image. """
image = None
if name in self.images:
image = self.images[name]
return image
def get_colormap(self,name):
""" Return a colormap. """
colormap = None
if name in self.colormaps:
colormap = self.colormaps[name]
return colormap
def add_colormap(self, name, path):
""" Add a colormap to the set of colormaps. """
#if colormap not in dict
if (name not in self.colormaps):
self.colormaps[name] = colormap.colormap(path)
def remove_colormap(self, name):
""" Remove a colormap from the set of colormaps. """
self.colormaps.pop(name)
def get_colormaps(self):
""" Return all colormaps. """
for i in self.colormaps:
yield i
def set_dims(self, w, h):
""" Set the dimensions of the CIS given a width and height. """
self.dims = [w, h]
| 29.603053 | 84 | 0.555183 | 463 | 3,878 | 4.583153 | 0.207343 | 0.031103 | 0.028275 | 0.01885 | 0.075872 | 0.02639 | 0 | 0 | 0 | 0 | 0 | 0.001533 | 0.326973 | 3,878 | 130 | 85 | 29.830769 | 0.811494 | 0.194688 | 0 | 0.038462 | 0 | 0 | 0.05994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.217949 | false | 0 | 0.051282 | 0 | 0.358974 | 0.153846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86e1817f75ca21dff7ecb06d87908e9887be1bfd
| 2,172 |
py
|
Python
|
applications/spaghetti.py
|
fos/fos-legacy
|
db6047668781a0615abcebc7d55a7164f3105047
|
[
"BSD-3-Clause"
] | 2 |
2016-08-03T10:33:08.000Z
|
2021-06-23T18:50:14.000Z
|
applications/spaghetti.py
|
fos/fos-legacy
|
db6047668781a0615abcebc7d55a7164f3105047
|
[
"BSD-3-Clause"
] | null | null | null |
applications/spaghetti.py
|
fos/fos-legacy
|
db6047668781a0615abcebc7d55a7164f3105047
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import nibabel as nib
import os.path as op
import pyglet
#pyglet.options['debug_gl'] = True
#pyglet.options['debug_x11'] = True
#pyglet.options['debug_gl_trace'] = True
#pyglet.options['debug_texture'] = True
#fos modules
from fos.actor.axes import Axes
from fos import World, Window, WindowManager
from labeler import TrackLabeler
from fos.actor.slicer import Slicer
#dipy modules
from dipy.segment.quickbundles import QuickBundles
from dipy.io.dpy import Dpy
from dipy.io.pickles import load_pickle,save_pickle
from dipy.viz.colormap import orient2rgb
import copy
if __name__ == '__main__':

    # Dataset selection parameters used to build the file paths below.
    subject = 5
    seeds = 1
    qb_dist = 30

    # load T1 volume registered in MNI space
    img = nib.load('data/subj_'+("%02d" % subject)+'/MPRAGE_32/T1_flirt_out.nii.gz')
    data = img.get_data()
    affine = img.get_affine()

    # load the tracks registered in MNI space
    fdpyw = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/tracks_gqi_'+str(seeds)+'M_linear.dpy'
    dpr = Dpy(fdpyw, 'r')
    T = dpr.read_tracks()
    dpr.close()

    # load initial QuickBundles with threshold 30mm (precomputed pickle
    # rather than re-clustering on every run)
    fpkl = 'data/subj_'+("%02d" % subject)+'/101_32/DTI/qb_gqi_'+str(seeds)+'M_linear_'+str(qb_dist)+'.pkl'
    #qb=QuickBundles(T,30.,12)
    qb=load_pickle(fpkl)

    # create the interaction system for tracks
    tl = TrackLabeler(qb,qb.downsampled_tracks(),vol_shape=data.shape,tracks_alpha=1)
    # add an interactive slicing/masking tool
    sl = Slicer(affine,data)
    # one-way communication: the labeler drives the slicer
    tl.slicer=sl

    # OpenGL coordinate system axes (not added to the scene by default)
    ax = Axes(100)
    x,y,z=data.shape

    # add the actors to the world
    w=World()
    w.add(tl)
    w.add(sl)
    #w.add(ax)

    # create a window and attach the world to it
    wi = Window(caption="Interactive Spaghetti using Diffusion Imaging in Python (dipy.org) and Free On Shades (fos.me)",\
            bgcolor=(0.3,0.3,0.6,1),width=1200,height=800)
    wi.attach(w)

    # create a manager which can handle multiple windows and start the loop
    wm = WindowManager()
    wm.add(wi)
    wm.run()
    print('Everything is running ;-)')
| 31.941176 | 122 | 0.675414 | 325 | 2,172 | 4.393846 | 0.473846 | 0.036415 | 0.05042 | 0.046218 | 0.061625 | 0.036415 | 0.036415 | 0 | 0 | 0 | 0 | 0.0296 | 0.206722 | 2,172 | 67 | 123 | 32.41791 | 0.799187 | 0.282228 | 0 | 0 | 0 | 0.02439 | 0.173377 | 0.034416 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.317073 | 0 | 0.317073 | 0.02439 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
86e708b4a5fa05856a6c8d0dde3c26f2006621e1
| 4,340 |
py
|
Python
|
py_cfeve/module/CFAF240400E0-030TN-A1.py
|
crystalfontz/CFA-EVE-Python-Library
|
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
|
[
"Unlicense"
] | 1 |
2021-12-08T00:12:02.000Z
|
2021-12-08T00:12:02.000Z
|
py_cfeve/module/CFAF240400E0-030TN-A1.py
|
crystalfontz/CFA-EVE-Python-Library
|
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
|
[
"Unlicense"
] | null | null | null |
py_cfeve/module/CFAF240400E0-030TN-A1.py
|
crystalfontz/CFA-EVE-Python-Library
|
c5aca10b9b6ee109d4df8a9a692dcef083dafc88
|
[
"Unlicense"
] | null | null | null |
#===========================================================================
#
# Crystalfontz Raspberry-Pi Python example library for FTDI / BridgeTek
# EVE graphic accelerators.
#
#---------------------------------------------------------------------------
#
# This file is part of the port/adaptation of existing C based EVE libraries
# to Python for Crystalfontz EVE based displays.
#
# 2021-10-20 Mark Williams / Crystalfontz America Inc.
# https:#www.crystalfontz.com/products/eve-accelerated-tft-displays.php
#---------------------------------------------------------------------------
#
# This is free and unencumbered software released into the public domain.
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# For more information, please refer to <http:#unlicense.org/>
#
#============================================================================
# EVE Device Type (BT81x family part number)
EVE_DEVICE = 811
# EVE Clock Speed (Hz)
EVE_CLOCK_SPEED = 60000000

# Touch-controller configuration: this panel has no touch support.
TOUCH_RESISTIVE = False
TOUCH_CAPACITIVE = False
TOUCH_GOODIX_CAPACITIVE = False

# Define RGB output pins order, determined by PCB layout
LCD_SWIZZLE = 2

# Define active edge of PCLK. Observed by scope:
# 0: Data is put out coincident with falling edge of the clock.
#    Rising edge of the clock is in the middle of the data.
# 1: Data is put out coincident with rising edge of the clock.
#    Falling edge of the clock is in the middle of the data.
LCD_PCLKPOL = 0

# LCD drive strength: 0=5mA, 1=10mA
LCD_DRIVE_10MA = 0

# Spread Spectrum on RGB signals. Probably not a good idea at higher
# PCLK frequencies.
LCD_PCLK_CSPREAD = 0

# This is not a 24-bit display, so dither
LCD_DITHER = 0

# Pixel clock divisor (PCLK = EVE clock / LCD_PCLK)
LCD_PCLK = 5

#----------------------------------------------------------------------------
# Frame_Rate = 60Hz / 16.7mS
#----------------------------------------------------------------------------

# Horizontal timing
# Target 60Hz frame rate, using the largest possible line time in order to
# maximize the time that the EVE has to process each line.
HPX = 240  # Horizontal Pixel Width
HSW = 10   # Horizontal Sync Width
HBP = 20   # Horizontal Back Porch
HFP = 10   # Horizontal Front Porch
HPP = 209  # Horizontal Pixel Padding
           # FTDI needs at least 1 here

# Define the constants needed by the EVE based on the timing
# Active width of LCD display
LCD_WIDTH = HPX
# Start of horizontal sync pulse
LCD_HSYNC0 = HFP
# End of horizontal sync pulse
LCD_HSYNC1 = HFP+HSW
# Start of active line
LCD_HOFFSET = HFP+HSW+HBP
# Total number of clocks per line
LCD_HCYCLE = HPX+HFP+HSW+HBP+HPP

#----------------------------------------------------------------------------
# Vertical timing
VLH = 400  # Vertical Line Height
VS = 2     # Vertical Sync (in lines)
VBP = 2    # Vertical Back Porch
VFP = 4    # Vertical Front Porch
VLP = 1    # Vertical Line Padding
           # FTDI needs at least 1 here

# Define the constants needed by the EVE based on the timing
# Active height of LCD display
LCD_HEIGHT = VLH
# Start of vertical sync pulse
LCD_VSYNC0 = VFP
# End of vertical sync pulse
LCD_VSYNC1 = VFP+VS
# Start of active screen
LCD_VOFFSET = VFP+VS+VBP
# Total number of lines per screen
LCD_VCYCLE = VLH+VFP+VS+VBP+VLP
| 38.070175 | 78 | 0.645392 | 605 | 4,340 | 4.586777 | 0.436364 | 0.014414 | 0.012973 | 0.02018 | 0.156396 | 0.103784 | 0.085045 | 0.085045 | 0.085045 | 0.085045 | 0 | 0.019225 | 0.185023 | 4,340 | 114 | 79 | 38.070175 | 0.765338 | 0.807373 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86ecf632226839fdabd506f9f83e9358864f79de
| 4,447 |
py
|
Python
|
annotation_gui_gcp/orthophoto_view.py
|
lioncorpo/sfm.lion-judge-corporation
|
95fb11bff263c3faab62269cc907eec18b527e22
|
[
"BSD-2-Clause"
] | 1 |
2019-05-31T13:50:41.000Z
|
2019-05-31T13:50:41.000Z
|
annotation_gui_gcp/orthophoto_view.py
|
Pandinosaurus/OpenSfM
|
b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5
|
[
"BSD-2-Clause"
] | null | null | null |
annotation_gui_gcp/orthophoto_view.py
|
Pandinosaurus/OpenSfM
|
b892ba9fd5e7fd6c7a9e3c81edddca80f71c1cd5
|
[
"BSD-2-Clause"
] | 2 |
2017-03-31T16:54:34.000Z
|
2018-07-10T11:32:22.000Z
|
from typing import Tuple
import numpy as np
import rasterio.warp
from opensfm import features
from .orthophoto_manager import OrthoPhotoManager
from .view import View
class OrthoPhotoView(View):
    def __init__(
        self,
        main_ui,
        path: str,
        init_lat: float,
        init_lon: float,
        is_geo_reference: bool = False,
    ):
        """Create a view over a folder of orthophotos centered on a lat/lon.

        Args:
            main_ui (GUI.Gui): owning UI object, forwarded to View.
            path (str): path containing geotiffs.
            init_lat (float): initial latitude to focus on.
            init_lon (float): initial longitude to focus on.
            is_geo_reference (bool): whether pixel->latlon conversion is
                valid for these rasters.
        """
        self.image_manager = OrthoPhotoManager(path, 100.0)
        self.images_in_list = self.image_manager.image_keys
        self.zoom_window_size_px = 500
        self.is_geo_reference = is_geo_reference

        # Size of the square region read around the focus point
        # (units defined by OrthoPhotoManager -- presumably meters; verify).
        self.size = 50  # TODO add widget for zoom level

        super(OrthoPhotoView, self).__init__(main_ui, False)
        self.refocus(init_lat, init_lon)
        self.populate_image_list()
        if self.images_in_list:
            self.bring_new_image(self.images_in_list[0])
        self.set_title()
def get_image(self, new_image):
crop, image_window, geot = self.image_manager.read_image_around_latlon(
new_image, self.center_lat, self.center_lon, self.size
)
self.image_window = image_window
self.geot = geot
return crop
def get_candidate_images(self):
return self.image_manager.get_candidate_images(
self.center_lat, self.center_lon, self.size
)
def pixel_to_latlon(self, x: float, y: float):
"""
From pixels (in the viewing window) to latlon
"""
if not self.is_geo_reference:
return None
# Pixel to whatever crs the image is in
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `geot`.
x, y = self.geot.xy(y, x)
# And then to WSG84 (lat/lon)
lons, lats = rasterio.warp.transform(self.geot.crs, "EPSG:4326", [x], [y])
return lats[0], lons[0]
def gcp_to_pixel_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from normalized coordinates (in the whole geotiff) to
pixels (in the viewing window)
"""
h, w = self.image_manager.get_image_size(self.current_image)
px = features.denormalized_image_coordinates(np.array([[x, y]]), w, h)[0]
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x = px[0] - self.image_window.col_off
y = px[1] - self.image_window.row_off
# pyre-fixme[7]: Expected `Tuple[float, float]` but got `List[typing.Any]`.
return [x, y]
def pixel_to_gcp_coordinates(self, x: float, y: float) -> Tuple[float, float]:
"""
Transforms from pixels (in the viewing window) to normalized coordinates
(in the whole geotiff)
"""
# pyre-fixme[16]: `OrthoPhotoView` has no attribute `image_window`.
x += self.image_window.col_off
y += self.image_window.row_off
h, w = self.image_manager.get_image_size(self.current_image)
coords = features.normalized_image_coordinates(np.array([[x, y]]), w, h)[0]
return coords.tolist()
def refocus(self, lat, lon):
self.center_lat = lat
self.center_lon = lon
self.populate_image_list()
if self.images_in_list:
if self.current_image not in self.images_in_list:
self.bring_new_image(self.images_in_list[0])
else:
self.bring_new_image(self.current_image)
self.set_title()
def bring_new_image(self, new_image):
super(OrthoPhotoView, self).bring_new_image(new_image, force=True)
xlim = self.ax.get_xlim()
ylim = self.ax.get_ylim()
artists = self.ax.plot(np.mean(xlim), np.mean(ylim), "rx")
self.plt_artists.extend(artists)
self.canvas.draw_idle()
def set_title(self):
lat, lon = self.center_lat, self.center_lon
if self.images_in_list:
t = "Images covering lat:{:.4f}, lon:{:.4f}".format(lat, lon)
shot = self.current_image
seq_ix = self.images_in_list.index(shot)
title = f"{t} [{seq_ix+1}/{len(self.images_in_list)}]: {shot}"
else:
title = f"No orthophotos around {lat}, {lon}"
self.current_image = None
self.ax.clear()
self.ax.axis("off")
self.canvas.draw_idle()
self.window.title(title)
| 35.293651 | 83 | 0.613672 | 597 | 4,447 | 4.351759 | 0.247906 | 0.038106 | 0.04157 | 0.055427 | 0.389915 | 0.335643 | 0.266744 | 0.230177 | 0.204003 | 0.182448 | 0 | 0.010274 | 0.277715 | 4,447 | 125 | 84 | 35.576 | 0.798568 | 0.152687 | 0 | 0.174419 | 0 | 0 | 0.037658 | 0.010995 | 0 | 0 | 0 | 0.032 | 0 | 1 | 0.104651 | false | 0 | 0.069767 | 0.011628 | 0.255814 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86ef4e909fe2cea39d77e8fe80f71f1e8cdcd676
| 1,844 |
py
|
Python
|
main.py
|
Light-Lens/PassGen
|
8f4f2ef08299d6243b939d0f08ac75bde3cabf5e
|
[
"MIT"
] | 3 |
2021-07-19T16:39:06.000Z
|
2021-11-08T11:53:50.000Z
|
main.py
|
Light-Lens/PassGen
|
8f4f2ef08299d6243b939d0f08ac75bde3cabf5e
|
[
"MIT"
] | null | null | null |
main.py
|
Light-Lens/PassGen
|
8f4f2ef08299d6243b939d0f08ac75bde3cabf5e
|
[
"MIT"
] | null | null | null |
# PassGen
# These imports will be used for this project.
from colorama import Fore, Style
from colorama import init
import datetime
import string
import random
import sys
import os
# Initialize the console: set the window title (Windows-only "title"
# shell command) and enable colorama auto-reset so colors don't bleed
# between prints.
os.system('title PassGen')
init(autoreset = True)
# Create Log Functions.
class LOG:
    """Timestamped console logging helpers.

    Callers invoke these on the class itself (e.g. ``LOG.INFO_LOG("msg")``).
    The methods are declared as static methods so they also work when called
    on an instance (the original plain functions would raise TypeError there),
    and the timestamp formatting is factored into a single helper.
    """

    @staticmethod
    def _timestamp():
        """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
        return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

    @staticmethod
    def INFO_LOG(message):
        """Log an informational message."""
        print(f"{LOG._timestamp()} - INFO: {message}")

    @staticmethod
    def STATUS_LOG(message):
        """Log a status/progress message."""
        print(f"{LOG._timestamp()} - STATUS: {message}")

    @staticmethod
    def ERROR_LOG(message):
        """Log an error message in bright red."""
        print(Fore.RED + Style.BRIGHT + f"{LOG._timestamp()} - ERROR: {message}")

    @staticmethod
    def WARN_LOG(message):
        """Log a warning message in bright yellow."""
        print(Fore.YELLOW + Style.BRIGHT + f"{LOG._timestamp()} - WARNING: {message}")
# This will Generate a Strong Password for the User!
def Generate(PassLen):
    """Generate a random password of length ``PassLen``.

    Draws without replacement from ASCII letters, digits and punctuation —
    the same distribution as the original shuffle-then-slice approach — but
    uses a cryptographically secure RNG, as befits password material
    (``PassLen`` is capped at the pool size, matching the original slice).

    Args:
        PassLen (int): desired password length.

    Returns:
        str: the generated password (all characters distinct).
    """
    # Pool of candidate characters: letters, digits and punctuation.
    pool = string.ascii_letters + string.digits + string.punctuation
    # SECURITY FIX: the default `random` generator (Mersenne Twister) is
    # predictable; SystemRandom draws from the OS entropy source.
    rng = random.SystemRandom()
    # sample() picks PassLen distinct characters — equivalent to shuffling
    # the pool and taking the first PassLen entries.
    return "".join(rng.sample(pool, min(PassLen, len(pool))))
# Code Logic here.
LOG.WARN_LOG("Initialized PassGen!")
LOG.STATUS_LOG("Generating a Random Password for You.")
# Password length is chosen at random between 5 and 17 characters.
Password = Generate(random.randint(5, 17))
LOG.INFO_LOG(f"Your Password is: {Password}")
# Append the generated password to a local log file.
with open("Password.log", "a") as File: File.write(f"{Password}\n")
# Open the log file for the user unless "-o" was passed as the first
# argument. NOTE(review): "start" is a Windows shell command — this is a
# no-op/error on other platforms.
if (len(sys.argv) == 1) or (len(sys.argv) > 1 and sys.argv[1].lower() != "-o"):
    os.system("start Password.log")
sys.exit() # Exiting the program successfully.
| 32.350877 | 80 | 0.691432 | 262 | 1,844 | 4.835878 | 0.408397 | 0.031571 | 0.066298 | 0.091555 | 0.211523 | 0.211523 | 0.211523 | 0.211523 | 0.211523 | 0.211523 | 0 | 0.004487 | 0.154013 | 1,844 | 56 | 81 | 32.928571 | 0.807692 | 0.186551 | 0 | 0.108108 | 0 | 0 | 0.238128 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0.243243 | 0.189189 | 0 | 0.378378 | 0.108108 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
86f1999802fa178d60effb8dd046d22fbb0dd814
| 4,643 |
py
|
Python
|
adventure-cards/package/main.py
|
DaneRosa/adventure-cards
|
0685feeec8b56627795e685ff4fffad187881e1c
|
[
"MIT"
] | null | null | null |
adventure-cards/package/main.py
|
DaneRosa/adventure-cards
|
0685feeec8b56627795e685ff4fffad187881e1c
|
[
"MIT"
] | null | null | null |
adventure-cards/package/main.py
|
DaneRosa/adventure-cards
|
0685feeec8b56627795e685ff4fffad187881e1c
|
[
"MIT"
] | null | null | null |
import json
def hydrateCards(rawDeckDataPath):
    """Load raw deck data from a JSON file and hydrate it into card dicts.

    Args:
        rawDeckDataPath: path to a JSON file mapping deck names to lists of
            ``[name, type, level]`` card triples.

    Returns:
        list: a list of decks; each deck is a list of card dicts with a
        fixed key set (fields not applicable to the card's type keep their
        defaults of "" / None). A deck is capped at 45 cards.
    """
    pack = []
    # Use a context manager so the file handle is closed (the original
    # passed a freshly-opened file object to json.load and leaked it).
    with open(rawDeckDataPath) as fh:
        rawDeckData = json.load(fh)
    for deckName in rawDeckData:
        deck = []
        for i in rawDeckData[deckName]:
            # Fresh card template for every entry.
            card = {
                "name": "",
                "type": "",
                "level": None,
                "spell_name": "",
                "creature_name": "",
                "artifact_name": "",
                "enchantment_name": "",
                "spell_magnifier": "",
                "spell_type": "",
                "name_modifier": "",
                "creature_modifier": "",
                "mythic_creature_modifier": "",
                "location": "",
                "mythic_location": "",
            }
            nameSplit = i[0].split()
            card["name"] = i[0]
            card["type"] = i[1]
            card["level"] = i[2]
            if i[1] == "spell":
                # 1 word: bare spell; 2: type + name; 3: magnifier + type + name.
                if len(nameSplit) == 1:
                    card["spell_name"] = i[0]
                elif len(nameSplit) == 2:
                    card["spell_type"] = nameSplit[0]
                    card["spell_name"] = nameSplit[1]
                elif len(nameSplit) == 3:
                    card["spell_magnifier"] = nameSplit[0]
                    card["spell_type"] = nameSplit[1]
                    card["spell_name"] = nameSplit[2]
            elif i[1] == "artifact":
                # BUG FIX: the original guard was
                # `if 'Divine Robe' or 'Ghost Wand' in i[0]:`, which is always
                # truthy ('Divine Robe' is a non-empty string). Each replace
                # below is individually guarded, so dropping the no-op outer
                # condition preserves behavior exactly.
                if "Divine Robe" in i[0]:
                    i[0] = i[0].replace("Divine Robe", "DivineRobe")
                if "Ghost Wand" in i[0]:
                    i[0] = i[0].replace("Ghost Wand", "GhostWand")
                # Re-split after collapsing the two-word artifact names.
                nameSplit = i[0].split()
                card["name"] = i[0]
                if len(nameSplit) == 1:
                    card["artifact_name"] = i[0]
                elif len(nameSplit) == 2:
                    card["artifact_name"] = nameSplit[1]
                    card["spell_type"] = nameSplit[0]
                elif len(nameSplit) == 3:
                    card["artifact_name"] = nameSplit[2]
                    card["spell_magnifier"] = nameSplit[0]
                    card["spell_type"] = nameSplit[1]
            elif i[1] == "enchantment":
                if len(nameSplit) == 1:
                    card["enchantment_name"] = i[0]
                if len(nameSplit) == 2:
                    card["enchantment_name"] = nameSplit[1]
                    card["spell_type"] = nameSplit[0]
                if len(nameSplit) == 3:
                    card["enchantment_name"] = nameSplit[2]
                    card["spell_type"] = nameSplit[1]
                    card["spell_magnifier"] = nameSplit[0]
            elif i[1] == "monster":
                # Raw type 'monster' is surfaced as 'creature'.
                card["type"] = "creature"
                if len(nameSplit) == 1:
                    card["creature_name"] = nameSplit[0]
                if len(nameSplit) == 3:
                    card["creature_name"] = nameSplit[2]
                    card["creature_modifier"] = nameSplit[1]
                    card["name_modifier"] = nameSplit[0]
                if len(nameSplit) > 3:
                    # Long monster names embed a location after the first
                    # occurrence of 'of'; level 2 is a regular location,
                    # level 3 a mythic one.
                    keyword = "of"
                    before_keyword, keyword, after_keyword = i[0].partition(keyword)
                    if i[2] == 2:
                        card["creature_name"] = nameSplit[2]
                        card["creature_modifier"] = nameSplit[1]
                        card["name_modifier"] = nameSplit[0]
                        card["location"] = keyword + after_keyword
                    elif i[2] == 3:
                        card["creature_name"] = nameSplit[2]
                        card["mythic_creature_modifier"] = nameSplit[1]
                        card["name_modifier"] = nameSplit[0]
                        card["mythic_location"] = keyword + after_keyword
            deck.append(card)
            # A deck holds at most 45 cards.
            if len(deck) == 45:
                break
        pack.append(deck)
    return pack
| 48.873684 | 92 | 0.420633 | 443 | 4,643 | 4.306998 | 0.130926 | 0.09696 | 0.19392 | 0.069182 | 0.626834 | 0.604822 | 0.535115 | 0.43501 | 0.404612 | 0.378931 | 0 | 0.024345 | 0.41611 | 4,643 | 95 | 93 | 48.873684 | 0.679454 | 0.003661 | 0 | 0.329787 | 0 | 0 | 0.208432 | 0.010378 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010638 | false | 0 | 0.010638 | 0 | 0.021277 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86f3b70aa2f3c882bdd1f178d6aa80fab1793aab
| 5,898 |
py
|
Python
|
pmdarima/preprocessing/endog/boxcox.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 736 |
2019-12-02T01:33:31.000Z
|
2022-03-31T21:45:29.000Z
|
pmdarima/preprocessing/endog/boxcox.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 186 |
2019-12-01T18:01:33.000Z
|
2022-03-31T18:27:56.000Z
|
pmdarima/preprocessing/endog/boxcox.py
|
tuomijal/pmdarima
|
5bf84a2a5c42b81b949bd252ad3d4c6c311343f8
|
[
"MIT"
] | 126 |
2019-12-07T04:03:19.000Z
|
2022-03-31T17:40:14.000Z
|
# -*- coding: utf-8 -*-
from scipy import stats
import numpy as np
import warnings
from ...compat import check_is_fitted, pmdarima as pm_compat
from .base import BaseEndogTransformer
__all__ = ['BoxCoxEndogTransformer']
class BoxCoxEndogTransformer(BaseEndogTransformer):
    r"""Apply the Box-Cox transformation to an endogenous array
    The Box-Cox transformation is applied to non-normal data to coerce it more
    towards a normal distribution. It's specified as::
        (((y + lam2) ** lam1) - 1) / lam1, if lmbda != 0, else
        log(y + lam2)
    Parameters
    ----------
    lmbda : float or None, optional (default=None)
        The lambda value for the Box-Cox transformation, if known. If not
        specified, it will be estimated via MLE.
    lmbda2 : float, optional (default=0.)
        The value to add to ``y`` to make it non-negative. If, after adding
        ``lmbda2``, there are still negative values, a ValueError will be
        raised.
    neg_action : str, optional (default="raise")
        How to respond if any values in ``y <= 0`` after adding ``lmbda2``.
        One of ('raise', 'warn', 'ignore'). If anything other than 'raise',
        values <= 0 will be truncated to the value of ``floor``.
    floor : float, optional (default=1e-16)
        A positive value that truncate values to if there are values in ``y``
        that are zero or negative and ``neg_action`` is not 'raise'. Note that
        if values are truncated, invertibility will not be preserved, and the
        transformed array may not be perfectly inverse-transformed.
    """
    def __init__(self, lmbda=None, lmbda2=0, neg_action="raise", floor=1e-16):
        # Only hyper-parameters are stored here; the fitted values are set
        # by ``fit`` as ``lam1_``/``lam2_`` (scikit-learn convention of a
        # trailing underscore for learned state).
        self.lmbda = lmbda
        self.lmbda2 = lmbda2
        self.neg_action = neg_action
        self.floor = floor
    def fit(self, y, X=None, **kwargs):  # TODO: kwargs go away
        """Fit the transformer
        Learns the value of ``lmbda``, if not specified in the constructor.
        If defined in the constructor, is not re-learned.
        Parameters
        ----------
        y : array-like or None, shape=(n_samples,)
            The endogenous (time-series) array.
        X : array-like or None, shape=(n_samples, n_features), optional
            The exogenous array of additional covariates. Not used for
            endogenous transformers. Default is None, and non-None values will
            serve as pass-through arrays.
        """
        lam1 = self.lmbda
        lam2 = self.lmbda2
        # Temporary shim until we remove `exogenous` support completely
        X, _ = pm_compat.get_X(X, **kwargs)
        if lam2 < 0:
            raise ValueError("lmbda2 must be a non-negative scalar value")
        if lam1 is None:
            # Estimate lambda via maximum likelihood when not user-supplied.
            y, _ = self._check_y_X(y, X)
            _, lam1 = stats.boxcox(y + lam2, lmbda=None, alpha=None)
        self.lam1_ = lam1
        self.lam2_ = lam2
        return self
    def transform(self, y, X=None, **kwargs):
        """Transform the new array
        Apply the Box-Cox transformation to the array after learning the
        lambda parameter.
        Parameters
        ----------
        y : array-like or None, shape=(n_samples,)
            The endogenous (time-series) array.
        X : array-like or None, shape=(n_samples, n_features), optional
            The exogenous array of additional covariates. Not used for
            endogenous transformers. Default is None, and non-None values will
            serve as pass-through arrays.
        Returns
        -------
        y_transform : array-like or None
            The Box-Cox transformed y array
        X : array-like or None
            The X array
        """
        check_is_fitted(self, "lam1_")
        # Temporary shim until we remove `exogenous` support completely
        X, _ = pm_compat.get_X(X, **kwargs)
        lam1 = self.lam1_
        lam2 = self.lam2_
        y, exog = self._check_y_X(y, X)
        # Shift in place; ``_check_y_X`` returned a fresh array above.
        y += lam2
        neg_mask = y <= 0.
        if neg_mask.any():
            # Non-positive values cannot be Box-Cox transformed; raise, warn,
            # or silently truncate to ``floor`` per ``neg_action``. Truncation
            # breaks perfect invertibility (see class docstring).
            action = self.neg_action
            msg = "Negative or zero values present in y"
            if action == "raise":
                raise ValueError(msg)
            elif action == "warn":
                warnings.warn(msg, UserWarning)
            y[neg_mask] = self.floor
        if lam1 == 0:
            # Limiting case of the Box-Cox formula as lambda -> 0.
            return np.log(y), exog
        return (y ** lam1 - 1) / lam1, exog
    def inverse_transform(self, y, X=None, **kwargs):  # TODO: kwargs go away
        """Inverse transform a transformed array
        Inverse the Box-Cox transformation on the transformed array. Note that
        if truncation happened in the ``transform`` method, invertibility will
        not be preserved, and the transformed array may not be perfectly
        inverse-transformed.
        Parameters
        ----------
        y : array-like or None, shape=(n_samples,)
            The transformed endogenous (time-series) array.
        X : array-like or None, shape=(n_samples, n_features), optional
            The exogenous array of additional covariates. Not used for
            endogenous transformers. Default is None, and non-None values will
            serve as pass-through arrays.
        Returns
        -------
        y : array-like or None
            The inverse-transformed y array
        X : array-like or None
            The inverse-transformed X array
        """
        check_is_fitted(self, "lam1_")
        # Temporary shim until we remove `exogenous` support completely
        X, _ = pm_compat.get_X(X, **kwargs)
        lam1 = self.lam1_
        lam2 = self.lam2_
        y, exog = self._check_y_X(y, X)
        if lam1 == 0:
            # Inverse of the log (lambda == 0) branch of ``transform``.
            return np.exp(y) - lam2, exog
        numer = y * lam1  # remove denominator
        numer += 1.  # add 1 back to it
        de_exp = numer ** (1. / lam1)  # de-exponentiate
        return de_exp - lam2, exog
| 33.511364 | 78 | 0.598847 | 764 | 5,898 | 4.537958 | 0.231675 | 0.019037 | 0.031728 | 0.043265 | 0.462359 | 0.448515 | 0.417652 | 0.40525 | 0.40525 | 0.366023 | 0 | 0.015207 | 0.308749 | 5,898 | 175 | 79 | 33.702857 | 0.835173 | 0.56375 | 0 | 0.232143 | 0 | 0 | 0.059846 | 0.010618 | 0 | 0 | 0 | 0.011429 | 0 | 1 | 0.071429 | false | 0 | 0.089286 | 0 | 0.267857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
86f9d5c800a3592d64ffbc26d845ced72a00288c
| 4,005 |
py
|
Python
|
src/python/pants/backend/android/tasks/aapt_builder.py
|
hythloday/pants
|
107e9b0957f6949ac4bd535fbef8d2d8cba05c5c
|
[
"Apache-2.0"
] | 11 |
2015-01-20T01:39:41.000Z
|
2019-08-08T07:27:44.000Z
|
src/python/pants/backend/android/tasks/aapt_builder.py
|
hythloday/pants
|
107e9b0957f6949ac4bd535fbef8d2d8cba05c5c
|
[
"Apache-2.0"
] | 1 |
2016-03-15T20:35:18.000Z
|
2016-03-15T20:35:18.000Z
|
src/python/pants/backend/android/tasks/aapt_builder.py
|
fakeNetflix/square-repo-pants
|
28a018c7f47900aec4f576c81a52e0e4b41d9fec
|
[
"Apache-2.0"
] | 5 |
2015-03-30T02:46:53.000Z
|
2018-03-08T20:10:43.000Z
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import subprocess
from twitter.common import log
from pants.backend.android.targets.android_binary import AndroidBinary
from pants.backend.android.targets.android_resources import AndroidResources
from pants.backend.android.tasks.aapt_task import AaptTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnit
from pants.util.dirutil import safe_mkdir
class AaptBuilder(AaptTask):
  """Build an android bundle with compiled code and assets.
  This class gathers compiled classes (an Android dex archive) and packages it with the
  target's resource files. The output is an unsigned .apk, an Android application package file.
  """
  @classmethod
  def product_types(cls):
    """Products this task provides to downstream tasks."""
    return ['apk']
  @staticmethod
  def is_app(target):
    """Return True if ``target`` is an Android app binary."""
    return isinstance(target, AndroidBinary)
  def __init__(self, *args, **kwargs):
    # No additional state; everything is delegated to AaptTask.
    super(AaptBuilder, self).__init__(*args, **kwargs)
  def prepare(self, round_manager):
    """Declare the upstream 'dex' product this task consumes."""
    round_manager.require_data('dex')
  def render_args(self, target, resource_dir, inputs):
    """Build the aapt command line for ``target``.

    :param target: the AndroidBinary being packaged.
    :param resource_dir: list of resource directories to spider for assets.
    :param inputs: input directories holding the compiled dex file(s).
    :returns: the argv list to execute.
    """
    args = []
    # Glossary of used aapt flags. Aapt handles a ton of action, this will continue to expand.
    # : 'package' is the main aapt operation (see class docstring for more info).
    # : '-M' is the AndroidManifest.xml of the project.
    # : '-S' points to the resource_dir to "spider" down while collecting resources.
    # : '-I' packages to add to base "include" set, here the android.jar of the target-sdk.
    # : '--ignored-assets' patterns for the aapt to skip. This is the default w/ 'BUILD*' added.
    # : '-F' The name and location of the .apk file to output
    # : additional positional arguments are treated as input directories to gather files from.
    args.extend([self.aapt_tool(target.build_tools_version)])
    args.extend(['package', '-M', target.manifest])
    args.extend(['-S'])
    args.extend(resource_dir)
    args.extend(['-I', self.android_jar_tool(target.target_sdk)])
    args.extend(['--ignore-assets', self.ignored_assets])
    args.extend(['-F', os.path.join(self.workdir, target.app_name + '-unsigned.apk')])
    args.extend(inputs)
    log.debug('Executing: {0}'.format(args))
    return args
  def execute(self):
    """Package each invalidated app target into an unsigned .apk."""
    safe_mkdir(self.workdir)
    # TODO(mateor) map stderr and stdout to workunit streams (see CR 859)
    with self.context.new_workunit(name='apk-bundle', labels=[WorkUnit.MULTITOOL]):
      targets = self.context.targets(self.is_app)
      with self.invalidated(targets) as invalidation_check:
        invalid_targets = []
        for vt in invalidation_check.invalid_vts:
          invalid_targets.extend(vt.targets)
        for target in invalid_targets:
          # 'input_dirs' is the folder containing the Android dex file
          input_dirs = []
          # 'gen_out' holds resource folders (e.g. 'res')
          gen_out = []
          mapping = self.context.products.get('dex')
          for basedir in mapping.get(target):
            input_dirs.append(basedir)
          def gather_resources(target):
            """Gather the 'resource_dir' of the target"""
            if isinstance(target, AndroidResources):
              gen_out.append(os.path.join(get_buildroot(), target.resource_dir))
          # Walk the target's transitive closure to pick up all resource dirs.
          target.walk(gather_resources)
          process = subprocess.Popen(self.render_args(target, gen_out, input_dirs))
          result = process.wait()
          if result != 0:
            raise TaskError('Android aapt tool exited non-zero ({code})'.format(code=result))
      # Register the produced .apk for every app target (including ones
      # that were already valid) so downstream tasks can find it.
      for target in targets:
        self.context.products.get('apk').add(target, self.workdir).append(target.app_name + "-unsigned.apk")
| 41.71875 | 106 | 0.698127 | 530 | 4,005 | 5.154717 | 0.403774 | 0.029283 | 0.01757 | 0.025256 | 0.044656 | 0.027086 | 0 | 0 | 0 | 0 | 0 | 0.00373 | 0.196754 | 4,005 | 95 | 107 | 42.157895 | 0.845508 | 0.30437 | 0 | 0 | 0 | 0 | 0.048657 | 0 | 0 | 0 | 0 | 0.010526 | 0 | 1 | 0.118644 | false | 0 | 0.186441 | 0.033898 | 0.372881 | 0.016949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8101888cafdd6d738a67d105df6945c67e4d48e2
| 773 |
py
|
Python
|
tools/amp_segment/ina_speech_segmenter.py
|
saratkumar/galaxy
|
35cd0987239c1b006d6eaf70b4a03a58fb857a12
|
[
"CC-BY-3.0"
] | 1 |
2020-03-11T15:17:32.000Z
|
2020-03-11T15:17:32.000Z
|
tools/amp_segment/ina_speech_segmenter.py
|
saratkumar/galaxy
|
35cd0987239c1b006d6eaf70b4a03a58fb857a12
|
[
"CC-BY-3.0"
] | 72 |
2019-06-06T18:52:41.000Z
|
2022-02-17T02:53:18.000Z
|
tools/done/amp_segment/ina_speech_segmenter.py
|
AudiovisualMetadataPlatform/amp_mgms
|
593d4f4d40b597a7753cd152cd233976e6b28c75
|
[
"Apache-2.0"
] | 1 |
2022-03-01T08:07:54.000Z
|
2022-03-01T08:07:54.000Z
|
#!/usr/bin/env python3
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
import uuid
import mgm_utils
def main():
(root_dir, input_file, json_file) = sys.argv[1:4]
tmpName = str(uuid.uuid4())
tmpdir = "/tmp"
temp_input_file = f"{tmpdir}/{tmpName}.dat"
temp_output_file = f"{tmpdir}/{tmpName}.json"
shutil.copy(input_file, temp_input_file)
sif = mgm_utils.get_sif_dir(root_dir) + "/ina_segmentation.sif"
r = subprocess.run(["singularity", "run", sif, temp_input_file, temp_output_file])
shutil.copy(temp_output_file, json_file)
if os.path.exists(temp_input_file):
os.remove(temp_input_file)
if os.path.exists(temp_output_file):
os.remove(temp_output_file)
exit(r.returncode)
if __name__ == "__main__":
main()
| 21.472222 | 83 | 0.742561 | 122 | 773 | 4.385246 | 0.385246 | 0.117757 | 0.121495 | 0.06729 | 0.082243 | 0.082243 | 0 | 0 | 0 | 0 | 0 | 0.005908 | 0.124191 | 773 | 35 | 84 | 22.085714 | 0.784343 | 0.027167 | 0 | 0 | 0 | 0 | 0.122503 | 0.087883 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.32 | 0 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
81050df4590617cea7e0daedc54d45bd783c7cfa
| 367 |
py
|
Python
|
stickmanZ/__main__.py
|
MichaelMcFarland98/cse210-project
|
9e5a45a75f465fe123e33712d3c19dd88e98246a
|
[
"MIT"
] | 1 |
2021-07-24T00:40:14.000Z
|
2021-07-24T00:40:14.000Z
|
stickmanZ/__main__.py
|
MichaelMcFarland98/cse210-project
|
9e5a45a75f465fe123e33712d3c19dd88e98246a
|
[
"MIT"
] | null | null | null |
stickmanZ/__main__.py
|
MichaelMcFarland98/cse210-project
|
9e5a45a75f465fe123e33712d3c19dd88e98246a
|
[
"MIT"
] | null | null | null |
from game.game_view import GameView
from game.menu_view import menu_view
from game import constants
import arcade
SCREEN_WIDTH = constants.SCREEN_WIDTH
SCREEN_HEIGHT = constants.SCREEN_HEIGHT
SCREEN_TITLE = constants.SCREEN_TITLE
window = arcade.Window(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
start_view = menu_view()
window.show_view(start_view)
arcade.run()
| 22.9375 | 65 | 0.836512 | 54 | 367 | 5.388889 | 0.296296 | 0.082474 | 0.116838 | 0.158076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.100817 | 367 | 15 | 66 | 24.466667 | 0.881818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.363636 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
810b128cc1280e3c864be85f0fd7db633ecb097d
| 35,104 |
py
|
Python
|
knx-test.py
|
WAvdBeek/CoAPthon3
|
5aa9d6a6d9a2903d86b113da538df9bd970e6b44
|
[
"MIT"
] | 1 |
2021-11-05T08:04:33.000Z
|
2021-11-05T08:04:33.000Z
|
knx-test.py
|
WAvdBeek/CoAPthon3
|
5aa9d6a6d9a2903d86b113da538df9bd970e6b44
|
[
"MIT"
] | 1 |
2021-07-21T12:40:54.000Z
|
2021-07-21T14:42:42.000Z
|
knx-test.py
|
WAvdBeek/CoAPthon3
|
5aa9d6a6d9a2903d86b113da538df9bd970e6b44
|
[
"MIT"
] | 1 |
2021-07-20T10:18:17.000Z
|
2021-07-20T10:18:17.000Z
|
#!/usr/bin/env python
import getopt
import socket
import sys
import cbor
#from cbor2 import dumps, loads
import json
import time
import traceback
from coapthon.client.helperclient import HelperClient
from coapthon.utils import parse_uri
from coapthon import defines
client = None
paths = {}
paths_extend = {}
my_base = ""
def usage(): # pragma: no cover
print("Command:\tknxcoapclient.py -o -p [-P]")
print("Options:")
print("\t-o, --operation=\tGET|GETNONE|PUT|POST|DELETE|DISCOVER|OBSERVE")
print("\t-p, --path=\t\t\tPath of the request")
print("\t-P, --payload=\t\tPayload of the request")
print("\t-c, --contenttype=\t\tcontenttype of the request")
print("\t-f, --payload-file=\t\tFile with payload of the request")
def get_url(line):
data = line.split(">")
url = data[0]
return url[1:]
def get_ct(line):
tagvalues = line.split(";")
for tag in tagvalues:
if tag.startswith("ct"):
ct_value_all = tag.split("=")
ct_value = ct_value_all[1].split(",")
return ct_value[0]
return ""
def get_base(url):
# python3 knxcoapclient.py -o GET -p coap://[fe80::6513:3050:71a7:5b98]:63914/a -c 50
my_url = url.replace("coap://","")
mybase = my_url.split("/")
return mybase[0]
def get_base_from_link(payload):
print("get_base_from_link\n")
global paths
global paths_extend
lines = payload.splitlines()
# add the
if len(paths) == 0:
my_base = get_base(get_url(lines[0]))
return my_base
def get_sn(my_base):
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
json_data = cbor.loads(sn.payload)
#print ("SN : ", json_data)
return json_data
def install(my_base):
sn = get_sn(my_base)
print (" SN : ", sn)
iid = "5" # installation id
if "000001" == sn :
# sensor, e.g sending
print ("--------------------")
print ("Installing SN: ", sn)
content = { 2: "reset"}
print("reset :", content);
execute_post("coap://"+my_base+"/.well-known/knx", 60, 60, content)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 1
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "p/push", 7:[1], 8: [2] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# recipient table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: "/p/push", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/r", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
if "000002" == sn :
# actuator ==> receipient
# should use /fp/r
print ("--------------------")
print ("installing SN: ", sn)
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = 2
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
content = iid
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
content = { 2: "startLoading"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# group object table
# id (0)= 1
# url (11)= /p/light
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ { 0: 1, 11: "/p/light", 7:[1], 8: [1] } ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g", 40)
# publisher table
# id (0)= 1
# ia (12)
# url (11)= .knx
# ga (7 )= 1
# cflags (8) = ["r" ] ; read = 1, write = 2, transmit = 3 update = 4
content = [ {0: 1, 11: ".knx", 7:[1], 12 :"blah.blah" } ]
execute_post("coap://"+my_base+"/fp/p", 60, 60, content)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
content = { 2: "loadComplete"}
print("lsm :", content);
execute_post("coap://"+my_base+"/a/lsm", 60, 60, content)
execute_get("coap://"+my_base+"/a/lsm", 60)
# do a post
content = {"sia": 5678, "st": 55, "ga": 1, "value": 100 }
content = { 4: 5678, "st": 55, 7: 1, "value": 100 }
# st ga value (1)
#content = { 5: { 6: 1, 7: 1, 1: True } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
content = {4: 5678, 5: { 6: 1, 7: 1, 1: False } }
#execute_post("coap://"+my_base+"/.knx", 60, 60, content)
#execute_post("coap://[FF02::FD]:5683/.knx", 60, 60, content)
# no json tags as strings
def do_sequence_dev(my_base):
print("===================")
print("Get SN :");
sn = execute_get("coap://"+my_base+"/dev/sn", 60)
sn = get_sn(my_base)
print (" SN : ", sn)
print("===================")
print("Get HWT :");
execute_get("coap://"+my_base+"/dev/hwt", 60)
print("===================")
print("Get HWV :");
execute_get("coap://"+my_base+"/dev/hwv", 60)
print("===================")
print("Get FWV :");
execute_get("coap://"+my_base+"/dev/fwv", 60)
print("===================")
print("Get Model :");
execute_get("coap://"+my_base+"/dev/model", 60)
print("===================")
content = True
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
content = False
print("set PM :", content);
execute_put("coap://"+my_base+"/dev/pm", 60, 60, content)
execute_get("coap://"+my_base+"/dev/pm", 60)
print("===================")
content = 44
print("set IA :", content);
execute_put("coap://"+my_base+"/dev/ia", 60, 60, content)
execute_get("coap://"+my_base+"/dev/ia", 60)
print("===================")
content = "my host name"
print("set hostname :", content);
execute_put("coap://"+my_base+"/dev/hostname", 60, 60, content)
execute_get("coap://"+my_base+"/dev/hostname", 60)
print("===================")
content = " iid xxx"
print("set iid :", content);
execute_put("coap://"+my_base+"/dev/iid", 60, 60, content)
execute_get("coap://"+my_base+"/dev/iid", 60)
# id ==> 0
# href ==> 11
# ga ==> 7
# cflag ==> 8
def do_sequence_fp_g_int(my_base):
# url, content, accept, contents
content = [ {0: 1, 11: "xxxx1", 8: [1,2,3,4,5], 7:[2222,3333]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/1", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
content = [ {0: 2, 11: "xxxxyyy2", 8: [1,4,5], 7:[44,55,33]}, {0: 3, 1: "xxxxyyy3", 8: [1,4,5], 7:[44,55,33]} ]
execute_post("coap://"+my_base+"/fp/g", 60, 60, content)
execute_get("coap://"+my_base+"/fp/g/2", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
execute_del("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g/3", 60)
execute_get("coap://"+my_base+"/fp/g", 40)
def do_sequence_fp_g(my_base):
    """Exercise /fp/g (group object table) using string JSON-style keys.

    Content-format 60 = application/cbor, 40 = application/link-format.
    """
    url = "coap://" + my_base + "/fp/g"
    # Create entry 1 and read it back individually and via the collection.
    execute_post(url, 60, 60,
                 [{"id": 1, "href": "xxxx1", "cflag": [1, 2, 3, 4, 5], "ga": [2222, 3333]}])
    execute_get(url + "/1", 60)
    execute_get(url, 40)
    # Create entries 2 and 3 in one POST.
    entries = [{"id": 2, "href": "xxxxyyy2", "cflag": [1, 4, 5], "ga": [44, 55, 33]},
               {"id": 3, "href": "xxxxyyy3", "cflag": [1, 4, 5], "ga": [44, 55, 33]}]
    execute_post(url, 60, 60, entries)
    execute_get(url + "/2", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
    # Delete entry 3 and confirm it is gone.
    execute_del(url + "/3", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_p_int(my_base):
    """Exercise /fp/p (publisher table) using integer CBOR keys.

    Key map (from the table above): 0 = id, 12 = ia, 112 = path, 10 = url, 7 = ga.
    """
    url = "coap://" + my_base + "/fp/p"
    # Create entry 1 and read it back (60 = cbor; 40 = application/link-format).
    execute_post(url, 60, 60, [{0: 1, 12: "Ia.IA1", 112: "path1", 7: [2222, 3333]}])
    execute_get(url + "/1", 60)
    execute_get(url, 40)
    # Create entries 2 and 3 in one POST.
    entries = [{0: 2, 12: "xxxxyyyia2", 112: "path2", 7: [44, 55, 33]},
               {0: 3, 12: "xxxxyyyia3", 112: "path3", 7: [44, 55, 33]}]
    execute_post(url, 60, 60, entries)
    execute_get(url + "/2", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
    # Delete entry 3 and confirm removal.
    execute_del(url + "/3", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
def do_sequence_fp_p(my_base):
    """Exercise /fp/p (publisher table) using string JSON-style keys."""
    url = "coap://" + my_base + "/fp/p"
    # Create entry 1 and read it back.
    execute_post(url, 60, 60,
                 [{"id": 1, "ia": "Ia.IA1", "path": "path1", "ga": [2222, 3333]}])
    execute_get(url + "/1", 60)
    # 40 == application-link format
    execute_get(url, 40)
    # Create entries 2 and 3 in one POST.
    entries = [{"id": 2, "ia": "xxxxyyyia2", "path": "path2", "ga": [44, 55, 33]},
               {"id": 3, "ia": "xxxxyyyia3", "path": "path3", "ga": [44, 55, 33]}]
    execute_post(url, 60, 60, entries)
    execute_get(url + "/2", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
    # Delete entry 3 and confirm removal.
    execute_del(url + "/3", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
# id ==> 0
# ia ==> 12
# path ==> 112
# url ==> 10
# ga ==> 7
def do_sequence_fp_r_int(my_base):
    """Exercise /fp/r (recipient table) using integer CBOR keys.

    Key map (from the table above): 0 = id, 12 = ia, 112 = path, 10 = url, 7 = ga.
    """
    url = "coap://" + my_base + "/fp/r"
    # Create entry 1 and read it back (60 = cbor; 40 = link-format).
    execute_post(url, 60, 60, [{0: 1, 12: "r-Ia.IA1", 112: "r-path1", 7: [2222, 3333]}])
    execute_get(url + "/1", 60)
    execute_get(url, 40)
    # Create entries 2 (with a url, key 10) and 3 in one POST.
    entries = [{0: 2, 12: "r-Ia.IA2", 10: "url2", 112: "r-path2", 7: [44, 55, 33]},
               {0: 3, 12: "r-Ia.IA3", 112: "r-path3", 7: [44, 55, 33]}]
    execute_post(url, 60, 60, entries)
    execute_get(url + "/2", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
    # Delete entry 3 and confirm removal.
    execute_del(url + "/3", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
def do_sequence_fp_r(my_base):
    """Exercise /fp/r (recipient table) using string JSON-style keys."""
    url = "coap://" + my_base + "/fp/r"
    # Create entry 1 and read it back.
    execute_post(url, 60, 60,
                 [{"id": 1, "ia": "r-Ia.IA1", "path": "r-path1", "ga": [2222, 3333]}])
    execute_get(url + "/1", 60)
    execute_get(url, 40)
    # Create entries 2 and 3 in one POST.
    entries = [{"id": 2, "ia": "r-Ia.IA2", "path": "r-path2", "ga": [44, 55, 33]},
               {"id": 3, "ia": "r-Ia.IA3", "path": "r-path3", "ga": [44, 55, 33]}]
    execute_post(url, 60, 60, entries)
    execute_get(url + "/2", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
    # Delete entry 3 and confirm removal.
    execute_del(url + "/3", 60)
    execute_get(url + "/3", 60)
    execute_get(url, 40)
# cmd ==> 2
def do_sequence_lsm_int(my_base):
    """Drive the load state machine at /a/lsm using the integer key 2 (= cmd).

    Reads the state, then issues each command and reads the state back.
    """
    url = "coap://" + my_base + "/a/lsm"
    execute_get(url, 60)
    for command in ("startLoading", "loadComplete", "unload"):
        execute_post(url, 60, 60, {2: command})
        execute_get(url, 60)
def do_sequence_lsm(my_base):
    """Drive the load state machine at /a/lsm using the string key "cmd".

    Reads the state, then issues each command and reads the state back.
    """
    url = "coap://" + my_base + "/a/lsm"
    execute_get(url, 60)
    for command in ("startLoading", "loadComplete", "unload"):
        execute_post(url, 60, 60, {"cmd": command})
        execute_get(url, 60)
# ./knx resource
# sia ==> 4
# ga ==> 7
# st 6
def do_sequence_knx_knx_int(my_base):
    """POST a value to the .knx resource using integer keys
    (4 = sia, 7 = ga, 6 = st per the table above), reading before and after."""
    url = "coap://" + my_base + "/.knx"
    execute_get(url, 60)
    execute_post(url, 60, 60, {"value": {4: 5, 7: 7777, 6: "rp"}})
    execute_get(url, 60)
# ./knx resource
def do_sequence_knx_knx(my_base):
    """POST a value to the .knx resource using string keys (sia/ga/st),
    reading the resource before and after."""
    url = "coap://" + my_base + "/.knx"
    execute_get(url, 60)
    execute_post(url, 60, 60, {"value": {"sia": 5, "ga": 7, "st": "rp"}})
    execute_get(url, 60)
def do_sequence_knx_spake(my_base):
    """Run the three-step SPAKE exchange against /.well-known/knx/spake.

    Sequence (keys from the original comments):
      15 (rnd) - parameter exchange, expects a return value
      10 (pa)  - credential exchange, expects a return value
      14 (ca)  - pase verification, no return value expected
    """
    url = "coap://" + my_base + "/.well-known/knx/spake"
    # Step 1: parameter exchange (rnd).
    execute_post(url, 60, 60, {15: b"a-15-sdfsdred"})
    # Step 2: credential exchange (pa).
    execute_post(url, 60, 60, {10: b"s10dfsdfsfs"})
    # Step 3: pase verification (ca).
    execute_post(url, 60, 60, {14: b"a15sdfsdred"})
def do_sequence_knx_idevid(my_base):
    """GET the device's IDevID from /.well-known/knx/idevid (accept 282)."""
    execute_get("coap://" + my_base + "/.well-known/knx/idevid", 282)
def do_sequence_knx_ldevid(my_base):
    """GET the device's LDevID from /.well-known/knx/ldevid (accept 282)."""
    execute_get("coap://" + my_base + "/.well-known/knx/ldevid", 282)
def do_sequence_knx_osn(my_base):
    """GET the OSN value from /.well-known/knx/osn (accept 60 = cbor)."""
    execute_get("coap://" + my_base + "/.well-known/knx/osn", 60)
def do_sequence_knx_crc(my_base):
    """GET the CRC value from /.well-known/knx/crc (accept 60 = cbor)."""
    execute_get("coap://" + my_base + "/.well-known/knx/crc", 60)
def do_sequence_oscore(my_base):
    """Read and update the OSCORE parameters under /p/oscore.

    Sets the replay window (replwdo) to 105 and the osn delay to 1050,
    reading each value before and after the update.
    """
    base = "coap://" + my_base
    execute_get(base + "/f/oscore", 40)
    # Replay window: read, update, read back.
    execute_get(base + "/p/oscore/replwdo", 60)
    execute_put(base + "/p/oscore/replwdo", 60, 60, 105)
    execute_get(base + "/p/oscore/replwdo", 60)
    # OSN delay: read, update, read back.
    execute_get(base + "/p/oscore/osndelay", 60)
    execute_put(base + "/p/oscore/osndelay", 60, 60, 1050)
    execute_get(base + "/p/oscore/osndelay", 60)
def do_sequence_core_knx(my_base):
    """GET /.well-known/knx, then POST {1: 5, 2: "reset"} to it."""
    url = "coap://" + my_base + "/.well-known/knx"
    execute_get(url, 60)
    execute_post(url, 60, 60, {1: 5, 2: "reset"})
def do_sequence_a_sen(my_base):
    """POST a reset command (key 2 = cmd) to /a/sen."""
    execute_post("coap://" + my_base + "/a/sen", 60, 60, {2: "reset"})
def do_sequence_auth(my_base):
    """GET the /auth resource in link format (accept 40)."""
    execute_get("coap://" + my_base + "/auth", 40)
def do_sequence_auth_at(my_base):
    """Exercise the access-token table at /auth/at.

    Lists the table, creates two integer-keyed entries, lists again, then
    reads and deletes the first entry.
    """
    url = "coap://" + my_base + "/auth/at"
    execute_get(url, 40)
    # Create two entries (integer-keyed CBOR maps).
    execute_post(url, 60, 60,
                 {0: b"id", 1: 20, 2: b"ms", 3: "hkdf", 4: "alg",
                  5: b"salt", 6: b"contextId"})
    execute_post(url, 60, 60,
                 {0: b"id2", 1: 20, 2: b"ms", 3: "hkdf", 4: "alg",
                  5: b"salt", 6: b"contextId2"})
    execute_get(url, 40)
    # Read and remove the first entry.
    execute_get(url + "/id", 60)
    execute_del(url + "/id", 60)
def do_sequence_f(my_base):
    """GET the /f collection, one instance, and /.well-known/core (all link-format)."""
    base = "coap://" + my_base
    execute_get(base + "/f", 40)
    # note this one is a bit dirty hard coded...
    execute_get(base + "/f/417", 40)
    execute_get(base + "/.well-known/core", 40)
def do_sequence(my_base):
    """Top-level test driver: run all resource sequences against *my_base*.

    NOTE(review): the unconditional ``return`` directly after install()
    makes everything below unreachable — this looks like a temporary
    debugging short-circuit; confirm and remove it to run the full suite.
    """
    #sn = get_sn(my_base)
    install(my_base)
    return
    do_sequence_dev(my_base)
    #return
    do_sequence_fp_g_int(my_base)
    #do_sequence_fp_g(my_base)
    do_sequence_fp_p_int(my_base)
    #do_sequence_fp_p(my_base)
    do_sequence_fp_r_int(my_base)
    #do_sequence_fp_r(my_base)
    do_sequence_lsm_int(my_base)
    #do_sequence_lsm(my_base)
    # NOTE(review): lsm_int is invoked a second time here — confirm whether
    # the repeat is intentional or a copy/paste leftover.
    do_sequence_lsm_int(my_base)
    # .knx
    do_sequence_knx_knx_int(my_base)
    #do_sequence_knx_knx(my_base)
    do_sequence_knx_spake(my_base)
    do_sequence_knx_idevid(my_base)
    do_sequence_knx_ldevid(my_base)
    do_sequence_knx_crc(my_base)
    do_sequence_knx_osn(my_base)
    do_sequence_oscore(my_base)
    do_sequence_core_knx(my_base)
    do_sequence_a_sen(my_base)
    do_sequence_auth(my_base)
    do_sequence_auth_at(my_base)
    do_sequence_f(my_base)
def client_callback_discovery(response, checkdata=None):
    """Discovery callback: on a link-format response, extract the device's
    base address into the global ``my_base`` and run the full test sequence
    against it. ``checkdata`` is accepted for signature compatibility but
    unused here.
    """
    print(" --- Discovery Callback ---")
    global my_base
    if response is not None:
        print ("response code:",response.code)
        print ("response type:",response.content_type)
        # Codes above 100 are error responses; stop here.
        if response.code > 100:
            print("+++returned error+++")
            return
        if response.content_type == defines.Content_types["application/link-format"]:
            print (response.payload.decode())
            my_base = get_base_from_link(response.payload.decode())
            do_sequence(my_base)
def code2string(code):
    """Return a human-readable label for a CoAP response code, or "" if unknown."""
    names = {
        68: "(Changed)",
        69: "(Content)",
        132: "(Not Found)",
        133: "(METHOD_NOT_ALLOWED)",
        160: "(INTERNAL_SERVER_ERROR)",
    }
    return names.get(code, "")
def client_callback(response, checkdata=None):
    """Print a CoAP response; optionally verify an application/cbor payload.

    :param response: the CoAP response object (or None)
    :param checkdata: optional CBOR-encoded expected payload; when given and
        the response is application/cbor, the decoded payloads are compared
        and OK / NOT OK is printed.
    """
    print(" --- Callback ---")
    if response is None:
        print(" Response : None")
        return
    print("response code:", response.code, code2string(response.code))
    print("response type:", response.content_type)
    # Codes above 100 are error responses; nothing further to decode.
    if response.code > 100:
        print("+++returned error+++")
        return
    if response.content_type == defines.Content_types["text/plain"]:
        if response.payload is not None:
            print(type(response.payload), len(response.payload))
            print("=========")
            print(response.payload)
            print("=========")
        else:
            print("payload: none")
    elif response.content_type == defines.Content_types["application/cbor"]:
        print(type(response.payload), len(response.payload))
        print("=========")
        print(response.payload)
        print("=========")
        json_string = ""
        try:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
        except Exception:  # FIX: was a bare except (also caught SystemExit etc.)
            print("error in cbor..")
        print(json_string)
        print("===+++===")
        if checkdata is not None:
            # Compare via canonical JSON renderings of both CBOR payloads.
            check_data = cbor.loads(checkdata)
            check_string = json.dumps(check_data, indent=2, sort_keys=True)
            print(" check: ")
            print(check_string)
            if check_string == json_string:
                print(" =+++===> OK ")
            else:
                print(" =+++===> NOT OK ")
                print(json_string)
    elif response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
        print("application/vnd.ocf+cbor")
        try:
            print(type(response.payload), len(response.payload))
            print("=========")
            print(response.payload)
            print("=========")
            json_data = cbor.loads(response.payload)
            print(json_data)
            print("---------")
            # FIX: json.dumps moved inside the try; previously it ran after
            # the except handler and raised NameError when cbor.loads failed
            # (json_data was never assigned).
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print(json_string)
        except Exception:
            traceback.print_exc()
    elif response.content_type == defines.Content_types["application/link-format"]:
        print(response.payload.decode())
    else:
        if response.payload is not None:
            print("type, len", type(response.payload), len(response.payload))
            print(response.payload)
#check = True
#while check:
# chosen = eval(input("Stop observing? [y/N]: "))
# if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
# print("Unrecognized choose.")
# continue
def client_callback_observe(response):  # pragma: no cover
    """Observe callback: interactively ask the user whether to stop
    observing, and whether to send an RST message when cancelling."""
    global client
    print("Callback_observe")
    check = True
    while check:
        # FIX: was eval(input(...)) — a leftover from a 2to3 conversion of
        # raw_input(). eval() executes arbitrary user input (security risk)
        # and even a plain "y" answer raised NameError.
        chosen = input("Stop observing? [y/N]: ")
        if chosen != "" and not (chosen == "n" or chosen == "N" or chosen == "y" or chosen == "Y"):
            print("Unrecognized choose.")
            continue
        elif chosen == "y" or chosen == "Y":
            while True:
                rst = input("Send RST message? [Y/n]: ")
                if rst != "" and not (rst == "n" or rst == "N" or rst == "y" or rst == "Y"):
                    print("Unrecognized choose.")
                    continue
                elif rst == "" or rst == "y" or rst == "Y":
                    client.cancel_observing(response, True)
                else:
                    client.cancel_observing(response, False)
                check = False
                break
        else:
            break
def execute_get(mypath, ct_value):
    """GET *mypath* with Accept = *ct_value* and print the response.

    Returns the response object, or None when the URL is unusable.
    """
    print("---------------------------")
    print("execute_get: ", ct_value, mypath)
    print(type(mypath))
    # Guard clauses: skip empty/short or non-coap URLs.
    if mypath is None or len(mypath) < 5:
        return
    if not mypath.startswith("coap://"):
        print(" not executing: ", mypath)
        return
    ct = {'accept': ct_value}
    host, port, path = parse_uri(mypath)
    try:
        host = socket.gethostbyname(host)
    except socket.gaierror:
        pass  # keep the unresolved host name
    nclient = HelperClient(server=(host, port))
    response = nclient.get(path, None, None, **ct)
    client_callback(response)
    nclient.stop()
    return response
def execute_del(mypath, ct_value):
    """DELETE *mypath* with Accept/Content-Format = *ct_value*; print the result."""
    print("---------------------------")
    print("execute_del: ", ct_value, mypath)
    ct = {'accept': ct_value, 'content_type': ct_value}
    if not mypath.startswith("coap://"):
        print(" not executing: ", mypath)
        return
    host, port, path = parse_uri(mypath)
    try:
        host = socket.gethostbyname(host)
    except socket.gaierror:
        pass  # keep the unresolved host name
    # FIX: removed the unused second HelperClient (nclientcheck) that was
    # created here and never used or stopped (leaked a client), plus the
    # unused do_exit/payload locals.
    nclient = HelperClient(server=(host, port))
    response = nclient.delete(path, None, None, **ct)
    client_callback(response)
    # NOTE(review): unlike execute_get/put/post, nclient.stop() was
    # deliberately commented out in the original — confirm whether leaving
    # this client running is intentional before enabling it.
    #nclient.stop()
    print("=======")
def execute_put(mypath, ct_value, accept, content):
    """PUT *content* to *mypath* and print the response.

    :param ct_value: Content-Format option value
    :param accept: Accept option value; when 60 (application/cbor) the
        content is CBOR-serialized before sending, otherwise sent raw.
    """
    print("---------------------------")
    print("execute_put: ", ct_value, mypath)
    ct = {'accept': accept, 'content_type': ct_value}
    if not mypath.startswith("coap://"):
        print(" not executing: ", mypath)
        return
    host, port, path = parse_uri(mypath)
    try:
        host = socket.gethostbyname(host)
    except socket.gaierror:
        pass  # keep the unresolved host name
    # FIX: removed the unused second HelperClient (nclientcheck) that was
    # created and never used or stopped (leaked a client), plus do_exit.
    nclient = HelperClient(server=(host, port))
    if accept == 60:
        payload = cbor.dumps(content)
    else:
        payload = content
    print("payload: ", payload)
    response = nclient.put(path, payload, None, None, None, **ct)
    client_callback(response)
    nclient.stop()
def execute_post(mypath, ct_value, accept, content):
    """POST *content* to *mypath* and print the response.

    :param ct_value: Content-Format option value
    :param accept: Accept option value; when 60 (application/cbor) the
        content is CBOR-serialized before sending, otherwise sent raw.
    """
    print("---------------------------")
    print("execute_post: ", ct_value, mypath)
    print(content)
    print(" ---------------------")
    ct = {'accept': accept, 'content_type': ct_value}
    if not mypath.startswith("coap://"):
        print(" not executing: ", mypath)
        return
    host, port, path = parse_uri(mypath)
    try:
        host = socket.gethostbyname(host)
    except socket.gaierror:
        pass  # keep the unresolved host name
    nclient = HelperClient(server=(host, port))
    # Removed the unused do_exit flag and dead payload pre-initialisation.
    if accept == 60:
        payload = cbor.dumps(content)
    else:
        payload = content
    response = nclient.post(path, payload, None, None, None, **ct)
    client_callback(response)
    nclient.stop()
def main():  # pragma: no cover
    """Command-line entry point.

    Parses options (-o/--operation, -p/--path, -P/--payload,
    -f/--payload-file, -c/--content-type) and performs one CoAP operation:
    GET, GETNONE, OBSERVE, DELETE, POST, PUT or DISCOVER, printing the
    decoded response.
    """
    global client
    op = None
    path = None
    payload = None
    ct = {}
    ct['accept'] = 40  # default Accept: application/link-format
    try:
        # FIX: long options now match their short counterparts:
        #  - "content-type=" takes an argument (the original "content-type"
        #    without '=' made getopt treat --content-type as a flag), and
        #  - "payload-file=" matches the "--payload-file" spelling checked
        #    below (the original declared "payload_file=" which the handler
        #    never matched).
        opts, args = getopt.getopt(sys.argv[1:], "ho:p:P:f:c:",
                                   ["help", "operation=", "path=", "payload=",
                                    "payload-file=", "content-type="])
    except getopt.GetoptError as err:
        # print help information and exit:
        print((str(err)))  # will print something like "option -a not recognized"
        usage()
        sys.exit(2)
    for o, a in opts:
        if o in ("-o", "--operation"):
            op = a
        elif o in ("-p", "--path"):
            path = a
        elif o in ("-P", "--payload"):
            payload = a
        elif o in ("-c", "--content-type"):
            ct['accept'] = a
            print("content type request : ", ct)
        elif o in ("-f", "--payload-file"):
            with open(a, 'r') as f:
                payload = f.read()
        elif o in ("-h", "--help"):
            usage()
            sys.exit()
        else:
            usage()
            sys.exit(2)
    if op is None:
        print("Operation must be specified")
        usage()
        sys.exit(2)
    if path is None:
        print("Path must be specified")
        usage()
        sys.exit(2)
    if not path.startswith("coap://"):
        print("Path must be conform to coap://host[:port]/path")
        usage()
        sys.exit(2)
    host, port, path = parse_uri(path)
    try:
        tmp = socket.gethostbyname(host)
        host = tmp
    except socket.gaierror:
        pass
    client = HelperClient(server=(host, port))
    if op == "GET":
        if path is None:
            print("Path cannot be empty for a GET request")
            usage()
            sys.exit(2)
        response = client.get(path, None, None, **ct)
        print((response.pretty_print()))
        if response.content_type == defines.Content_types["application/json"]:
            json_data = json.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        if response.content_type == defines.Content_types["application/cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        if response.content_type == defines.Content_types["application/link-format"]:
            print(response.payload.decode())
            print("\n\n")
        if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        client.stop()
    elif op == "GETNONE":
        if path is None:
            print("Path cannot be empty for a GET-None request")
            usage()
            sys.exit(2)
        response = client.get_non(path, None, None, **ct)
        print((response.pretty_print()))
        if response.content_type == defines.Content_types["application/json"]:
            json_data = json.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        if response.content_type == defines.Content_types["application/cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print("JSON ::")
            print(json_string)
        client.stop()
    elif op == "OBSERVE":
        if path is None:
            print("Path cannot be empty for a GET request")
            usage()
            sys.exit(2)
        client.observe(path, client_callback_observe)
    elif op == "DELETE":
        if path is None:
            print("Path cannot be empty for a DELETE request")
            usage()
            sys.exit(2)
        response = client.delete(path)
        print((response.pretty_print()))
        client.stop()
    elif op == "POST":
        if path is None:
            print("Path cannot be empty for a POST request")
            usage()
            sys.exit(2)
        if payload is None:
            print("Payload cannot be empty for a POST request")
            usage()
            sys.exit(2)
        print("payload for POST (ascii):", payload)
        print(ct['accept'])
        # The -c option value is a string, hence the str() comparisons.
        if ct['accept'] == str(defines.Content_types["application/cbor"]):
            json_data = json.loads(payload)
            cbor_data = cbor.dumps(json_data)
            payload = bytes(cbor_data)
        if ct['accept'] == str(defines.Content_types["application/vnd.ocf+cbor"]):
            json_data = json.loads(payload)
            # FIX: was cbor.loads(json_data) — the outgoing payload must be
            # *encoded* to CBOR (cf. the application/cbor branch above).
            cbor_data = cbor.dumps(json_data)
            payload = cbor_data
        response = client.post(path, payload, None, None, **ct)
        print((response.pretty_print()))
        if response.content_type == defines.Content_types["application/cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print(json_string)
        if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
            json_data = cbor.loads(response.payload)
            json_string = json.dumps(json_data, indent=2, sort_keys=True)
            print(json_string)
        client.stop()
    elif op == "PUT":
        if path is None:
            print("Path cannot be empty for a PUT request")
            usage()
            sys.exit(2)
        if payload is None:
            print("Payload cannot be empty for a PUT request")
            usage()
            sys.exit(2)
        response = client.put(path, payload)
        print((response.pretty_print()))
        client.stop()
    elif op == "DISCOVER":
        #response = client.discover( path, client_callback, None, **ct)
        response = client.discover(path, None, None, **ct)
        if response is not None:
            print(response.pretty_print())
            if response.content_type == defines.Content_types["application/cbor"]:
                json_data = cbor.loads(response.payload)
                json_string = json.dumps(json_data, indent=2, sort_keys=True)
                print(json_string)
            if response.content_type == defines.Content_types["application/vnd.ocf+cbor"]:
                json_data = cbor.loads(response.payload)
                json_string = json.dumps(json_data, indent=2, sort_keys=True)
                print(json_string)
            if response.content_type == defines.Content_types["application/link-format"]:
                print(response.payload.decode())
                # do_get(response.payload.decode(), client)
                client_callback_discovery(response)
        # Give the discovery-triggered sequence a moment before shutdown.
        counter = 2
        try:
            while counter > 0:
                time.sleep(1)
                counter = counter - 1
        except KeyboardInterrupt:
            print("Client Shutdown")
        client.stop()
    else:
        print("Operation not recognized")
        usage()
        sys.exit(2)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':  # pragma: no cover
    main()
| 33.786333 | 149 | 0.546206 | 4,538 | 35,104 | 4.065668 | 0.078669 | 0.066992 | 0.080217 | 0.076314 | 0.758753 | 0.732358 | 0.681843 | 0.643089 | 0.602764 | 0.575285 | 0 | 0.040155 | 0.269998 | 35,104 | 1,038 | 150 | 33.818882 | 0.679817 | 0.097425 | 0 | 0.574555 | 0 | 0.001368 | 0.177289 | 0.022312 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053352 | false | 0.00684 | 0.01368 | 0 | 0.093023 | 0.199726 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
810ccb8df33ca9c859d68156c3d23f37b798cbf1
| 1,301 |
py
|
Python
|
tests/components/zwave_js/test_discovery.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 1 |
2020-12-18T12:23:04.000Z
|
2020-12-18T12:23:04.000Z
|
tests/components/zwave_js/test_discovery.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 60 |
2020-07-06T15:10:30.000Z
|
2022-03-31T06:01:46.000Z
|
tests/components/zwave_js/test_discovery.py
|
tbarbette/core
|
8e58c3aa7bc8d2c2b09b6bd329daa1c092d52d3c
|
[
"Apache-2.0"
] | 4 |
2017-01-10T04:17:33.000Z
|
2021-09-02T16:37:24.000Z
|
"""Test discovery of entities for device-specific schemas for the Z-Wave JS integration."""
async def test_iblinds_v2(hass, client, iblinds_v2, integration):
    """Test that an iBlinds v2.0 multilevel switch value is discovered as a cover."""
    assert iblinds_v2.device_class.specific.label == "Unused"
    # The node must produce a cover entity, and no light entity.
    assert not hass.states.get("light.window_blind_controller")
    assert hass.states.get("cover.window_blind_controller")
async def test_ge_12730(hass, client, ge_12730, integration):
    """Test GE 12730 Fan Controller v2.0 multilevel switch is discovered as a fan."""
    assert ge_12730.device_class.specific.label == "Multilevel Power Switch"
    # The node must produce a fan entity, and no light entity.
    assert not hass.states.get("light.in_wall_smart_fan_control")
    assert hass.states.get("fan.in_wall_smart_fan_control")
async def test_inovelli_lzw36(hass, client, inovelli_lzw36, integration):
    """Test LZW36 Fan Controller multilevel switch endpoint 2 is discovered as a fan."""
    assert inovelli_lzw36.device_class.specific.label == "Unused"
    # Endpoint 1 surfaces as a light (off), endpoint 2 as a fan.
    assert hass.states.get("light.family_room_combo").state == "off"
    assert hass.states.get("fan.family_room_combo_2")
| 34.236842 | 91 | 0.731745 | 186 | 1,301 | 4.946237 | 0.317204 | 0.058696 | 0.097826 | 0.117391 | 0.438043 | 0.343478 | 0.206522 | 0.136957 | 0.136957 | 0.136957 | 0 | 0.034291 | 0.170638 | 1,301 | 37 | 92 | 35.162162 | 0.81835 | 0.065334 | 0 | 0.333333 | 0 | 0 | 0.209544 | 0.170124 | 0 | 0 | 0 | 0 | 0.428571 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
810e3e3e48092c408dee59bf8a6eb974e84689eb
| 1,475 |
py
|
Python
|
Final-Project/server/art/serializers.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
Final-Project/server/art/serializers.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
Final-Project/server/art/serializers.py
|
wendy006/Web-Dev-Course
|
2f0cfddb7ab4db88ffb4483c7cd4a00abf36c720
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from .models import *
class CollectionSerializer(serializers.ModelSerializer):
    """Serializes Collection rows (id, names, description, image URL)."""
    class Meta:
        model = Collection
        fields = ('collectionID', 'name', 'display_name', 'description', 'img_url')
class ArtSerializer(serializers.ModelSerializer):
    """Serializes Art rows, including their image URLs."""
    # Read-only: DRF sources these from same-named attributes on the model
    # (presumably computed properties — confirm against the Art model).
    img_url = serializers.ReadOnlyField()
    thumb_url = serializers.ReadOnlyField()
    class Meta:
        model = Art
        fields = ('artID', 'title', 'filename', 'rarity', 'collection', 'img_url', 'thumb_url')
class UserSerializer(serializers.ModelSerializer):
    """Serializes User accounts; accepts a password on write, never echoes it."""
    class Meta:
        model = User
        fields = ('id', 'username', 'email', 'password', 'coins', 'art')
        extra_kwargs = {
            # Never include the password in serialized output.
            'password': {'write_only': True}
        }
    def create(self, validated_data):
        """Create a user, hashing the password via set_password()."""
        # Pop the plain-text password so it is not assigned as a raw field.
        password = validated_data.pop('password', None)
        instance = self.Meta.model(**validated_data)
        if password is not None:
            instance.set_password(password)
        instance.save()
        return instance
class OwnSerializer(serializers.ModelSerializer):
    """Serializes Own rows (ownership of an art piece by a user)."""
    # Read-only: sourced from the same-named attribute on the Own model.
    duplicates = serializers.ReadOnlyField()
    class Meta:
        model = Own
        fields = ('ownID', 'user', 'art', 'duplicates')
class SaleSerializer(serializers.ModelSerializer):
    """Serializes Sale rows (marketplace listing and purchase state)."""
    class Meta:
        model = Sale
        fields = ('saleID', 'seller', 'buyer', 'ownership', 'art', 'price', 'available', 'sold', 'postDate', 'purchaseDate')
| 35.97561 | 124 | 0.626441 | 136 | 1,475 | 6.698529 | 0.5 | 0.059276 | 0.076839 | 0.115258 | 0.215148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.247458 | 1,475 | 41 | 124 | 35.97561 | 0.820721 | 0 | 0 | 0.142857 | 0 | 0 | 0.168524 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0.142857 | 0.057143 | 0 | 0.485714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
811c9730165b0d65d78610ed7c5cc6d9f073addc
| 5,039 |
py
|
Python
|
sifter/grammar/grammar.py
|
russell/sifter
|
03e85349fd2329439ae3f7eb3c1f484ba2ebf807
|
[
"BSD-2-Clause"
] | null | null | null |
sifter/grammar/grammar.py
|
russell/sifter
|
03e85349fd2329439ae3f7eb3c1f484ba2ebf807
|
[
"BSD-2-Clause"
] | null | null | null |
sifter/grammar/grammar.py
|
russell/sifter
|
03e85349fd2329439ae3f7eb3c1f484ba2ebf807
|
[
"BSD-2-Clause"
] | 1 |
2020-08-19T06:30:47.000Z
|
2020-08-19T06:30:47.000Z
|
# Parser based on RFC 5228, especially the grammar as defined in section 8. All
# references are to sections in RFC 5228 unless stated otherwise.
import ply.yacc
import sifter.grammar
from sifter.grammar.lexer import tokens
import sifter.handler
import logging
__all__ = ('parser',)
def parser(**kwargs):
    # Build a PLY parser from the p_* grammar rules defined in this module;
    # kwargs are forwarded unchanged to ply.yacc.yacc (e.g. debug, outputdir).
    return ply.yacc.yacc(**kwargs)
def p_commands_list(p):
    """commands : commands command"""
    # NOTE: the docstring above is the PLY grammar production — do not edit.
    # Append the newly parsed command to the running CommandList, enforcing
    # the RFC 5228 ordering rules first.
    p[0] = p[1]
    # section 3.2: REQUIRE command must come before any other commands
    if p[2].RULE_IDENTIFIER == 'REQUIRE':
        if any(command.RULE_IDENTIFIER != 'REQUIRE'
               for command in p[0].commands):
            log = logging.getLogger("sifter")
            log.error(("REQUIRE command on line %d must come before any "
                       "other non-REQUIRE commands" % p.lineno(2)))
            raise SyntaxError
    # section 3.1: ELSIF and ELSE must follow IF or another ELSIF
    elif p[2].RULE_IDENTIFIER in ('ELSIF', 'ELSE'):
        if p[0].commands[-1].RULE_IDENTIFIER not in ('IF', 'ELSIF'):
            log = logging.getLogger("sifter")
            log.error(("ELSIF/ELSE command on line %d must follow an IF/ELSIF "
                       "command" % p.lineno(2)))
            raise SyntaxError
    p[0].commands.append(p[2])
def p_commands_empty(p):
    """commands : """
    # Base case: an empty script yields an empty CommandList.
    p[0] = sifter.grammar.CommandList()
def p_command(p):
    """command : IDENTIFIER arguments ';'
               | IDENTIFIER arguments block"""
    #print("COMMAND:", p[1], p[2], p[3])
    # Look up the registered handler for the command name and instantiate it
    # with the parsed arguments, optional test list, and optional block.
    tests = p[2].get('tests')
    block = None
    # Second alternative: p[3] is a command block instead of ';'.
    if p[3] != ';': block = p[3]
    handler = sifter.handler.get('command', p[1])
    if handler is None:
        log = logging.getLogger("sifter")
        log.error(("No handler registered for command '%s' on line %d" %
                   (p[1], p.lineno(1))))
        raise SyntaxError
    p[0] = handler(arguments=p[2]['args'], tests=tests, block=block)
def p_command_error(p):
    """command : IDENTIFIER error ';'
               | IDENTIFIER error block"""
    # PLY error rule: report a malformed command and abort parsing.
    log = logging.getLogger("sifter")
    log.error(("Syntax error in command definition after %s on line %d" %
               (p[1], p.lineno(1))))
    raise SyntaxError
def p_block(p):
    """block : '{' commands '}' """
    # section 3.2: REQUIRE command must come before any other commands,
    # which means it can't be in the block of another command
    if any(command.RULE_IDENTIFIER == 'REQUIRE'
           for command in p[2].commands):
        log = logging.getLogger("sifter")
        log.error(("REQUIRE command not allowed inside of a block (line %d)" %
                   (p.lineno(2))))
        raise SyntaxError
    # A block's value is simply its contained CommandList.
    p[0] = p[2]
def p_block_error(p):
    """block : '{' error '}'"""
    # PLY error rule: report a malformed command block and abort parsing.
    log = logging.getLogger("sifter")
    log.error(("Syntax error in command block that starts on line %d" %
               (p.lineno(1),)))
    raise SyntaxError
def p_arguments(p):
    """arguments : argumentlist
                 | argumentlist test
                 | argumentlist '(' testlist ')'"""
    # Result dict: 'args' holds the positional arguments; 'tests' is present
    # only when a single test or a parenthesized test-list followed.
    p[0] = { 'args' : p[1], }
    if len(p) > 2:
        if p[2] == '(':
            p[0]['tests'] = p[3]
        else:
            # Single test: normalize to a one-element list.
            p[0]['tests'] = [ p[2] ]
def p_testlist_error(p):
    """arguments : argumentlist '(' error ')'"""
    # PLY error rule: report a malformed parenthesized test list.
    log = logging.getLogger("sifter")
    log.error(("Syntax error in test list that starts on line %d" % p.lineno(2)))
    raise SyntaxError
def p_argumentlist_list(p):
    """argumentlist : argumentlist argument"""
    # Accumulate arguments left-to-right onto the existing list.
    p[0] = p[1]
    p[0].append(p[2])
def p_argumentlist_empty(p):
    """argumentlist : """
    # Base case: no arguments.
    p[0] = []
def p_test(p):
    """test : IDENTIFIER arguments"""
    #print("TEST:", p[1], p[2])
    # Look up the registered handler for the test name and instantiate it
    # with the parsed arguments and optional nested tests.
    tests = p[2].get('tests')
    handler = sifter.handler.get('test', p[1])
    if handler is None:
        log = logging.getLogger("sifter")
        log.error(("No handler registered for test '%s' on line %d" %
                   (p[1], p.lineno(1))))
        raise SyntaxError
    p[0] = handler(arguments=p[2]['args'], tests=tests)
def p_testlist_list(p):
    """testlist : test ',' testlist"""
    # Right-recursive rule: prepend this test to the tail list.
    p[0] = p[3]
    p[0].insert(0, p[1])
def p_testlist_single(p):
    """testlist : test"""
    # Base case: a single test becomes a one-element list.
    p[0] = [ p[1] ]
def p_argument_stringlist(p):
    """argument : '[' stringlist ']'"""
    # A bracketed string list is passed through as the argument value.
    p[0] = p[2]
def p_argument_string(p):
    """argument : string"""
    # for simplicity, we treat all single strings as a string list
    p[0] = [ p[1] ]
def p_argument_number(p):
    """argument : NUMBER"""
    # Numbers are passed through unchanged.
    p[0] = p[1]
def p_argument_tag(p):
    """argument : TAG"""
    # Wrap tagged arguments (e.g. :contains) in the grammar's Tag type.
    p[0] = sifter.grammar.Tag(p[1])
def p_stringlist_error(p):
    """argument : '[' error ']'"""
    # PLY error rule: report a malformed bracketed string list.
    log = logging.getLogger("sifter")
    log.error(("Syntax error in string list that starts on line %d" %
               p.lineno(1)))
    raise SyntaxError
def p_stringlist_list(p):
    """stringlist : string ',' stringlist"""
    # Right-recursive rule: prepend this string to the tail list.
    p[0] = p[3]
    p[0].insert(0, p[1])
def p_stringlist_single(p):
    """stringlist : string"""
    # Base case: a single string becomes a one-element list.
    p[0] = [ p[1] ]
def p_string(p):
"""string : QUOTED_STRING"""
p[0] = sifter.grammar.String(p[1])
| 29.467836 | 81 | 0.581663 | 695 | 5,039 | 4.14964 | 0.164029 | 0.017337 | 0.010402 | 0.078017 | 0.460125 | 0.407767 | 0.388696 | 0.345007 | 0.333911 | 0.29577 | 0 | 0.024494 | 0.254614 | 5,039 | 170 | 82 | 29.641176 | 0.743344 | 0.227228 | 0 | 0.352381 | 0 | 0 | 0.169852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.209524 | false | 0 | 0.047619 | 0.009524 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
811eb205fb191ad48270915e49e393d586962cb9
| 26,184 |
py
|
Python
|
smipyping/_targetstable.py
|
KSchopmeyer/smipyping
|
9c60b3489f02592bd9099b8719ca23ae43a9eaa5
|
[
"MIT"
] | null | null | null |
smipyping/_targetstable.py
|
KSchopmeyer/smipyping
|
9c60b3489f02592bd9099b8719ca23ae43a9eaa5
|
[
"MIT"
] | 19 |
2017-10-18T15:31:25.000Z
|
2020-03-04T19:31:59.000Z
|
smipyping/_targetstable.py
|
KSchopmeyer/smipyping
|
9c60b3489f02592bd9099b8719ca23ae43a9eaa5
|
[
"MIT"
] | null | null | null |
# (C) Copyright 2017 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Define the base of targets (i.e. systems to be tested)
TargetID = Column(Integer(11), primary_key=True)
IPAddress = Column(String(15), nullable=False)
CompanyID = Column(Integer(11), ForeignKey("Companies.CompanyID"))
Namespace = Column(String(30), nullable=False)
SMIVersion = Column(String(15), nullable=False)
Product = Column(String(30), nullable=False)
Principal = Column(String(30), nullable=False)
Credential = Column(String(30), nullable=False)
CimomVersion = Column(String(30), nullable=False)
InteropNamespace = Column(String(30), nullable=False)
Notify = Column(Enum('Enabled', 'Disabled'), default='Disabled')
NotifyUsers = Column(String(12), nullable=False)
ScanEnabled = Column(Enum('Enabled', 'Disabled'), default='Enabled')
Protocol = Column(String(10), default='http')
Port = Column(String(10), nullable=False)
"""
# TODO change ip_address to hostname where host name is name : port
from __future__ import print_function, absolute_import
import os
import csv
import re
from collections import OrderedDict
from textwrap import wrap
import six
from mysql.connector import Error as mysqlerror
from ._dbtablebase import DBTableBase
from ._mysqldbmixin import MySQLDBMixin
from ._common import get_url_str
from ._logging import AUDIT_LOGGER_NAME, get_logger
from ._companiestable import CompaniesTable
__all__ = ['TargetsTable']
class TargetsTable(DBTableBase):
    """
    Class representing the targets db table.

    This base contains information on the targets, host systems, etc. in the
    environment.

    The factory method should be used to construct a new TargetsTable object
    since that creates the correct object for the defined database type.
    """
    table_name = 'Targets'
    key_field = 'TargetID'

    # Fields that are required to create new records
    required_fields = [
        'IPAddress', 'CompanyID', 'Namespace',
        'SMIVersion', 'Product', 'Principal', 'Credential',
        'CimomVersion', 'InteropNamespace', 'Notify', 'NotifyUsers',
        'ScanEnabled', 'Protocol', 'Port']

    # All fields in each record.
    fields = [key_field] + required_fields

    # Fields that come from joined tables (e.g. Companies), not from the
    # Targets table itself.
    join_fields = ['CompanyName']
    all_fields = fields + join_fields

    # Per-field help strings used when prompting for new record content.
    hints = {
        'IPAddress': "Host name or ip address",
        'CompanyID': "DB id of company",
        'Namespace': "User namespace",
        'SMIVersion': "SMI version",
        'Product': "Product name",
        'Principal': "User Name to access target",
        'Credential': "User password to access target",
        'CimomVersion': "Version of CIMOM",
        'InteropNamespace': "Interop Namespace name",
        'Notify': "'Enabled' if users to be notified of issues, else "
                  "'Disabled'",
        'NotifyUsers': "List of UserIDs to notify",
        'ScanEnabled': "Enabled if this target to be scanned",
        'Protocol': '"http" or "https"',
        'Port': "Integer defining WBEM server port."}

    # Defines each record for the data base and outputs.
    # The key is the database name for the property.
    # The value tuple is (display name, max display width, value type).
    table_format_dict = OrderedDict([
        ('TargetID', ('ID', 2, int)),
        ('CompanyName', ('CompanyName', 12, str)),
        ('Namespace', ('Namespace', 12, str)),
        ('SMIVersion', ('SMIVersion', 12, str)),
        ('Product', ('Product', 15, str)),
        ('Principal', ('Principal', 12, str)),
        ('Credential', ('Credential', 12, str)),
        ('CimomVersion', ('CimomVersion', 15, str)),
        ('IPAddress', ('IPAddress', 12, str)),
        ('InteropNamespace', ('Interop', 8, str)),
        ('Notify', ('Notify', 12, str)),
        ('NotifyUsers', ('NotifyUsers', 12, str)),
        ('Protocol', ('Prot', 5, str)),
        ('Port', ('Port', 4, int)),
        ('ScanEnabled', ('Enabled', 6, str)),
    ])  # noqa: E123

    def __init__(self, db_dict, db_type, verbose, output_format):
        """Initialize the abstract Targets instance.

        This controls all other target bases. This defines the common
        definition of all targets bases including field names, and common
        methods.

        Parameters:
          db_dict (:term:`dictionary`)
            Dictionary containing all of the parameters to open the database
            defined by the db_dict attribute.

          db_type (:term:`string`)
            String defining one of the allowed database types for the
            target database.

          verbose (:class:`py:bool`)
            Boolean. If true detailed info is displayed on the processing
            of the TargetData class

          output_format (:term:`string`)
            String defining one of the legal report output formats. If not
            provided, the default is a simple report format.
        """
        super(TargetsTable, self).__init__(db_dict, db_type, verbose)
        self.output_format = output_format

    def test_fieldnames(self, fields):
        """Test a list of field names. This test generates an exception,
        KeyError if a field in fields is not in the table
        """
        for field in fields:
            self.table_format_dict[field]  # pylint: disable=pointless-statement

    def get_dbdict(self):
        """Get string for the db_dict"""
        return '%s' % self.db_dict

    @classmethod
    def factory(cls, db_dict, db_type, verbose, output_format='simple'):
        """Factory method to select subclass based on database type (db_type).
        Currently the types sql and csv are supported.

        Returns instance object of the defined provider type.

        Raises:
            ValueError: if db_type is not one of 'csv' or 'mysql'.
        """
        inst = None
        if verbose:
            print('targetdata factory datafile %s dbtype %s verbose %s'
                  % (db_dict, db_type, verbose))
        if db_type == 'csv':
            inst = CsvTargetsTable(db_dict, db_type, verbose,
                                   output_format=output_format)
        elif db_type == 'mysql':
            inst = MySQLTargetsTable(db_dict, db_type, verbose,
                                     output_format=output_format)
        else:
            # Bug fix: the ValueError was previously constructed but never
            # raised, so an invalid db_type silently returned None.
            raise ValueError('Invalid targets factory db_type %s' % db_type)

        if verbose:
            print('Resulting targets factory inst %r' % inst)
        return inst

    def get_field_list(self):
        """Return a list of the base table field names in the order defined."""
        return list(self.table_format_dict)

    def get_format_dict(self, name):
        """Return tuple of display name and length for name."""
        return self.table_format_dict[name]

    def get_enabled_targetids(self):
        """Get list of target ids that are marked enabled."""
        return [x for x in self.data_dict if not self.disabled_target_id(x)]

    def get_disabled_targetids(self):
        """Get list of target ids that are marked disabled"""
        return [x for x in self.data_dict
                if self.disabled_target_id(x)]

    # TODO we have multiple of these. See get dict_for_host,get_hostid_list
    def get_targets_host(self, host_data):
        """
        If an record for `host_data` exists return that record,
        otherwise return None.

        There may be multiple ipaddress, port entries for a
        single ipaddress, port in the database

        Parameters:
          host_data (tuple of hostname or ipaddress and port)

        Returns list of targetdata keys
        """
        return_list = []
        for key, value in self.data_dict.items():
            # TODO port from database is a string. Should be int internal.
            port = value["Port"]
            if value["IPAddress"] == host_data[0] and int(port) == host_data[1]:
                return_list.append(key)
        return return_list

    def get_target(self, targetid):
        """
        Get the target data for the parameter target_id.

        This is alternate to using [id] directly. It does an additonal check
        for correct type for target_id

        Returns:
            target as dictionary

        Exceptions:
            KeyError if target not in targets dictionary
        """
        if not isinstance(targetid, six.integer_types):
            targetid = int(targetid)
        return self.data_dict[targetid]

    def filter_targets(self, ip_filter=None, company_name_filter=None):
        """
        Filter for match of ip_filter and companyname filter if they exist
        and return list of any targets that match.

        The filters are regex strings anchored at the start of the value
        (re.match semantics).
        """
        rtn = OrderedDict()
        for key, value in self.data_dict.items():
            if ip_filter and re.match(ip_filter, value['IPAddress']):
                rtn[key] = value
            # Bug fix: pattern and string were previously swapped; re.match
            # takes the pattern first, consistent with the ip_filter test.
            if company_name_filter and \
                    re.match(company_name_filter, value['CompanyName']):
                rtn[key] = value
        return rtn

    def build_url(self, targetid):
        """Get the string representing the url for targetid. Gets the
        Protocol, IPaddress and port and uses the common get_url_str to
        create a string. Port info is included only if it is not the
        WBEM CIM-XML standard definitions.
        """
        target = self[targetid]
        return get_url_str(target['Protocol'], target['IPAddress'],
                           target['Port'])

    def get_hostid_list(self, ip_filter=None, company_name_filter=None):
        """
        Get all WBEM Server ipaddresses in the targets base.

        Returns list of IP addresses:port entries.

        TODO: Does not include port right now.
        TODO: the ip_filter/company_name_filter parameters are currently
        ignored.
        """
        output_list = []
        for _id, value in self.data_dict.items():
            if self.verbose:
                print('get_hostid_list value %s' % (value,))
            output_list.append(value['IPAddress'])
        return output_list

    def tbl_hdr(self, record_list):
        """Return a list of all the column headers from the record_list."""
        hdr = []
        for name in record_list:
            value = self.get_format_dict(name)
            hdr.append(value[0])
        return hdr

    def get_notifyusers(self, targetid):
        """
        Get list of entries in the notify users field and split into python
        list and return the list of integers representing the userids.
        This list stored in db as string of integers separated by commas.

        Returns None if there is no data in NotifyUsers.
        """
        notify_users = self[targetid]['NotifyUsers']
        if notify_users:
            notify_users_list = notify_users.split(',')
            notify_users_list = [int(userid) for userid in notify_users_list]
            return notify_users_list
        return None

    def format_record(self, record_id, fields, fold=False):
        """Return the fields defined in field_list for the record_id in
        display format.

        String fields will be folded if their width is greater than the
        specification in the format_dictionary and fold=True
        """
        # TODO can we make this a std cvt function.
        target = self.get_target(record_id)
        line = []
        for field_name in fields:
            field_value = target[field_name]
            fmt_value = self.get_format_dict(field_name)
            max_width = fmt_value[1]
            field_type = fmt_value[2]
            # Bug fix: field_type is a type object (str/int), so the old
            # isinstance(field_type, six.string_types) test was always False
            # and folding never happened; the fold parameter was also unused.
            if issubclass(field_type, six.string_types) and field_value:
                if fold and max_width < len(field_value):
                    line.append('\n'.join(wrap(field_value, max_width)))
                else:
                    line.append('%s' % field_value)
            else:
                line.append('%s' % field_value)
        return line

    def disabled_target(self, target_record):  # pylint: disable=no-self-use
        """
        If target_record disabled, return true, else return false.

        Raises:
            ValueError: if ScanEnabled contains neither 'Enabled' nor
            'Disabled' (case-insensitive).
        """
        val = target_record['ScanEnabled'].lower()
        if val == 'enabled':
            return False
        if val == 'disabled':
            return True
        # Bug fix: the ValueError was previously constructed but never
        # raised, so an invalid field value silently returned None.
        raise ValueError('ScanEnabled field must contain "Enabled" or '
                         '"Disabled" string. %s is invalid.' % val)

    def disabled_target_id(self, targetid):
        """
        Return True if target recorded for this target_id marked
        disabled. Otherwise return False.

        Parameters:
          targetid (:term:`integer`)
            Valid target Id for the Target_Tableue .

        Returns: (:class:`py:bool`)
            True if this target id disabled

        Exceptions:
          KeyError if target_id not in database
        """
        return self.disabled_target(self.data_dict[targetid])

    def get_output_width(self, col_list):
        """
        Get the width of a table from the column names in the list
        """
        total_width = 0
        for name in col_list:
            value = self.get_format_dict(name)
            total_width += value[1]
        return total_width

    def get_unique_creds(self):
        """
        Get the set of Credentials and Principal that represents the
        unique combination of both. The result could be used to test with
        all Principals/Credentials knows in the db.

        Return list of (Principal, Credential) tuples that represent unique
        sets of Principal and Credential.
        """
        # Map each record key to a combined principal+credential string, then
        # invert twice to keep one representative record key per unique pair.
        creds = {k: '%s%s' % (v['Principal'], v['Credential'])
                 for k, v in self.data_dict.items()}
        ucreds = dict([[v, k] for k, v in creds.items()])
        unique_keys = dict([[v, k] for k, v in ucreds.items()])
        unique_creds = [(self.data_dict[k]['Principal'],
                         self.data_dict[k]['Credential']) for k in unique_keys]
        return unique_creds
class SQLTargetsTable(TargetsTable):
    """
    Subclass of Targets data for all SQL databases. Subclasses of this class
    support specialized sql databases.
    """
    def __init__(self, db_dict, dbtype, verbose, output_format):
        """Pass through to SQL"""
        if verbose:
            print('SQL Database type %s verbose=%s' % (db_dict, verbose))
        super(SQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
                                              output_format)
        # Concrete subclasses (e.g. MySQLTargetsTable) establish the live
        # database connection; it stays None at this abstract level.
        self.connection = None
class MySQLTargetsTable(SQLTargetsTable, MySQLDBMixin):
    """
    This subclass of TargetsTable processes targets information from an sql
    database.

    Generate the targetstable from the sql database targets table and
    the companies table, by mapping the data to the dictionary defined
    for targets
    """
    # TODO filename is config file name, not actual file name.
    def __init__(self, db_dict, dbtype, verbose, output_format):
        """Read the input file into a dictionary."""
        super(MySQLTargetsTable, self).__init__(db_dict, dbtype, verbose,
                                                output_format)
        # connectdb/_load_table are supplied by MySQLDBMixin; they set
        # self.connection and populate self.data_dict from the Targets table.
        self.connectdb(db_dict, verbose)
        self._load_table()
        self._load_joins()

    def _load_joins(self):
        """
        Load the tables that would normally be joins. In this case it is the
        companies table. Move the companyName into the targets table.

        TODO we should not be doing this in this manner but with a
        join.
        """
        # Get companies table and insert into targets table:
        # TODO in smipyping name is db_dict. Elsewhere it is db_info
        companies_tbl = CompaniesTable.factory(self.db_dict,
                                               self.db_type,
                                               self.verbose)
        try:
            # set the companyname into the targets table
            for target_key in self.data_dict:
                target = self.data_dict[target_key]
                if target['CompanyID'] in companies_tbl:
                    company = companies_tbl[target['CompanyID']]
                    target['CompanyName'] = company['CompanyName']
                else:
                    # Dangling CompanyID foreign key: surface the problem in
                    # the displayed company name rather than failing.
                    target['CompanyName'] = "TableError CompanyID %s" % \
                        target['CompanyID']

        except Exception as ex:
            raise ValueError('Error: putting Company Name in table %r error %s'
                             % (self.db_dict, ex))

    def update_fields(self, targetid, changes):
        """
        Update the database record defined by targetid with the dictionary
        of items defined by changes where each item is an entry in the
        target record. Update does NOT test if the new value is the same
        as the original value.
        """
        cursor = self.connection.cursor()
        # dynamically build the update sql based on the changes dictionary
        # NOTE(review): column names from `changes` are interpolated into the
        # SQL text (values are parameterized). Callers must pass only trusted
        # field names — confirm all call sites validate against `fields`.
        set_names = "SET "
        values = []
        comma = False
        for key, value in changes.items():
            if comma:
                set_names = set_names + ", "
            else:
                comma = True
            set_names = set_names + "{0} = %s".format(key)
            values.append(value)

        values.append(targetid)
        sql = "Update Targets " + set_names

        # append targetid component
        sql = sql + " WHERE TargetID=%s"

        # Record the original data for the audit log.
        original_data = {}
        target_record = self.get_target(targetid)
        for change in changes:
            original_data[change] = target_record[change]
        try:
            cursor.execute(sql, tuple(values))
            self.connection.commit()
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.info('TargetsTable TargetID: %s, update fields: %s, '
                              'original fields: %s',
                              targetid, changes, original_data)
        except Exception as ex:
            self.connection.rollback()
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.error('TargetsTable TargetID: %s failed SQL update. '
                               'SQL: %s Changes: %s Exception: %s',
                               targetid, sql, changes, ex)
            raise ex
        finally:
            # Reload the in-memory copy so it reflects the database state
            # whether or not the update succeeded.
            self._load_table()
            self._load_joins()
            cursor.close()

    def activate(self, targetid, activate_flag):
        """
        Activate or deactivate the table entry defined by the
        targetid parameter to the value defined by the activate_flag

        Parameters:
          targetid (:term:`py:integer`):
            The database key property for this table

          activate_flag (:class:`py:bool`):
            Next state that will be set into the database for this target.

            Since the db field is an enum it actually sets Enabled or
            Disabled strings into the field
        """
        cursor = self.connection.cursor()

        enabled_kw = 'Enabled' if activate_flag else 'Disabled'
        sql = 'UPDATE Targets SET ScanEnabled = %s WHERE TargetID = %s'
        try:
            cursor.execute(sql, (enabled_kw, targetid))  # noqa F841
            self.connection.commit()
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.info('TargetTable TargetId %s,set scanEnabled to %s',
                              targetid, enabled_kw)
        except mysqlerror as ex:
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.error('TargetTable userid %s failed SQL change '
                               'ScanEnabled. SQL=%s '
                               'Change to %s exception %s: %s',
                               targetid, sql, enabled_kw, ex.__class__.__name__,
                               ex)
            self.connection.rollback()
            raise ex
        finally:
            # Reload in-memory copy; note the cursor is not closed here,
            # unlike update_fields — presumably an oversight, confirm.
            self._load_table()
            self._load_joins()

    def delete(self, targetid):
        """
        Delete the target in the targets table defined by the targetid
        """
        cursor = self.connection.cursor()

        sql = "DELETE FROM Targets WHERE TargetID=%s"
        try:
            # pylint: disable=unused-variable
            mydata = cursor.execute(sql, (targetid,))  # noqa F841
            self.connection.commit()
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.info('TargetTable TargetId %s Deleted', targetid)
        except mysqlerror as ex:
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.error('TargetTable targetid %s failed SQL DELETE. '
                               'SQL=%s exception %s: %s',
                               targetid, sql, ex.__class__.__name__, ex)
            self.connection.rollback()
            raise ex
        finally:
            self._load_table()
            self._load_joins()
            # NOTE(review): delete() and insert() close the shared connection
            # here but update_fields()/activate() do not; any further DB call
            # on this instance after a delete will fail. Looks inconsistent —
            # confirm intended lifecycle.
            self.connection.close()

    def insert(self, fields):
        """
        Write a new record to the database containing the fields defined in
        the input.

        Parameters:
          field_data ()
            Dictionary of fields to be inserted into the table. There is
            one entry in the dictionary for each field to be inserted.

        Exceptions:

        """
        cursor = self.connection.cursor()

        # Column names come from the dict keys; values are parameterized.
        placeholders = ', '.join(['%s'] * len(fields))
        columns = ', '.join(fields.keys())
        sql = "INSERT INTO %s ( %s ) VALUES ( %s )" % (self.table_name,
                                                       columns,
                                                       placeholders)
        try:
            # NOTE(review): fields.values() is a view on py3; assumes the
            # mysql connector accepts any sequence — confirm.
            cursor.execute(sql, fields.values())
            self.connection.commit()
            new_targetid = cursor.lastrowid
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.info('TargetsTable TargetId %s added. %s',
                              new_targetid, fields)
        except mysqlerror as ex:
            audit_logger = get_logger(AUDIT_LOGGER_NAME)
            audit_logger.error('TargetTable INSERT failed SQL update. SQL=%s. '
                               'data=%s. Exception %s: %s', sql, fields,
                               ex.__class__.__name__, ex)
            self.connection.rollback()
            raise ex
        finally:
            self._load_table()
            self._load_joins()
            # NOTE(review): see delete() — closing the shared connection here
            # prevents further use of this instance.
            self.connection.close()
class CsvTargetsTable(TargetsTable):
    """Comma Separated Values form of the Target base."""

    def __init__(self, db_dict, dbtype, verbose, output_format):
        """Read the input file into a dictionary.

        The CSV file name is taken from db_dict['targetsfilename']; a
        relative name is resolved against the local directory first and
        then against db_dict['directory'].

        Raises:
            ValueError: if the CSV file cannot be found or contains a
            duplicate TargetID.
        """
        super(CsvTargetsTable, self).__init__(db_dict, dbtype, verbose,
                                              output_format)
        fn = db_dict['targetsfilename']
        self.filename = fn

        # If the filename is not a full directory, the data file must be
        # either in the local directory or the same directory as the
        # config file defined by the db_dict entry directory
        if os.path.isabs(fn):
            if not os.path.isfile(fn):
                # Bug fix: the ValueError was previously constructed but
                # never raised, so a missing file went undetected here.
                raise ValueError('CSV file %s does not exist ' % fn)
            self.filename = fn
        else:
            if os.path.isfile(fn):
                self.filename = fn
            else:
                full_fn = os.path.join(db_dict['directory'], fn)
                if not os.path.isfile(full_fn):
                    # Bug fix: previously constructed but never raised.
                    raise ValueError('CSV file %s does not exist '
                                     'in local directory or config '
                                     'directory %s' %
                                     (fn, db_dict['directory']))
                self.filename = full_fn

        with open(self.filename) as input_file:
            reader = csv.DictReader(input_file)
            # create dictionary (id = key) with dictionary for
            # each set of entries
            result = {}
            for row in reader:
                key = int(row['TargetID'])
                if key in result:
                    # duplicate row handling
                    print('ERROR. Duplicate Id in table: %s\nrow=%s' %
                          (key, row))
                    raise ValueError('Input Error. duplicate Id')
                result[key] = row

        self.data_dict = result

    def write_updated_record(self, record_id):
        """Backup the existing file and write the new one.
        with cvs it writes the whole file back
        """
        backfile = '%s.bak' % self.filename
        # TODO does this cover directories/clean up for possible exceptions.
        if os.path.isfile(backfile):
            os.remove(backfile)
        os.rename(self.filename, backfile)
        self.write_file(self.filename)

    def write_file(self, file_name):
        """Write the current Target base to the named file."""
        # Bug fix: the csv module needs binary mode on Python 2 but text
        # mode with newline='' on Python 3; the unconditional 'wb' used
        # previously breaks csv writing under Python 3.
        if six.PY2:
            out_file = open(file_name, 'wb')
        else:
            out_file = open(file_name, 'w', newline='')
        with out_file as f:
            writer = csv.DictWriter(f, fieldnames=self.get_field_list())
            writer.writeheader()
            # Write records in ascending TargetID order.
            for _key, value in sorted(self.data_dict.items()):
                writer.writerow(value)
| 37.512894 | 80 | 0.588718 | 3,079 | 26,184 | 4.871387 | 0.169211 | 0.018335 | 0.012801 | 0.010667 | 0.185812 | 0.146343 | 0.133009 | 0.114874 | 0.099073 | 0.078539 | 0 | 0.004404 | 0.323595 | 26,184 | 697 | 81 | 37.566714 | 0.842471 | 0.331424 | 0 | 0.230986 | 0 | 0 | 0.143384 | 0 | 0 | 0 | 0 | 0.015782 | 0 | 1 | 0.084507 | false | 0.002817 | 0.03662 | 0 | 0.208451 | 0.016901 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
812066ffbcb9013a2cc703f8d57626a63964c5af
| 9,057 |
py
|
Python
|
QUANTAXIS/QASU/crawl_eastmoney.py
|
QUANTAXISER/QUANTAXIS
|
6ebd727b2900e8910fa45814bf45eeffca395250
|
[
"MIT"
] | 1 |
2018-09-09T02:55:10.000Z
|
2018-09-09T02:55:10.000Z
|
QUANTAXIS/QASU/crawl_eastmoney.py
|
frosthaoz/QUANTAXIS
|
f5f482418e5f6e23ac3530089b8d17300d931b48
|
[
"MIT"
] | null | null | null |
QUANTAXIS/QASU/crawl_eastmoney.py
|
frosthaoz/QUANTAXIS
|
f5f482418e5f6e23ac3530089b8d17300d931b48
|
[
"MIT"
] | 3 |
2018-11-29T07:07:56.000Z
|
2021-02-09T17:24:56.000Z
|
import os
from QUANTAXIS.QASetting import QALocalize
#from QUANTAXIS_CRAWLY.run_selenium_alone import (read_east_money_page_zjlx_to_sqllite, open_chrome_driver, close_chrome_dirver)
from QUANTAXIS_CRAWLY.run_selenium_alone import *
import urllib
import pandas as pd
import time
from QUANTAXIS.QAUtil import (DATABASE)
def QA_request_eastmoney_zjlx(param_stock_code_list):
    """Fetch daily capital-flow ("zjlx") history for the given stock codes
    from data.eastmoney.com and upsert one record per day into the
    ``eastmoney_stock_zjlx`` MongoDB collection (DATABASE).

    Parameters:
        param_stock_code_list: list of stock code strings. Only codes
        starting with '60' (Shanghai) or '00'/'30' (Shenzhen) are supported;
        any other prefix aborts the whole call.
    """
    # Fetch the HTML page for the first code only to discover the AJAX URL
    # template embedded in the page javascript ('var strUrl = ...').
    strUrl = "http://data.eastmoney.com/zjlx/{}.html".format(param_stock_code_list[0])

    # Throttle requests to avoid being blocked by the site.
    time.sleep(1.223)

    response = urllib.request.urlopen(strUrl)
    content = response.read()

    # 🛠todo switch this ad-hoc scanning to an `re` regular-expression match
    strings = content.decode("utf-8", "ignore")
    string_lines = strings.split("\r\n")

    # (A previous approach that scanned the page line-by-line for the
    # javascript variables _stockCode/_stockMarke/_stockName/_market was
    # removed; market ids observed were 1 for 60xxxx and 2 for 00xxxx/30xxxx.)

    values = []
    for aline in string_lines:
        aline = aline.strip()
        if 'EM_CapitalFlowInterface' in aline:
            aline = aline.strip()
            # The AJAX URL is written as '+'-joined javascript string
            # fragments; keep the raw fragments for later substitution.
            if aline.startswith('var strUrl = '):
                if 'var strUrl = ' in aline:
                    aline = aline[len('var strUrl = '):]
                    values = aline.split('+')
                break
    print(values)

    for iStockCode in range(len(param_stock_code_list)):
        requestStr = ""
        strCode = param_stock_code_list[iStockCode]

        # Derive the market id from the code prefix:
        # '60xxxx' -> 1 (Shanghai), '00xxxx'/'30xxxx' -> 2 (Shenzhen).
        if strCode[0:2] == '60':
            _stockMarke = '1'
        elif strCode[0:2] == '00' or strCode[0:2] == '30':
            _stockMarke = '2'
        else:
            # Unsupported code prefix: report and abort (for all codes).
            print(strCode + " 暂不支持, 60, 00, 30 开头的股票代码")
            return

        # Rebuild the request URL from the javascript fragments, replacing
        # the _stockCode/_stockMarke placeholders with the current values.
        for iItem in values:
            if '_stockCode' in iItem:
                requestStr = requestStr + param_stock_code_list[iStockCode]
            elif '_stockMarke' in iItem:
                requestStr = requestStr + _stockMarke
            else:
                if 'http://ff.eastmoney.com/' in iItem:
                    requestStr = 'http://ff.eastmoney.com/'
                else:
                    iItem = iItem.strip(' "')
                    iItem = iItem.rstrip(' "')
                    requestStr = requestStr + iItem

        # Throttle requests.
        time.sleep(1.456)
        response = urllib.request.urlopen(requestStr)
        content2 = response.read()
        strings = content2.decode("utf-8", "ignore")

        list_data_zjlx = []

        # The payload looks like: var aff_data=({data:[["...",...]]})
        if 'var aff_data=({data:[["' in strings:
            leftChars = strings[len('var aff_data=({data:[["'):]
            dataArrays = leftChars.split(',')
            # Each record is 13 consecutive comma-separated values.
            for aItemIndex in range(0, len(dataArrays), 13):
                '''
                Record layout (13 fields):
                  date
                  main-force net inflow: amount, percent
                  extra-large orders net inflow: amount, percent
                  large orders net inflow: amount, percent
                  medium orders net inflow: amount, percent
                  small orders net inflow: amount, percent
                  close price
                  change percent
                '''
                dict_row = {}
                dict_row['stock_code'] = param_stock_code_list[iStockCode]

                # date
                data01 = dataArrays[aItemIndex]
                data01 = data01.strip('"')
                dict_row['date'] = data01

                # main-force net inflow: amount (unit presumably 10k yuan
                # per the '_wy' suffix — confirm against source data)
                data02 = dataArrays[aItemIndex + 1]
                data02 = data02.strip('"')
                dict_row['zljll_je_wy'] = data02

                # main-force net inflow: percent share
                data03 = dataArrays[aItemIndex + 2]
                data03 = data03.strip('"')
                dict_row['zljll_jzb_bfb'] = data03

                # extra-large orders net inflow: amount
                data04 = dataArrays[aItemIndex + 3]
                data04 = data04.strip('"')
                dict_row['cddjll_je_wy'] = data04

                # extra-large orders net inflow: percent share
                data05 = dataArrays[aItemIndex + 4]
                data05 = data05.strip('"')
                dict_row['cddjll_je_jzb'] = data05

                # large orders net inflow: amount
                data06 = dataArrays[aItemIndex + 5]
                data06 = data06.strip('"')
                dict_row['ddjll_je_wy'] = data06

                # large orders net inflow: percent share
                data07 = dataArrays[aItemIndex + 6]
                data07 = data07.strip('"')
                dict_row['ddjll_je_jzb'] = data07

                # medium orders net inflow: amount
                data08 = dataArrays[aItemIndex + 7]
                data08 = data08.strip('"')
                dict_row['zdjll_je_wy'] = data08

                # medium orders net inflow: percent share
                data09 = dataArrays[aItemIndex + 8]
                data09 = data09.strip('"')
                dict_row['zdjll_je_jzb'] = data09

                # small orders net inflow: amount
                data10 = dataArrays[aItemIndex + 9]
                data10 = data10.strip('"')
                dict_row['xdjll_je_wy'] = data10

                # small orders net inflow: percent share
                data11 = dataArrays[aItemIndex + 10]
                data11 = data11.strip('"')
                dict_row['xdjll_je_jzb'] = data11

                # close price
                data12 = dataArrays[aItemIndex + 11]
                data12 = data12.strip('"')
                dict_row['close_price'] = data12

                # change percent (last field also carries the payload
                # terminator '"]]})', stripped here)
                data13 = dataArrays[aItemIndex + 12]
                data13 = data13.strip('"')
                data13 = data13.strip('"]]})')
                dict_row['change_price'] = data13

                # one record parsed successfully
                list_data_zjlx.append(dict_row)

        df = pd.DataFrame(list_data_zjlx)

        client = DATABASE
        coll_stock_zjlx = client.eastmoney_stock_zjlx
        # coll_stock_zjlx.insert_many(QA_util_to_json_from_pandas(df))

        # Upsert by full-record equality: insert only records not already
        # present. 🛠todo the intraday value is a point-in-time snapshot;
        # capture the final value after market close.
        for i in range(len(list_data_zjlx)):
            aRec = list_data_zjlx[i]
            ret = coll_stock_zjlx.find_one(aRec)

            if ret == None:
                coll_stock_zjlx.insert_one(aRec)
                print("🤑 插入新的记录 ", aRec)
            else:
                print("😵 记录已经存在 ", ret)
'''
Used as a test helper: fetch via the browser and compare whether the data
matches what the request-based approach (QA_request_eastmoney_zjlx) returns.
'''
def QA_read_eastmoney_zjlx_web_page_to_sqllite(stockCodeList=None):
    """Read the eastmoney capital-flow web page for each stock code with a
    selenium-driven browser and store the results via
    read_east_money_page_zjlx_to_sqllite.

    Parameters:
        stockCodeList: list of stock code strings; None or empty means
        nothing to read (previously None crashed with a TypeError).

    Requires a QUANTAXIS_WEBDRIVER directory (selenium driver) under the
    current working directory; otherwise the function returns without
    doing anything.
    """
    # todo 🛠 check that each stockCode is present and valid
    # todo 🛠 read the fixed driver location from the QALocalize directory
    print("📨当前工作路径文件位置 : ", os.getcwd())
    path_check = os.getcwd() + "/QUANTAXIS_WEBDRIVER"
    if not os.path.exists(path_check):
        print("😵 确认当前路径是否包含selenium_driver目录 😰 ")
        return
    print(os.getcwd() + "/QUANTAXIS_WEBDRIVER", " 目录存在 😁")
    print("")

    browser = open_chrome_driver()
    try:
        # Robustness: tolerate the default None argument instead of raising.
        for stock_code in (stockCodeList or []):
            read_east_money_page_zjlx_to_sqllite(stock_code, browser)
    finally:
        # Bug fix: always close the browser, even if a page read raises;
        # previously an exception leaked the chrome driver process.
        close_chrome_dirver(browser)
# create the output directory
# start a browser thread to read the web pages and write to the database
# wait for completion
| 30.392617 | 128 | 0.510765 | 891 | 9,057 | 4.98541 | 0.259259 | 0.02679 | 0.019811 | 0.027015 | 0.110311 | 0.086898 | 0.069338 | 0.037371 | 0.037371 | 0.020261 | 0 | 0.034689 | 0.372971 | 9,057 | 298 | 129 | 30.392617 | 0.745026 | 0.25207 | 0 | 0.072581 | 0 | 0 | 0.088015 | 0.007803 | 0 | 0 | 0 | 0.010067 | 0 | 1 | 0.016129 | false | 0.008065 | 0.056452 | 0 | 0.08871 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
8123d51391f52c37336172ab4d3305871857e10f
| 16,865 |
py
|
Python
|
flexget/tests/test_next_series_seasons.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | null | null | null |
flexget/tests/test_next_series_seasons.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | 1 |
2017-10-09T23:06:44.000Z
|
2017-10-09T23:06:44.000Z
|
flexget/tests/test_next_series_seasons.py
|
metaMMA/Flexget
|
a38986422461d7935ead1e2b4ed4c88bcd0a90f5
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import pytest
from flexget.entry import Entry
# TODO Add more standard tests
class TestNextSeriesSeasonSeasonsPack(object):
_config = """
templates:
global:
parsing:
series: internal
anchors:
_nss_backfill: &nss_backfill
next_series_seasons:
backfill: yes
_nss_from_start: &nss_from_start
next_series_seasons:
from_start: yes
_nss_backfill_from_start: &nss_backfill_from_start
next_series_seasons:
backfill: yes
from_start: yes
_series_ep_pack: &series_ep_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_pack: &series_ep_tracking_pack
identified_by: ep
tracking: backfill
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s02e01: &series_ep_tracking_pack_begin_s02e01
identified_by: ep
tracking: backfill
begin: s02e01
season_packs:
threshold: 1000
reject_eps: yes
_series_ep_tracking_begin_s04e01: &series_ep_tracking_pack_begin_s04e01
identified_by: ep
tracking: backfill
begin: s04e01
season_packs:
threshold: 1000
reject_eps: yes
tasks:
inject_series:
series:
settings:
test_series:
season_packs: always
test_series:
- Test Series 1
- Test Series 2
- Test Series 3
- Test Series 4
- Test Series 5
- Test Series 6
- Test Series 7
- Test Series 8
- Test Series 9
- Test Series 10
- Test Series 11
- Test Series 12
- Test Series 13
- Test Series 14
- Test Series 15
- Test Series 16
- Test Series 17
- Test Series 18
- Test Series 19
- Test Series 20
- Test Series 21
- Test Series 22
- Test Series 23
- Test Series 24
- Test Series 25
- Test Series 50
- Test Series 100
test_next_series_seasons_season_pack:
next_series_seasons: yes
series:
- Test Series 1:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill:
<<: *nss_backfill
series:
- Test Series 2:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 3:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_from_start:
<<: *nss_from_start
series:
- Test Series 4:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 5:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 6:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep:
next_series_seasons: yes
series:
- Test Series 7:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill:
<<: *nss_backfill
series:
- Test Series 8:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 9:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start:
<<: *nss_from_start
series:
- Test Series 10:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 11:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 12:
<<: *series_ep_tracking_pack_begin_s02e01
max_reruns: 0
test_next_series_seasons_season_pack_gap:
next_series_seasons: yes
series:
- Test Series 13:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill:
<<: *nss_backfill
series:
- Test Series 14:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 15:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start:
<<: *nss_from_start
series:
- Test Series 16:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 17:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 18:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap:
next_series_seasons: yes
series:
- Test Series 19:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill:
<<: *nss_backfill
series:
- Test Series 20:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin:
<<: *nss_backfill
series:
- Test Series 21:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start:
<<: *nss_from_start
series:
- Test Series 22:
<<: *series_ep_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill:
<<: *nss_backfill_from_start
series:
- Test Series 23:
<<: *series_ep_tracking_pack
max_reruns: 0
test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin:
<<: *nss_backfill_from_start
series:
- Test Series 24:
<<: *series_ep_tracking_pack_begin_s04e01
max_reruns: 0
test_next_series_seasons_season_pack_begin_completed:
next_series_seasons: yes
series:
- Test Series 50:
identified_by: ep
begin: S02E01
season_packs:
threshold: 1000
reject_eps: yes
max_reruns: 0
test_next_series_seasons_season_pack_from_start_multirun:
next_series_seasons:
from_start: yes
series:
- Test Series 100:
<<: *series_ep_pack
max_reruns: 0
"""
@pytest.fixture()
def config(self):
"""Season packs aren't supported by guessit yet."""
return self._config
def inject_series(self, execute_task, release_name):
execute_task(
'inject_series',
options={'inject': [Entry(title=release_name, url='')], 'disable_tracking': True},
)
@pytest.mark.parametrize(
"task_name,inject,result_find",
[
('test_next_series_seasons_season_pack', ['Test Series 1 S02'], ['Test Series 1 S03']),
(
'test_next_series_seasons_season_pack_backfill',
['Test Series 2 S02'],
['Test Series 2 S01', 'Test Series 2 S03'],
),
(
'test_next_series_seasons_season_pack_backfill_and_begin',
['Test Series 3 S02'],
['Test Series 3 S03'],
),
(
'test_next_series_seasons_season_pack_from_start',
['Test Series 4 S02'],
['Test Series 4 S03'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill',
['Test Series 5 S02'],
['Test Series 5 S03', 'Test Series 5 S01'],
),
(
'test_next_series_seasons_season_pack_from_start_backfill_and_begin',
['Test Series 6 S02'],
['Test Series 6 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep',
['Test Series 7 S02', 'Test Series 7 S03E01'],
['Test Series 7 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill',
['Test Series 8 S02', 'Test Series 8 S03E01'],
['Test Series 8 S01', 'Test Series 8 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_backfill_and_begin',
['Test Series 9 S02', 'Test Series 9 S03E01'],
['Test Series 9 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start',
['Test Series 10 S02', 'Test Series 10 S03E01'],
['Test Series 10 S03'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill',
['Test Series 11 S02', 'Test Series 11 S03E01'],
['Test Series 11 S03', 'Test Series 11 S01'],
),
(
'test_next_series_seasons_season_pack_and_ep_from_start_backfill_and_begin',
['Test Series 12 S02', 'Test Series 12 S03E01'],
['Test Series 12 S03'],
),
(
'test_next_series_seasons_season_pack_gap',
['Test Series 13 S02', 'Test Series 13 S06'],
['Test Series 13 S07'],
),
(
'test_next_series_seasons_season_pack_gap_backfill',
['Test Series 14 S02', 'Test Series 14 S06'],
[
'Test Series 14 S07',
'Test Series 14 S05',
'Test Series 14 S04',
'Test Series 14 S03',
'Test Series 14 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_backfill_and_begin',
['Test Series 15 S02', 'Test Series 15 S06'],
['Test Series 15 S07', 'Test Series 15 S05', 'Test Series 15 S04'],
),
(
'test_next_series_seasons_season_pack_gap_from_start',
['Test Series 16 S02', 'Test Series 16 S06'],
['Test Series 16 S07'],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill',
['Test Series 17 S02', 'Test Series 17 S06'],
[
'Test Series 17 S07',
'Test Series 17 S05',
'Test Series 17 S04',
'Test Series 17 S03',
'Test Series 17 S01',
],
),
(
'test_next_series_seasons_season_pack_gap_from_start_backfill_and_begin',
['Test Series 18 S02', 'Test Series 18 S06'],
['Test Series 18 S07', 'Test Series 18 S05', 'Test Series 18 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap',
['Test Series 19 S02', 'Test Series 19 S06', 'Test Series 19 S07E01'],
['Test Series 19 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill',
['Test Series 20 S02', 'Test Series 20 S06', 'Test Series 20 S07E01'],
[
'Test Series 20 S07',
'Test Series 20 S05',
'Test Series 20 S04',
'Test Series 20 S03',
'Test Series 20 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_backfill_and_begin',
['Test Series 21 S02', 'Test Series 21 S06', 'Test Series 21 S07E01'],
['Test Series 21 S07', 'Test Series 21 S05', 'Test Series 21 S04'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start',
['Test Series 22 S02', 'Test Series 22 S03E01', 'Test Series 22 S06'],
['Test Series 22 S07'],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill',
['Test Series 23 S02', 'Test Series 23 S03E01', 'Test Series 23 S06'],
[
'Test Series 23 S07',
'Test Series 23 S05',
'Test Series 23 S04',
'Test Series 23 S03',
'Test Series 23 S01',
],
),
(
'test_next_series_seasons_season_pack_and_ep_gap_from_start_backfill_and_begin',
['Test Series 24 S02', 'Test Series 24 S03E01', 'Test Series 24 S06'],
['Test Series 24 S07', 'Test Series 24 S05', 'Test Series 24 S04'],
),
(
'test_next_series_seasons_season_pack_begin_completed',
['Test Series 50 S02'],
['Test Series 50 S03'],
),
],
)
def test_next_series_seasons(self, execute_task, task_name, inject, result_find):
for entity_id in inject:
self.inject_series(execute_task, entity_id)
task = execute_task(task_name)
for result_title in result_find:
assert task.find_entry(title=result_title)
assert len(task.all_entries) == len(result_find)
# Tests which require multiple tasks to be executed in order
# Each run_parameter is a tuple of lists: [task name, list of series ID(s) to inject, list of result(s) to find]
@pytest.mark.parametrize(
"run_parameters",
[
(
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S01'],
],
[
'test_next_series_seasons_season_pack_from_start_multirun',
[],
['Test Series 100 S02'],
],
)
],
)
def test_next_series_seasons_multirun(self, execute_task, run_parameters):
for this_test in run_parameters:
for entity_id in this_test[1]:
self.inject_series(execute_task, entity_id)
task = execute_task(this_test[0])
for result_title in this_test[2]:
assert task.find_entry(title=result_title)
assert len(task.all_entries) == len(this_test[2])
| 37.645089 | 116 | 0.520308 | 1,771 | 16,865 | 4.528515 | 0.083569 | 0.198254 | 0.135661 | 0.144015 | 0.653865 | 0.623691 | 0.578429 | 0.545885 | 0.500998 | 0.449127 | 0 | 0.062229 | 0.411147 | 16,865 | 447 | 117 | 37.729306 | 0.745343 | 0.017729 | 0 | 0.415704 | 0 | 0 | 0.720904 | 0.238449 | 0 | 0 | 0 | 0.002237 | 0.009238 | 1 | 0.009238 | false | 0 | 0.009238 | 0 | 0.025404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
812c923f7680b63727b8c0d8a0b724feb7e64f73
| 1,448 |
py
|
Python
|
src/gausskernel/dbmind/xtuner/test/test_ssh.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | 1 |
2020-06-30T15:00:50.000Z
|
2020-06-30T15:00:50.000Z
|
src/gausskernel/dbmind/xtuner/test/test_ssh.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
src/gausskernel/dbmind/xtuner/test/test_ssh.py
|
wotchin/openGauss-server
|
ebd92e92b0cfd76b121d98e4c57a22d334573159
|
[
"MulanPSL-1.0"
] | null | null | null |
# Copyright (c) 2020 Huawei Technologies Co.,Ltd.
#
# openGauss is licensed under Mulan PSL v2.
# You can use this software according to the terms and conditions of the Mulan PSL v2.
# You may obtain a copy of Mulan PSL v2 at:
#
# http://license.coscl.org.cn/MulanPSL2
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the Mulan PSL v2 for more details.
# -------------------------------------------------------------------------
#
# test_ssh.py
#
# IDENTIFICATION
# src/gausskernel/dbmind/xtuner/test/test_ssh.py
#
# -------------------------------------------------------------------------
from ssh import ExecutorFactory
def test_remote():
    """Manual smoke test of remote command execution over SSH.

    Fill in host/user/password below before running.
    """
    executor = ExecutorFactory().set_host('').set_user('').set_pwd('').get_executor()  # padding your information
    print(executor.exec_command_sync("cat /proc/cpuinfo | grep \"processor\" | wc -l"))
    print(executor.exec_command_sync("cat /proc/self/cmdline | xargs -0"))
    multiline = executor.exec_command_sync("echo -e 'hello \\n world'")[0]
    print(multiline.count('\n'))
    print(executor.exec_command_sync("echo -e 'hello \\n world'")[0])
    print(executor.exec_command_sync('echo $SHELL'))
def test_local():
    """Manual smoke test of local command execution."""
    local_executor = ExecutorFactory().get_executor()
    print(local_executor.exec_command_sync("ping -h"))
if __name__ == "__main__":
    # Manual smoke test: exercise the remote executor first, then the local one.
    test_remote()
    test_local()
| 33.674419 | 108 | 0.631215 | 193 | 1,448 | 4.57513 | 0.595855 | 0.05436 | 0.08154 | 0.129105 | 0.216308 | 0.19026 | 0.19026 | 0.0906 | 0.0906 | 0.0906 | 0 | 0.00974 | 0.149171 | 1,448 | 42 | 109 | 34.47619 | 0.706981 | 0.520718 | 0 | 0 | 0 | 0 | 0.215774 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.214286 | 0.428571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
813bbe394d73b1fd28585f58879386377ceda809
| 9,047 |
py
|
Python
|
sympy/printing/lambdarepr.py
|
Carreau/sympy
|
168de33bb177936fa9517702b2c5a777b3989672
|
[
"BSD-3-Clause"
] | 4 |
2018-07-04T17:20:12.000Z
|
2019-07-14T18:07:25.000Z
|
sympy/printing/lambdarepr.py
|
Carreau/sympy
|
168de33bb177936fa9517702b2c5a777b3989672
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/lambdarepr.py
|
Carreau/sympy
|
168de33bb177936fa9517702b2c5a777b3989672
|
[
"BSD-3-Clause"
] | 1 |
2018-09-03T03:02:06.000Z
|
2018-09-03T03:02:06.000Z
|
from __future__ import print_function, division
from .str import StrPrinter
from sympy.utilities import default_sort_key
class LambdaPrinter(StrPrinter):
    """
    This printer converts expressions into strings that can be used by
    lambdify.
    """
    def _print_MatrixBase(self, expr):
        # Print as an explicit constructor call on the matrix's
        # list-of-lists form, e.g. "MutableDenseMatrix([[1, 2], [3, 4]])".
        return "%s(%s)" % (expr.__class__.__name__,
                           self._print((expr.tolist())))
    # Every concrete matrix class prints identically via _print_MatrixBase.
    _print_SparseMatrix = \
        _print_MutableSparseMatrix = \
        _print_ImmutableSparseMatrix = \
        _print_Matrix = \
        _print_DenseMatrix = \
        _print_MutableDenseMatrix = \
        _print_ImmutableMatrix = \
        _print_ImmutableDenseMatrix = \
        _print_MatrixBase
    def _print_Piecewise(self, expr):
        # Render as nested conditional expressions, e.g. for two branches:
        #   ((e1) if (c1) else (((e2) if (c2) else None)))
        # The final "else None" covers the case where no condition holds.
        result = []
        i = 0
        for arg in expr.args:
            e = arg.expr
            c = arg.cond
            result.append('((')
            result.append(self._print(e))
            result.append(') if (')
            result.append(self._print(c))
            result.append(') else (')
            i += 1
        # Drop the last branch's trailing "') else ('", terminate with
        # "else None", then close the two parens each earlier branch opened.
        result = result[:-1]
        result.append(') else None)')
        result.append(')'*(2*i - 2))
        return ''.join(result)
    def _print_Sum(self, expr):
        # One generator 'for' clause per summation limit (i, a, b).
        # The emitted code references 'builtins.sum', so the evaluation
        # namespace is expected to provide a 'builtins' name.
        loops = (
            'for {i} in range({a}, {b}+1)'.format(
                i=self._print(i),
                a=self._print(a),
                b=self._print(b))
            for i, a, b in expr.limits)
        return '(builtins.sum({function} {loops}))'.format(
            function=self._print(expr.function),
            loops=' '.join(loops))
    def _print_And(self, expr):
        # Parenthesize each operand and join with the Python 'and' keyword;
        # sorting by default_sort_key makes the output deterministic.
        result = ['(']
        for arg in sorted(expr.args, key=default_sort_key):
            result.extend(['(', self._print(arg), ')'])
            result.append(' and ')
        result = result[:-1]  # drop the trailing ' and '
        result.append(')')
        return ''.join(result)
    def _print_Or(self, expr):
        # Same construction as _print_And, but with the 'or' keyword.
        result = ['(']
        for arg in sorted(expr.args, key=default_sort_key):
            result.extend(['(', self._print(arg), ')'])
            result.append(' or ')
        result = result[:-1]  # drop the trailing ' or '
        result.append(')')
        return ''.join(result)
    def _print_Not(self, expr):
        # Single operand: emits "(not (arg))".
        result = ['(', 'not (', self._print(expr.args[0]), '))']
        return ''.join(result)
    def _print_BooleanTrue(self, expr):
        return "True"
    def _print_BooleanFalse(self, expr):
        return "False"
    def _print_ITE(self, expr):
        # If-then-else: args are (condition, true-value, false-value),
        # printed as a Python conditional expression.
        result = [
            '((', self._print(expr.args[1]),
            ') if (', self._print(expr.args[0]),
            ') else (', self._print(expr.args[2]), '))'
        ]
        return ''.join(result)
class NumPyPrinter(LambdaPrinter):
    """
    Numpy printer which handles vectorized piecewise functions,
    logical operators, etc.
    """
    _default_settings = {
        "order": "none",
        "full_prec": "auto",
    }
    def _print_seq(self, seq, delimiter=', '):
        "General sequence printer: converts to tuple"
        # Emit a tuple rather than a list because numba supports tuples in
        # nopython mode.
        printed_items = (self._print(item) for item in seq)
        return '({},)'.format(delimiter.join(printed_items))
    def _print_MatMul(self, expr):
        "Matrix multiplication printer"
        factors = [self._print(arg) for arg in expr.args]
        return '({0})'.format(').dot('.join(factors))
    def _print_DotProduct(self, expr):
        # DotProduct allows any shape order, but numpy.dot does matrix
        # multiplication, so transpose as needed to get 1 x n by n x 1.
        left, right = expr.args
        if left.shape[0] != 1:
            left = left.T
        if right.shape[1] != 1:
            right = right.T
        return "dot(%s, %s)" % (self._print(left), self._print(right))
    def _print_Piecewise(self, expr):
        "Piecewise function printer"
        branch_exprs = [self._print(arg.expr) for arg in expr.args]
        branch_conds = [self._print(arg.cond) for arg in expr.args]
        exprs = '[{0}]'.format(','.join(branch_exprs))
        conds = '[{0}]'.format(','.join(branch_conds))
        # If [default_value, True] is a (expr, cond) pair in the Piecewise it
        # behaves like the 'default' kwarg of select() *as long as* it is the
        # last element of expr.args; otherwise it may trigger prematurely.
        return 'select({0}, {1}, default=nan)'.format(conds, exprs)
    def _print_Relational(self, expr):
        "Relational printer for Equality and Unequality"
        ufunc_names = {
            '==': 'equal',
            '!=': 'not_equal',
            '<': 'less',
            '<=': 'less_equal',
            '>': 'greater',
            '>=': 'greater_equal',
        }
        ufunc = ufunc_names.get(expr.rel_op)
        if ufunc is None:
            return super(NumPyPrinter, self)._print_Relational(expr)
        return '{op}({lhs}, {rhs})'.format(op=ufunc,
                                           lhs=self._print(expr.lhs),
                                           rhs=self._print(expr.rhs))
    def _print_And(self, expr):
        "Logical And printer"
        # LambdaPrinter would emit the Python 'and' keyword; use numpy's
        # logical_and so the result is vectorized.
        joined_args = ','.join(self._print(arg) for arg in expr.args)
        return '{0}({1})'.format('logical_and', joined_args)
    def _print_Or(self, expr):
        "Logical Or printer"
        # Same rationale as _print_And: avoid the scalar 'or' keyword.
        joined_args = ','.join(self._print(arg) for arg in expr.args)
        return '{0}({1})'.format('logical_or', joined_args)
    def _print_Not(self, expr):
        "Logical Not printer"
        # Avoids the Python 'not' keyword; StrPrinter does not define this
        # either, so an override is required in any case.
        joined_args = ','.join(self._print(arg) for arg in expr.args)
        return '{0}({1})'.format('logical_not', joined_args)
    def _print_Min(self, expr):
        joined_args = ','.join(self._print(arg) for arg in expr.args)
        return '{0}(({1}))'.format('amin', joined_args)
    def _print_Max(self, expr):
        joined_args = ','.join(self._print(arg) for arg in expr.args)
        return '{0}(({1}))'.format('amax', joined_args)
# numexpr works by altering the string passed to numexpr.evaluate
# rather than by populating a namespace. Thus a special printer...
class NumExprPrinter(LambdaPrinter):
    """Printer that renders expressions as strings for numexpr.evaluate()."""
    # key, value pairs correspond to sympy name and numexpr name
    # functions not appearing in this dict will raise a TypeError
    _numexpr_functions = {
        'sin' : 'sin',
        'cos' : 'cos',
        'tan' : 'tan',
        'asin': 'arcsin',
        'acos': 'arccos',
        'atan': 'arctan',
        'atan2' : 'arctan2',
        'sinh' : 'sinh',
        'cosh' : 'cosh',
        'tanh' : 'tanh',
        'asinh': 'arcsinh',
        'acosh': 'arccosh',
        'atanh': 'arctanh',
        'ln' : 'log',
        'log': 'log',
        'exp': 'exp',
        'sqrt' : 'sqrt',
        'Abs' : 'abs',
        'conjugate' : 'conj',
        'im' : 'imag',
        're' : 'real',
        'where' : 'where',
        'complex' : 'complex',
        'contains' : 'contains',
    }
    def _print_ImaginaryUnit(self, expr):
        # The imaginary unit is spelled as the Python literal 1j.
        return '1j'
    def _print_seq(self, seq, delimiter=', '):
        # simplified _print_seq taken from pretty.py
        s = [self._print(item) for item in seq]
        if s:
            return delimiter.join(s)
        else:
            return ""
    def _print_Function(self, e):
        """Translate a function call via _numexpr_functions; fall back to an
        implemented_function's ``_imp_`` body, else raise TypeError."""
        func_name = e.func.__name__
        nstr = self._numexpr_functions.get(func_name, None)
        if nstr is None:
            # check for implemented_function
            if hasattr(e, '_imp_'):
                return "(%s)" % self._print(e._imp_(*e.args))
            else:
                raise TypeError("numexpr does not support function '%s'" %
                                func_name)
        return "%s(%s)" % (nstr, self._print_seq(e.args))
    def blacklisted(self, expr):
        # Shared handler for node types numexpr cannot evaluate: fail loudly
        # rather than emitting an unusable expression string.
        raise TypeError("numexpr cannot be used with %s" %
                        expr.__class__.__name__)
    # blacklist all Matrix printing
    _print_SparseMatrix = \
        _print_MutableSparseMatrix = \
        _print_ImmutableSparseMatrix = \
        _print_Matrix = \
        _print_DenseMatrix = \
        _print_MutableDenseMatrix = \
        _print_ImmutableMatrix = \
        _print_ImmutableDenseMatrix = \
        blacklisted
    # blacklist some python expressions
    _print_list = \
        _print_tuple = \
        _print_Tuple = \
        _print_dict = \
        _print_Dict = \
        blacklisted
    def doprint(self, expr):
        # Wrap the rendered expression for numexpr.evaluate(); truediv=True
        # requests Python-3-style division in the evaluated string.
        lstr = super(NumExprPrinter, self).doprint(expr)
        return "evaluate('%s', truediv=True)" % lstr
def lambdarepr(expr, **settings):
    """
    Returns a string usable for lambdifying.
    """
    printer = LambdaPrinter(settings)
    return printer.doprint(expr)
| 34.139623 | 92 | 0.559191 | 1,044 | 9,047 | 4.672414 | 0.253831 | 0.055351 | 0.0205 | 0.01722 | 0.359574 | 0.299713 | 0.270193 | 0.259943 | 0.235957 | 0.230217 | 0 | 0.007331 | 0.3064 | 9,047 | 264 | 93 | 34.268939 | 0.77004 | 0.20493 | 0 | 0.238342 | 0 | 0 | 0.128107 | 0.003278 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129534 | false | 0 | 0.015544 | 0.031088 | 0.419689 | 0.435233 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
d49e9592c8658910d6180947346f6788ba5fdb29
| 498 |
py
|
Python
|
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | 1 |
2018-03-08T19:46:08.000Z
|
2018-03-08T20:00:47.000Z
|
tests/assignments/test_assign7.py
|
acc-cosc-1336/cosc-1336-spring-2018-vcruz350
|
0cee9fde3d4129c51626c4e0c870972aebec9b95
|
[
"MIT"
] | null | null | null |
import unittest
#write the import for function for assignment7 sum_list_values
from src.assignments.assignment7 import sum_list_values
class Test_Assign7(unittest.TestCase):
def sample_test(self):
self.assertEqual(1,1)
#create a test for the sum_list_values function with list elements:
# bill 23 16 19 22
def test_sum_w_23_16_19_22(self):
test_list = ['bill', 23, 16, 19, 22]
self.assertEqual(80, sum_list_values(test_list))
#unittest.main(verbosity=2)
| 29.294118 | 71 | 0.736948 | 78 | 498 | 4.474359 | 0.461538 | 0.080229 | 0.148997 | 0.068768 | 0.114613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078624 | 0.182731 | 498 | 16 | 72 | 31.125 | 0.77887 | 0.341365 | 0 | 0 | 0 | 0 | 0.012346 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d49ef05ecf83504c528cca6ff6237271a4f54a56
| 4,957 |
py
|
Python
|
setec/__init__.py
|
kgriffs/setec
|
c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54
|
[
"Apache-2.0"
] | null | null | null |
setec/__init__.py
|
kgriffs/setec
|
c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54
|
[
"Apache-2.0"
] | null | null | null |
setec/__init__.py
|
kgriffs/setec
|
c6701ffd757cdfe1cfb9c3919b0fd3aa02396f54
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 by Kurt Griffiths
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64decode, b64encode
import msgpack
import nacl.encoding
import nacl.secret
import nacl.signing
import nacl.utils
from .version import __version__ # NOQA
class Signer:
    """Message signer based on Ed25519 and nacl.signing.

    Arguments:
        key (str): Base64-encoded key obtained from keygen()
    """

    __slots__ = ('_signing_key',)

    def __init__(self, skey):
        self._signing_key = nacl.signing.SigningKey(skey, nacl.encoding.Base64Encoder)

    @staticmethod
    def keygen():
        """Generate a new key pair.

        Returns:
            tuple: (Base64-encoded signing key, Base64-encoded verify key)
        """
        signing_key = nacl.signing.SigningKey.generate()
        return (
            signing_key.encode(nacl.encoding.Base64Encoder).decode(),
            signing_key.verify_key.encode(nacl.encoding.Base64Encoder).decode(),
        )

    @staticmethod
    def vkey(skey):
        """Derive the Base64-encoded verify key from a Base64-encoded signing key."""
        signing_key = nacl.signing.SigningKey(skey, nacl.encoding.Base64Encoder)
        return signing_key.verify_key.encode(nacl.encoding.Base64Encoder)

    def signb(self, message):
        """Sign a binary message with its signature attached.

        Arguments:
            message(bytes): Data to sign.

        Returns:
            bytes: Signed message
        """
        return self._signing_key.sign(message)

    def pack(self, doc):
        """Serialize, sign, and Base64-encode *doc*; returns a str."""
        return b64encode(self.packb(doc)).decode()

    def packb(self, doc):
        """Serialize *doc* with msgpack and sign the packed bytes.

        Returns:
            bytes: Signed msgpack payload
        """
        # The 'encoding' kwarg was removed in msgpack 1.0 (it raised
        # TypeError here); 'utf-8' was already the default, so omitting it
        # preserves the wire format. use_bin_type=True keeps str/bytes
        # distinguishable on the wire.
        packed = msgpack.packb(doc, use_bin_type=True)
        return self.signb(packed)
class Verifier:
    """Signature verifier based on Ed25519 and nacl.signing.

    Arguments:
        key (str): Base64-encoded verify key
    """

    __slots__ = ('_verify_key',)

    def __init__(self, vkey):
        self._verify_key = nacl.signing.VerifyKey(vkey, nacl.encoding.Base64Encoder)

    def verifyb(self, message):
        """Verify a signed binary message.

        Arguments:
            message(bytes): Data to verify.

        Returns:
            bytes: The original message, sans signature.
        """
        return self._verify_key.verify(message)

    def unpack(self, packed):
        """Base64-decode, verify, and deserialize *packed*; returns the doc."""
        return self.unpackb(b64decode(packed))

    def unpackb(self, packed):
        """Verify the signature on *packed* and deserialize the msgpack body."""
        packed = self.verifyb(packed)
        # raw=False decodes msgpack strings to Python str; the redundant
        # 'encoding' kwarg was removed in msgpack 1.0 and raised TypeError.
        return msgpack.unpackb(packed, raw=False)
class BlackBox:
    """Encryption engine based on PyNaCl's SecretBox (Salsa20/Poly1305).

    Warning per the SecretBox docs:

        Once you've decrypted the message you've demonstrated the ability to
        create arbitrary valid messages, so messages you send are repudiable.
        For non-repudiable messages, sign them after encryption.

    (See also: https://pynacl.readthedocs.io/en/stable/signing)

    Arguments:
        key (str): Base64-encoded key obtained from keygen()
    """

    __slots__ = ('_box',)

    def __init__(self, key):
        self._box = nacl.secret.SecretBox(b64decode(key))

    @staticmethod
    def keygen():
        """Return a new random Base64-encoded SecretBox key."""
        return b64encode(nacl.utils.random(nacl.secret.SecretBox.KEY_SIZE)).decode()

    def encrypt(self, doc, signer=None):
        """Serialize and encrypt a document to Base64-encoded ciphertext.

        Arguments:
            doc: The string, dict, array, or other JSON-compatible
                object to serialize and encrypt.

        Keyword Arguments:
            signer: An instance of Signer to use in signing the result. If
                not provided, the ciphertext is not signed.

        Returns:
            str: Ciphertext
        """
        # msgpack 1.0 removed the 'encoding' kwarg (TypeError here before
        # this fix); 'utf-8' was the default, so the wire format is unchanged.
        data = msgpack.packb(doc, use_bin_type=True)
        ciphertext = self._box.encrypt(data)
        if signer:
            ciphertext = signer.signb(ciphertext)
        return b64encode(ciphertext).decode()

    def decrypt(self, ciphertext, verifier=None):
        """Unpack Base64-encoded ciphertext.

        Arguments:
            ciphertext (bytes): Ciphertext to decrypt and deserialize.

        Keyword Arguments:
            verifier: An instance of Verifier to use in verifying the
                signed ciphertext. If not provided, the ciphertext is
                assumed to be unsigned.

        Returns:
            doc: Deserialized JSON-compatible object.
        """
        ciphertext = b64decode(ciphertext)
        if verifier:
            ciphertext = verifier.verifyb(ciphertext)
        data = self._box.decrypt(ciphertext)
        # raw=False replaces the removed (msgpack 1.0) encoding='utf-8'.
        return msgpack.unpackb(data, raw=False)
| 29.158824 | 86 | 0.656042 | 586 | 4,957 | 5.453925 | 0.320819 | 0.025031 | 0.046934 | 0.020651 | 0.227785 | 0.188673 | 0.156758 | 0.156758 | 0.125469 | 0.087922 | 0 | 0.018413 | 0.254993 | 4,957 | 169 | 87 | 29.331361 | 0.847008 | 0.440387 | 0 | 0.084746 | 0 | 0 | 0.019278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.237288 | false | 0 | 0.118644 | 0.050847 | 0.644068 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4a7d95a9f223064052da15a9a7a9eecfe46cfa7
| 3,810 |
py
|
Python
|
atmosphere/custom_activity/base_class.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | null | null | null |
atmosphere/custom_activity/base_class.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | 9 |
2021-02-21T21:53:03.000Z
|
2021-11-05T06:06:55.000Z
|
atmosphere/custom_activity/base_class.py
|
ambiata/atmosphere-python-sdk
|
48880a8553000cdea59d63b0fba49e1f0f482784
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from typing import Tuple
from requests import Response
from .pydantic_models import (AppliedExclusionConditionsResponse,
BiasAttributeConfigListResponse,
ComputeRewardResponse, DefaultPredictionResponse,
ExclusionRuleConditionListResponse,
PredictionResponsePayloadFormatListResponse)
class BaseActivityCustomCode(ABC):
"""
The main class of this repository: the one to be implemented
"""
is_for_mocker: bool
def __init__(self, is_for_mocker: bool = False):
self.is_for_mocker = is_for_mocker
@abstractmethod
def validate_prediction_request(self, prediction_request: dict) -> None:
"""Raise a ValidationError if the received prediction request is not valid"""
@abstractmethod
def validate_outcome_request(self, outcome_request: dict) -> None:
"""Raise a ValidationError if the received outcome request is not valid"""
@abstractmethod
def compute_reward(self, outcome_request: dict) -> ComputeRewardResponse:
"""From an outcome, compute the reward"""
@abstractmethod
def get_module_version(self) -> str:
"""Return the version of the module."""
@abstractmethod
def send_mock_prediction_request(
self, url_prediction_endpoint: str
) -> Tuple[Response, dict]:
"""
Send a mock request to the provided url and returns the corresponding response
with extra information if required for computing the prediction.
The response and dictionary will be provided to
the `send_mock_outcome_request`.
"""
@abstractmethod
def send_mock_outcome_request(
self,
url_outcome_endpoint: str,
prediction_response: Response,
info_from_prediction: dict,
) -> Response:
"""
Send a mock request to the provided url and returns the corresponding response.
Provide the prediction response and extra information created while
creating the prediction request from `send_mock_prediction_request`.
"""
def get_prediction_response_payload_formats(
self,
) -> PredictionResponsePayloadFormatListResponse:
"""
Return the list of available format of the prediction payload.
Every format should have a name and a description
The name of the format should be unique.
"""
return {"prediction_response_payload_formats": []}
def format_prediction_payload_response(
self,
default_prediction_response: DefaultPredictionResponse,
payload_format: str, # noqa pylint: disable=unused-argument
) -> dict:
"""
You can format the prediction the way you want based
on the information returned by default
"""
return default_prediction_response
def get_exclusion_rule_conditions(self) -> ExclusionRuleConditionListResponse:
"""
Define the exclusion rules for the activity
"""
return ExclusionRuleConditionListResponse(exclusion_rule_conditions=[])
def get_applied_exclusion_conditions(
    self, prediction_request: dict  # noqa pylint: disable=unused-argument
) -> AppliedExclusionConditionsResponse:
    """
    Return the exclusion conditions that were applied for the given
    prediction request.

    Default implementation reports none applied.

    :param prediction_request: deserialized prediction payload (unused here).
    """
    return AppliedExclusionConditionsResponse(applied_exclusion_conditions=[])
def get_bias_attribute_configs(self) -> BiasAttributeConfigListResponse:
    """
    Define the bias attribute configs, these decide which attributes may be
    used by atmospherex as bias attributes.

    Default implementation declares none; override to enable bias attributes.
    """
    return BiasAttributeConfigListResponse(bias_attribute_configs=[])
| 36.634615 | 87 | 0.684777 | 379 | 3,810 | 6.691293 | 0.313984 | 0.046924 | 0.01735 | 0.01183 | 0.175868 | 0.15142 | 0.124606 | 0.124606 | 0.090694 | 0.05205 | 0 | 0 | 0.25643 | 3,810 | 103 | 88 | 36.990291 | 0.895164 | 0.323622 | 0 | 0.183673 | 0 | 0 | 0.015171 | 0.015171 | 0 | 0 | 0 | 0 | 0 | 1 | 0.244898 | false | 0 | 0.081633 | 0 | 0.469388 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4ae07ad4070643d0ba3b0f74c8b5ba6215fad3c
| 2,770 |
py
|
Python
|
projects/objects/buildings/protos/textures/colored_textures/textures_generator.py
|
yjf18340/webots
|
60d441c362031ab8fde120cc0cd97bdb1a31a3d5
|
[
"Apache-2.0"
] | 1 |
2019-11-13T08:12:02.000Z
|
2019-11-13T08:12:02.000Z
|
projects/objects/buildings/protos/textures/colored_textures/textures_generator.py
|
chinakwy/webots
|
7c35a359848bafe81fe0229ac2ed587528f4c73e
|
[
"Apache-2.0"
] | null | null | null |
projects/objects/buildings/protos/textures/colored_textures/textures_generator.py
|
chinakwy/webots
|
7c35a359848bafe81fe0229ac2ed587528f4c73e
|
[
"Apache-2.0"
] | 1 |
2020-09-25T02:01:45.000Z
|
2020-09-25T02:01:45.000Z
|
#!/usr/bin/env python
# Copyright 1996-2019 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate textures prepared for OSM, based on image templates."""
import glob
import os
from PIL import Image
# Run relative to this script's own directory so it can be invoked from anywhere.
os.chdir(os.path.dirname(os.path.realpath(__file__)))

# Pair every diffuse template with its matching color-mask file.
templates = [
    (name, name.replace('_diffuse_', '_color_mask_'))
    for name in glob.glob("*_diffuse_template.jpg")
]
# Target colors, keyed by hex code.
# ref: http://wiki.openstreetmap.org/wiki/Key:colour
# TODO: is it sufficient?
# Values are RGB triples in [0, 1], tuned for the templates rather than the
# exact hex value the key names.
colors = {
    '000000': (0.0, 0.0, 0.0),     # black
    'FFFFFF': (0.84, 0.84, 0.84),  # white
    '808080': (0.4, 0.4, 0.4),     # gray
    'C0C0C0': (0.65, 0.65, 0.65),  # silver
    '800000': (0.4, 0.15, 0.15),   # maroon
    'FF0000': (0.45, 0.0, 0.0),    # red
    '808000': (0.4, 0.4, 0.2),     # olive
    'FFFF00': (0.7, 0.6, 0.15),    # yellow
    '008000': (0.15, 0.3, 0.15),   # green
    '00FF00': (0.55, 0.69, 0.52),  # lime
    '008080': (0.15, 0.3, 0.3),    # teal
    '00FFFF': (0.6, 0.7, 0.7),     # aqua
    '000080': (0.2, 0.2, 0.3),     # navy
    '0000FF': (0.4, 0.4, 0.75),    # blue
    '800080': (0.5, 0.4, 0.5),     # purple
    'FF00FF': (0.9, 0.75, 0.85),   # fuchsia
    'F5DEB3': (0.83, 0.78, 0.65),  # wheat
    '8B4513': (0.3, 0.1, 0.05),    # saddle brown
}

# Power of the effect, found empirically.
effectFactor = 0.5
# For each template, emit one recolored texture per target color.
for template in templates:
    # Load the diffuse template and its matching color mask.
    diffuse = Image.open(template[0])
    mask = Image.open(template[1])
    assert diffuse.size == mask.size
    width, height = diffuse.size
    # Create an image per color.
    for colorString, color in colors.items():  # .iteritems() was Python 2 only
        image = Image.new('RGB', diffuse.size)
        pixels = image.load()
        # x indexes columns (width) and y rows (height); the original code
        # iterated x over the height and y over the width, which raises
        # IndexError on non-square templates.
        for x in range(width):
            for y in range(height):
                dR, dG, dB = diffuse.getpixel((x, y))
                mR, mG, mB = mask.getpixel((x, y))
                # Shift each channel toward the target color, weighted by the
                # mask intensity and the empirical effect factor.
                r = dR + int(255.0 * (mR / 255.0) * (color[0] * 2.0 - 1.0) * effectFactor)
                g = dG + int(255.0 * (mG / 255.0) * (color[1] * 2.0 - 1.0) * effectFactor)
                b = dB + int(255.0 * (mB / 255.0) * (color[2] * 2.0 - 1.0) * effectFactor)
                pixels[x, y] = (r, g, b)
        image.save(template[0].replace('_diffuse_template', '_' + colorString))
| 35.063291 | 110 | 0.605415 | 450 | 2,770 | 3.695556 | 0.42 | 0.010824 | 0.014432 | 0.009621 | 0.048707 | 0 | 0 | 0 | 0 | 0 | 0 | 0.123358 | 0.230325 | 2,770 | 78 | 111 | 35.512821 | 0.65666 | 0.36065 | 0 | 0 | 0 | 0 | 0.09868 | 0.012622 | 0 | 0 | 0 | 0.012821 | 0.022222 | 1 | 0 | false | 0 | 0.066667 | 0 | 0.066667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4b39516d2e47e56ba5e7898643ba4593ea3b27e
| 349 |
py
|
Python
|
change_threshold_migration.py
|
arcapix/gpfsapi-examples
|
15bff7fda7b0a576209253dee48eb44e4c0d565f
|
[
"MIT"
] | 10 |
2016-05-17T12:58:35.000Z
|
2022-01-10T05:23:45.000Z
|
change_threshold_migration.py
|
arcapix/gpfsapi-examples
|
15bff7fda7b0a576209253dee48eb44e4c0d565f
|
[
"MIT"
] | null | null | null |
change_threshold_migration.py
|
arcapix/gpfsapi-examples
|
15bff7fda7b0a576209253dee48eb44e4c0d565f
|
[
"MIT"
] | 1 |
2016-09-12T09:07:00.000Z
|
2016-09-12T09:07:00.000Z
|
from arcapix.fs.gpfs.policy import PlacementPolicy
from arcapix.fs.gpfs.rule import MigrateRule
# Load the active placement policy of the 'mmfs1' filesystem.
policy = PlacementPolicy('mmfs1')

# Create a migrate rule for the 'sata1' pool. threshold=(90, 50) presumably
# maps to GPFS THRESHOLD(90, 50): start migrating at 90% pool occupancy and
# stop at 50% — confirm against the arcapix/GPFS documentation.
r = MigrateRule(source='sata1', threshold=(90, 50))

# Insert the rule at position 0 so it is evaluated before existing rules.
policy.rules.insert(r, 0)

# Persist the modified policy back to GPFS.
policy.save()
| 23.266667 | 51 | 0.759312 | 52 | 349 | 5.096154 | 0.634615 | 0.083019 | 0.098113 | 0.128302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0299 | 0.137536 | 349 | 14 | 52 | 24.928571 | 0.850498 | 0.326648 | 0 | 0 | 0 | 0 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
d4c0845bc0b80a14fbe5e783d9ed64b00db19bce
| 3,383 |
py
|
Python
|
app/__init__.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | 1 |
2020-06-26T21:49:14.000Z
|
2020-06-26T21:49:14.000Z
|
app/__init__.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | 2 |
2020-03-31T11:11:04.000Z
|
2021-12-13T20:38:48.000Z
|
app/__init__.py
|
credwood/bitplayers
|
4ca6b6c6a21bb21d7cd963c64028415559c3dcc4
|
[
"MIT"
] | null | null | null |
import dash
from flask import Flask
from flask.helpers import get_root_path
from flask_login import login_required
from flask_wtf.csrf import CSRFProtect
from flask_admin import Admin, BaseView, expose
from flask_admin.contrib.sqla import ModelView
from datetime import datetime
from dateutil import parser
import pytz
from pytz import timezone
from config import BaseConfig
csrf = CSRFProtect()
def create_app():
    """Application factory: build and return the configured Flask server.

    Wires up CSRF protection, the admin model views, the embedded Dash apps,
    extensions, blueprints and the 'formatdatetime' Jinja filter.
    """
    # Imported inside the factory (presumably to avoid circular imports
    # between the app package and its extensions/models).
    from app.models import Blog, User, MyModelView, Contact
    from app.extensions import db
    from app.dashapp1.layout import layout as layout_1
    from app.dashapp1.callbacks import register_callbacks as register_callbacks_1
    #from app.dashapp2.layout import layout as layout_2
    #from app.dashapp2.callbacks import register_callbacks as register_callbacks_2
    from app.dashapp3.layout import layout as layout_3
    from app.dashapp3.callbacks import register_callbacks as register_callbacks_3

    server = Flask(__name__)
    server.config.from_object(BaseConfig)

    # Enable CSRF globally, but exempt Dash's internal dispatch view so its
    # callback POSTs are not rejected.
    csrf.init_app(server)
    csrf._exempt_views.add('dash.dash.dispatch')

    # Admin UI with model views for the main database models.
    admin = Admin(server)
    admin.add_view(MyModelView(User, db.session))
    admin.add_view(MyModelView(Blog, db.session))
    admin.add_view(MyModelView(Contact, db.session))

    # Mount the Dash applications under their own URL prefixes.
    register_dashapp(server, 'dashapp1', 'dashboard1', layout_1, register_callbacks_1)
    #register_dashapp(server, 'dashapp2', 'dashboard2', layout_2, register_callbacks_2)
    register_dashapp(server, 'dashapp3', 'dashboard3', layout_3, register_callbacks_3)

    register_extensions(server)
    register_blueprints(server)

    # Expose format_datetime to Jinja templates as the 'formatdatetime' filter.
    server.jinja_env.filters['formatdatetime'] = format_datetime
    return server
def format_datetime(date, fmt=None):
    """Jinja filter: render a datetime in US/Pacific local time.

    :param date: naive ``datetime`` interpreted as UTC (it is localized as
        UTC before conversion) — assumes callers store UTC timestamps; a
        timezone-aware input would make ``pytz.utc.localize`` raise.
    :param fmt: optional ``strftime`` format; defaults to
        ``'%m-%d-%Y %I:%M %p'``.
    :return: formatted local-time string.
    """
    western = timezone("America/Los_Angeles")
    # Interpret the naive datetime as UTC, then convert to Pacific time.
    local_dt = pytz.utc.localize(date, is_dst=None).astimezone(western)
    # Honor an explicit format if supplied; previously the `fmt` argument was
    # accepted but silently ignored. Also renamed the local variable, which
    # shadowed the builtin `format`.
    fmt_string = fmt if fmt is not None else '%m-%d-%Y %I:%M %p'
    return local_dt.strftime(fmt_string)
def register_dashapp(app, title, base_pathname, layout, register_callbacks_fun):
    """Create a Dash app and mount it on the given Flask server.

    :param app: parent Flask server.
    :param title: browser title for the Dash app.
    :param base_pathname: URL prefix the app is served under (e.g. 'dashboard1').
    :param layout: Dash layout object for the app.
    :param register_callbacks_fun: callable that attaches callbacks to the app.
    """
    # Meta tags for viewport responsiveness
    meta_viewport = {"name": "viewport", "content": "width=device-width, initial-scale=1, shrink-to-fit=no"}

    my_dashapp = dash.Dash(__name__,
                           server=app,
                           url_base_pathname=f'/{base_pathname}/',
                           assets_folder=get_root_path(__name__) + f'/{base_pathname}/assets/',
                           meta_tags=[meta_viewport])
    # Title/layout/callbacks are set inside an application context —
    # presumably required by Dash when bound to an external Flask server.
    with app.app_context():
        my_dashapp.title = title
        my_dashapp.layout = layout
        register_callbacks_fun(my_dashapp)
    # Login protection of the Dash views is currently disabled.
    #_protect_dashviews(my_dashapp)
def _protect_dashviews(dashapp):
    """Wrap every Flask view of this Dash app with ``login_required``.

    A view belongs to the Dash app when its endpoint name starts with the
    app's ``url_base_pathname``.
    """
    view_functions = dashapp.server.view_functions
    prefix = dashapp.config.url_base_pathname
    for endpoint in list(view_functions):
        if endpoint.startswith(prefix):
            view_functions[endpoint] = login_required(view_functions[endpoint])
def register_extensions(server):
    """Initialize Flask extensions (DB, login, migrations, mail) on the server."""
    from app.extensions import db
    from app.extensions import login_inst
    from app.extensions import migrate
    from app.extensions import mail

    db.init_app(server)
    login_inst.init_app(server)
    # Unauthenticated users are redirected to the 'main.login' view.
    login_inst.login_view = 'main.login'
    migrate.init_app(server, db)
    mail.init_app(server)
def register_blueprints(server):
    """Register the main web blueprint on the server."""
    from app.webapp import server_bp

    server.register_blueprint(server_bp)
| 35.610526 | 111 | 0.738693 | 442 | 3,383 | 5.411765 | 0.28733 | 0.038043 | 0.035535 | 0.048077 | 0.196906 | 0.145903 | 0.090719 | 0 | 0 | 0 | 0 | 0.008957 | 0.174993 | 3,383 | 94 | 112 | 35.989362 | 0.848083 | 0.100503 | 0 | 0.029851 | 0 | 0 | 0.074769 | 0.007905 | 0 | 0 | 0 | 0 | 0 | 1 | 0.089552 | false | 0 | 0.343284 | 0 | 0.462687 | 0.044776 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
d4c5d71a8319e8e4743e5c7446b67b54ee62af61
| 256 |
py
|
Python
|
devtools/api/health.py
|
ankeshkhemani/devtools
|
beb9a46c27b6b4c02a2e8729af0c971cc175f134
|
[
"Apache-2.0"
] | null | null | null |
devtools/api/health.py
|
ankeshkhemani/devtools
|
beb9a46c27b6b4c02a2e8729af0c971cc175f134
|
[
"Apache-2.0"
] | null | null | null |
devtools/api/health.py
|
ankeshkhemani/devtools
|
beb9a46c27b6b4c02a2e8729af0c971cc175f134
|
[
"Apache-2.0"
] | null | null | null |
import datetime
from fastapi import APIRouter
router = APIRouter()


@router.get("", tags=["health"])
async def get_health():
    """Health-check endpoint.

    :return: JSON body with empty ``results``, ``status`` of ``"success"``
        and the current server time as a Unix ``timestamp``.
    """
    return {
        "results": [],
        "status": "success",
        # NOTE(review): naive local time; consider datetime.now(tz=timezone.utc)
        # if clients expect UTC — confirm consumer expectations.
        "timestamp": datetime.datetime.now().timestamp()
    }
| 17.066667 | 56 | 0.605469 | 25 | 256 | 6.16 | 0.68 | 0.194805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.230469 | 256 | 14 | 57 | 18.285714 | 0.781726 | 0 | 0 | 0 | 0 | 0 | 0.136719 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.3 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4cc2ada6fd8bd17a6303118a58e9c1a8c44ff7a
| 2,265 |
py
|
Python
|
pytorch_toolkit/face_recognition/model/common.py
|
AnastasiaaSenina/openvino_training_extensions
|
267425d64372dff5b9083dc0ca6abfc305a71449
|
[
"Apache-2.0"
] | 1 |
2020-02-09T15:50:49.000Z
|
2020-02-09T15:50:49.000Z
|
pytorch_toolkit/face_recognition/model/common.py
|
akshayjaryal603/openvino_training_extensions
|
7d606a22143db0af97087709d63a2ec2aa02036c
|
[
"Apache-2.0"
] | 28 |
2020-09-25T22:40:36.000Z
|
2022-03-12T00:37:36.000Z
|
pytorch_toolkit/face_recognition/model/common.py
|
akshayjaryal603/openvino_training_extensions
|
7d606a22143db0af97087709d63a2ec2aa02036c
|
[
"Apache-2.0"
] | 1 |
2021-04-02T07:51:01.000Z
|
2021-04-02T07:51:01.000Z
|
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from abc import abstractmethod
from functools import partial
import torch.nn as nn
class ModelInterface(nn.Module):
    """Abstract base class for models.

    Concrete subclasses (see the registries below) must implement the
    abstract methods declared here.
    """

    @abstractmethod
    def set_dropout_ratio(self, ratio):
        """Set the dropout ratio of the model.

        :param ratio: dropout probability to apply.
        """

    @abstractmethod
    def get_input_res(self):
        """Return the input resolution expected by the model."""
from .rmnet_angular import RMNetAngular
from .mobilefacenet import MobileFaceNet
from .landnet import LandmarksNet
from .se_resnet_angular import SEResNetAngular
from .shufflenet_v2_angular import ShuffleNetV2Angular
from .backbones.se_resnet import se_resnet50, se_resnet101, se_resnet152
from .backbones.resnet import resnet50
from .backbones.se_resnext import se_resnext50, se_resnext101, se_resnext152
# Registry of available recognition backbones, keyed by configuration name.
# Values are model classes or functools.partial factories with preset
# width multipliers / base backbones.
models_backbones = {'rmnet': RMNetAngular,
                    'mobilenetv2': MobileFaceNet,
                    'mobilenetv2_2x': partial(MobileFaceNet, width_multiplier=2.0),
                    'mobilenetv2_1_5x': partial(MobileFaceNet, width_multiplier=1.5),
                    'resnet50': partial(SEResNetAngular, base=resnet50),
                    'se_resnet50': partial(SEResNetAngular, base=se_resnet50),
                    'se_resnet101': partial(SEResNetAngular, base=se_resnet101),
                    'se_resnet152': partial(SEResNetAngular, base=se_resnet152),
                    'se_resnext50': partial(SEResNetAngular, base=se_resnext50),
                    'se_resnext101': partial(SEResNetAngular, base=se_resnext101),
                    'se_resnext152': partial(SEResNetAngular, base=se_resnext152),
                    'shufflenetv2': ShuffleNetV2Angular}

# Landmark-regression models, keyed by configuration name.
models_landmarks = {'landnet': LandmarksNet}
| 41.944444 | 85 | 0.714349 | 262 | 2,265 | 6.038168 | 0.454198 | 0.097345 | 0.115044 | 0.106195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.042434 | 0.209272 | 2,265 | 53 | 86 | 42.735849 | 0.840871 | 0.283444 | 0 | 0.068966 | 0 | 0 | 0.091824 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.37931 | 0 | 0.482759 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
d4cd43090d9af44b579f4587a49e6d83acfe093a
| 807 |
py
|
Python
|
src/dataclay/util/logs.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 1 |
2020-04-16T17:09:15.000Z
|
2020-04-16T17:09:15.000Z
|
src/dataclay/util/logs.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 35 |
2019-11-06T17:06:16.000Z
|
2021-04-12T16:27:20.000Z
|
src/dataclay/util/logs.py
|
kpavel/pyclay
|
275bc8af5c57301231a20cca1cc88556a9c84c79
|
[
"BSD-3-Clause"
] | 1 |
2020-05-06T11:28:16.000Z
|
2020-05-06T11:28:16.000Z
|
""" Class description goes here. """
import json
import logging
class JSONFormatter(logging.Formatter):
    """Simple JSON formatter for the logging facility."""

    def format(self, obj):
        """Serialize a ``logging.LogRecord`` as a JSON object string.

        ``msg``/``args`` are merged into a single ``message`` field and
        ``exc_info`` (when present) is rendered with ``formatException``.

        :param obj: the ``LogRecord`` instance to format.
        :return: JSON-encoded string of the record's attributes.
        """
        # Copy the record's attribute dictionary so we can mutate it freely.
        ret = dict(obj.__dict__)
        # Perform the lazy %-style message substitution, mirroring
        # LogRecord.getMessage(): apply the format only when there are args,
        # so messages containing a literal '%' no longer raise ValueError.
        args = ret.pop("args")
        msg = ret.pop("msg")
        ret["message"] = msg % args if args else msg
        # Exceptions must be formatted (they are not JSON-serializable).
        try:
            ei = ret.pop("exc_info")
        except KeyError:
            pass
        else:
            if ei is not None:
                ret["exc_info"] = self.formatException(ei)
        # Dump the dictionary in JSON form; skipkeys drops non-string keys
        # instead of raising.
        return json.dumps(ret, skipkeys=True)
| 26.032258 | 70 | 0.581165 | 95 | 807 | 4.873684 | 0.621053 | 0.038877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.319703 | 807 | 30 | 71 | 26.9 | 0.843352 | 0.327138 | 0 | 0 | 0 | 0 | 0.057471 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.0625 | 0.125 | 0 | 0.3125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
d4d8056be31284c17cf40684370c5ac0209b3ede
| 1,296 |
py
|
Python
|
tg/release.py
|
TurboGears/tg2
|
f40a82d016d70ce560002593b4bb8f83b57f87b3
|
[
"MIT"
] | 812 |
2015-01-16T22:57:52.000Z
|
2022-03-27T04:49:40.000Z
|
tg/release.py
|
KonstantinKlepikov/tg2
|
b230e98bf6f64b3620dcb4214fa45dafddb0d60f
|
[
"MIT"
] | 74 |
2015-02-18T17:55:31.000Z
|
2021-12-13T10:41:08.000Z
|
tg/release.py
|
KonstantinKlepikov/tg2
|
b230e98bf6f64b3620dcb4214fa45dafddb0d60f
|
[
"MIT"
] | 72 |
2015-06-10T06:02:45.000Z
|
2022-03-27T08:37:24.000Z
|
"""TurboGears project related information"""
version = "2.4.3"
description = "Next generation TurboGears"
long_description="""
TurboGears brings together a best of breed python tools
to create a flexible, full featured, and easy to use web
framework.
TurboGears 2 provides an integrated and well tested set of tools for
everything you need to build dynamic, database driven applications.
It provides a full range of tools for front end javascript
develeopment, back database development and everything in between:
* dynamic javascript powered widgets (ToscaWidgets2)
* automatic JSON generation from your controllers
* powerful, designer friendly XHTML based templating
* object or route based URL dispatching
* powerful Object Relational Mappers (SQLAlchemy)
The latest development version is available in the
`TurboGears Git repositories`_.
.. _TurboGears Git repositories:
https://github.com/TurboGears
"""
url="http://www.turbogears.org/"
author= "Alessandro Molina, Mark Ramm, Christopher Perkins, Jonathan LaCour, Rick Copland, Alberto Valverde, Michael Pedersen and the TurboGears community"
email = "amol@turbogears.org"
copyright = """Copyright 2005-2020 Kevin Dangoor, Alberto Valverde, Mark Ramm, Christopher Perkins, Alessandro Molina and contributors"""
license = "MIT"
| 41.806452 | 155 | 0.794753 | 167 | 1,296 | 6.149701 | 0.688623 | 0.013632 | 0.019474 | 0.050633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011638 | 0.138117 | 1,296 | 30 | 156 | 43.2 | 0.907789 | 0.029321 | 0 | 0 | 0 | 0.04 | 0.907348 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4db73effedd714b6a4d9b15c4a8d627cf47c849
| 1,151 |
py
|
Python
|
tests/manage/monitoring/pagerduty/test_ceph.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
tests/manage/monitoring/pagerduty/test_ceph.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
tests/manage/monitoring/pagerduty/test_ceph.py
|
MeridianExplorer/ocs-ci
|
a33d5116128b88f176f5eff68a3ef805125cdba1
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from ocs_ci.framework.testlib import (
managed_service_required,
skipif_ms_consumer,
tier4,
tier4a,
)
from ocs_ci.ocs import constants
from ocs_ci.utility import pagerduty
log = logging.getLogger(__name__)
@tier4
@tier4a
@managed_service_required
@skipif_ms_consumer
@pytest.mark.polarion_id("OCS-2771")
def test_corrupt_pg_pd(measure_corrupt_pg):
    """
    Test that there is appropriate incident in PagerDuty when Placement group
    on one OSD is corrupted and that this incident is cleared when the corrupted
    ceph pool is removed.

    :param measure_corrupt_pg: fixture providing at least the
        'pagerduty_incidents' and 'stop' keys (see the fixture's definition).
    """
    api = pagerduty.PagerDutyAPI()

    # Incidents collected while the corruption measurement was running.
    incidents = measure_corrupt_pg.get("pagerduty_incidents")
    target_label = constants.ALERT_CLUSTERERRORSTATE
    # TODO(fbalak): check the whole string in summary and incident alerts
    # A high-urgency incident matching the alert label must have fired...
    assert pagerduty.check_incident_list(
        summary=target_label,
        incidents=incidents,
        urgency="high",
    )
    # ...and it must be cleared after the measurement's end time.
    api.check_incident_cleared(
        summary=target_label,
        measure_end_time=measure_corrupt_pg.get("stop"),
    )
| 26.159091 | 80 | 0.741095 | 148 | 1,151 | 5.52027 | 0.533784 | 0.044064 | 0.033048 | 0.068543 | 0.093023 | 0.093023 | 0 | 0 | 0 | 0 | 0 | 0.008611 | 0.192876 | 1,151 | 43 | 81 | 26.767442 | 0.870829 | 0.264987 | 0 | 0.068966 | 0 | 0 | 0.042631 | 0 | 0 | 0 | 0 | 0.023256 | 0.034483 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4e990995bc970a5eeb5c450531463a5dff36df5
| 2,026 |
py
|
Python
|
pytouch/elements.py
|
Krai53n/pytouch
|
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
|
[
"MIT"
] | null | null | null |
pytouch/elements.py
|
Krai53n/pytouch
|
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
|
[
"MIT"
] | null | null | null |
pytouch/elements.py
|
Krai53n/pytouch
|
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
|
[
"MIT"
] | null | null | null |
from random import randint
import pyxel
from constants import Screen
import cursors
class Text:
    """One-line caption drawn horizontally centered at the top of the screen."""

    def __init__(self, text):
        self._text = text
        # Glyph width and inter-glyph gap in pixels (presumably matching
        # pyxel's built-in font — confirm against pyxel docs).
        self._symbol_len = 3
        self._padding_len = 1

    def _count_text_len(self):
        """Pixel width of the text: glyphs plus gaps, minus the trailing gap."""
        per_char = self._symbol_len + self._padding_len
        return per_char * len(self._text) - self._padding_len

    def _x_text_center_position(self):
        """Leftmost x coordinate that centers the text on screen."""
        return (Screen.width - self._count_text_len()) // 2

    def draw(self):
        """Render the caption at the top of the screen in color 2."""
        pyxel.text(self._x_text_center_position(), 0, self._text, 2)
class Score:
    """Running score rendered near the top-left corner of the screen."""

    def __init__(self, padding_right=2, padding_top=2):
        self._padding_right = padding_right
        self._padding_top = padding_top
        self.score = 0

    def increase(self):
        """Add one point."""
        self.score = self.score + 1

    def reduce(self):
        """Subtract one point."""
        self.score = self.score - 1

    def draw(self):
        """Draw 'Score: N' in a color contrasting with the background."""
        color = (Screen.bg - 2) % 16
        pyxel.text(self._padding_right, self._padding_top,
                   f"Score: {self.score}", color)
class Circle:
    """Filled circle with a mutable radius and color."""

    def __init__(self):
        self._r = 0
        # Default fill color: one step off the background, wrapped into 0..15.
        self._col = (Screen.bg - 1) % 16

    def zero(self):
        """Collapse the circle back to radius 0."""
        self._r = 0

    def increase(self, size=1):
        """Grow the radius by ``size`` (1 by default)."""
        self._r = self._r + size

    @property
    def r(self):
        """Current radius."""
        return self._r

    @r.setter
    def r(self, value):
        self._r = value

    @property
    def col(self):
        """Current fill color index."""
        return self._col

    @col.setter
    def col(self, value):
        self._col = value

    def draw(self, x, y):
        """Draw the filled circle centered at (x, y)."""
        pyxel.circ(x, y, self._r, self._col)
class ReachCircle(Circle):
    """Target circle the player must reach; respawns at a random position."""

    def __init__(self):
        super().__init__()
        self.min_r = 10
        self.respawn()

    @property
    def x(self):
        """Center x coordinate."""
        return self._x

    @property
    def y(self):
        """Center y coordinate."""
        return self._y

    def respawn(self):
        """Move the target to a random on-screen position and pick a new radius.

        NOTE(review): the position is clamped using the radius from *before*
        the reassignment on the last line — looks intentional-but-odd; confirm.
        """
        w, h = Screen.width, Screen.height
        self._x = randint(self._r, w - self._r)
        self._y = randint(self._r, h - self._r)
        self._r = randint(self.min_r, min(w, h) // 2) - 4

    def draw(self):
        """Draw the target as a circle outline."""
        pyxel.circb(self._x, self._y, self._r, self._col)
| 21.104167 | 80 | 0.579961 | 278 | 2,026 | 3.92446 | 0.190647 | 0.059578 | 0.064161 | 0.043996 | 0.143905 | 0.043996 | 0 | 0 | 0 | 0 | 0 | 0.016289 | 0.30306 | 2,026 | 95 | 81 | 21.326316 | 0.756374 | 0 | 0 | 0.161765 | 0 | 0 | 0.009378 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.308824 | false | 0 | 0.058824 | 0.088235 | 0.514706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4ed66dc63c65bd461e9e3340f0322d30f2b6c89
| 319 |
py
|
Python
|
count_split_inversions/test_count_split_inversions.py
|
abaldwin/algorithms
|
8c8722394c9115c572dadcd8ab601885512fd494
|
[
"Apache-2.0"
] | null | null | null |
count_split_inversions/test_count_split_inversions.py
|
abaldwin/algorithms
|
8c8722394c9115c572dadcd8ab601885512fd494
|
[
"Apache-2.0"
] | null | null | null |
count_split_inversions/test_count_split_inversions.py
|
abaldwin/algorithms
|
8c8722394c9115c572dadcd8ab601885512fd494
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from count_split_inversions import count_inversions
class TestCountSplitInversions(unittest.TestCase):
    """Unit tests for count_inversions."""

    def test_count_inversions(self):
        """[1, 3, 5, 2, 4, 6] contains the inversions (3,2), (5,2), (5,4)."""
        # Renamed from `input`, which shadowed the builtin of the same name.
        values = [1, 3, 5, 2, 4, 6]
        result = count_inversions(values)
        self.assertEqual(result, 3)
if __name__ == '__main__':
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| 22.785714 | 51 | 0.705329 | 38 | 319 | 5.552632 | 0.631579 | 0.21327 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027451 | 0.200627 | 319 | 13 | 52 | 24.538462 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0.025078 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4f12c3a663d3edb5021b78314c1afd940fc7b1a
| 412 |
py
|
Python
|
accountifie/toolkit/urls.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4 |
2017-06-02T08:48:48.000Z
|
2021-11-21T23:57:15.000Z
|
accountifie/toolkit/urls.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 3 |
2020-06-05T16:55:42.000Z
|
2021-06-10T17:43:12.000Z
|
accountifie/toolkit/urls.py
|
imcallister/accountifie
|
094834c9d632e0353e3baf8d924eeb10cba0add4
|
[
"MIT",
"Unlicense"
] | 4 |
2015-12-15T14:27:51.000Z
|
2017-04-21T21:42:27.000Z
|
from django.conf import settings
from django.conf.urls import url, static
from . import views
from . import jobs
urlpatterns = [
    # Switch the active company; company_id is captured from the URL path.
    url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
    # Maintenance jobs exposed as endpoints.
    url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
    url(r'^primecache/$', jobs.primecache, name='primecache'),
    # NOTE(review): unnamed route — add name= if it is ever reversed.
    url(r'^dump_fixtures/$', views.dump_fixtures),
]
| 27.466667 | 98 | 0.686893 | 53 | 412 | 5.226415 | 0.396226 | 0.057762 | 0.101083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145631 | 412 | 14 | 99 | 29.428571 | 0.786932 | 0 | 0 | 0 | 0 | 0 | 0.26764 | 0.087591 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
d4f1aa99ca10cb206e4f7702a9c7de6f3d6dfd4e
| 5,975 |
py
|
Python
|
intersight/models/niaapi_version_regex_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 21 |
2018-03-29T14:20:35.000Z
|
2021-10-13T05:11:41.000Z
|
intersight/models/niaapi_version_regex_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 14 |
2018-01-30T15:45:46.000Z
|
2022-02-23T14:23:21.000Z
|
intersight/models/niaapi_version_regex_all_of.py
|
sdnit-se/intersight-python
|
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
|
[
"Apache-2.0"
] | 18 |
2018-01-03T15:09:56.000Z
|
2021-07-16T02:21:54.000Z
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from intersight.configuration import Configuration
class NiaapiVersionRegexAllOf(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> declared OpenAPI type name.
    openapi_types = {
        'apic': 'NiaapiVersionRegexPlatform',
        'dcnm': 'NiaapiVersionRegexPlatform',
        'version': 'str'
    }

    # Maps python attribute name -> JSON key used on the wire.
    attribute_map = {'apic': 'Apic', 'dcnm': 'Dcnm', 'version': 'Version'}

    def __init__(self,
                 apic=None,
                 dcnm=None,
                 version=None,
                 local_vars_configuration=None):  # noqa: E501
        """NiaapiVersionRegexAllOf - a model defined in OpenAPI"""  # noqa: E501
        # Fall back to a default client Configuration when none is supplied.
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._apic = None
        self._dcnm = None
        self._version = None
        self.discriminator = None

        # Assign through the property setters only when a value was given.
        if apic is not None:
            self.apic = apic
        if dcnm is not None:
            self.dcnm = dcnm
        if version is not None:
            self.version = version

    @property
    def apic(self):
        """Gets the apic of this NiaapiVersionRegexAllOf.  # noqa: E501

        :return: The apic of this NiaapiVersionRegexAllOf.  # noqa: E501
        :rtype: NiaapiVersionRegexPlatform
        """
        return self._apic

    @apic.setter
    def apic(self, apic):
        """Sets the apic of this NiaapiVersionRegexAllOf.

        :param apic: The apic of this NiaapiVersionRegexAllOf.  # noqa: E501
        :type: NiaapiVersionRegexPlatform
        """
        self._apic = apic

    @property
    def dcnm(self):
        """Gets the dcnm of this NiaapiVersionRegexAllOf.  # noqa: E501

        :return: The dcnm of this NiaapiVersionRegexAllOf.  # noqa: E501
        :rtype: NiaapiVersionRegexPlatform
        """
        return self._dcnm

    @dcnm.setter
    def dcnm(self, dcnm):
        """Sets the dcnm of this NiaapiVersionRegexAllOf.

        :param dcnm: The dcnm of this NiaapiVersionRegexAllOf.  # noqa: E501
        :type: NiaapiVersionRegexPlatform
        """
        self._dcnm = dcnm

    @property
    def version(self):
        """Gets the version of this NiaapiVersionRegexAllOf.  # noqa: E501

        Version number for the Version Regex data, also used as identity.  # noqa: E501

        :return: The version of this NiaapiVersionRegexAllOf.  # noqa: E501
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this NiaapiVersionRegexAllOf.

        Version number for the Version Regex data, also used as identity.  # noqa: E501

        :param version: The version of this NiaapiVersionRegexAllOf.  # noqa: E501
        :type: str
        """
        self._version = version

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict()
                        if hasattr(x, "to_dict") else x, value))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], "to_dict") else item,
                        value.items()))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NiaapiVersionRegexAllOf):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, NiaapiVersionRegexAllOf):
            return True

        return self.to_dict() != other.to_dict()
| 34.738372 | 1,052 | 0.627113 | 683 | 5,975 | 5.405564 | 0.286969 | 0.030336 | 0.094258 | 0.080444 | 0.303088 | 0.273023 | 0.270585 | 0.153846 | 0.095883 | 0.053629 | 0 | 0.013539 | 0.295397 | 5,975 | 171 | 1,053 | 34.94152 | 0.86342 | 0.450209 | 0 | 0.064103 | 0 | 0 | 0.04408 | 0.018944 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.051282 | 0 | 0.371795 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4f20508bec1fb3b3210c9cb30a6481120876c56
| 2,158 |
py
|
Python
|
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
|
genemerewether/fprime
|
fcdd071b5ddffe54ade098ca5d451903daba9eed
|
[
"Apache-2.0"
] | 5 |
2019-10-22T03:41:02.000Z
|
2022-01-16T12:48:31.000Z
|
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
|
genemerewether/fprime
|
fcdd071b5ddffe54ade098ca5d451903daba9eed
|
[
"Apache-2.0"
] | 27 |
2019-02-07T17:58:58.000Z
|
2019-08-13T00:46:24.000Z
|
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
|
genemerewether/fprime
|
fcdd071b5ddffe54ade098ca5d451903daba9eed
|
[
"Apache-2.0"
] | 3 |
2019-01-01T18:44:37.000Z
|
2019-08-01T01:19:39.000Z
|
#
# Copyright 2004-2016, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
#
from __future__ import print_function
import os
from genmsg import MsgGenerationException
#from . name import *
## :param type_name outdir: Full path to output directory
## :returns int: status. 0 if successful
def write_modmk(outdir):  # , msg_types, srv_types):
    """Write a mod.mk listing every .xml file found in *outdir*.

    :param type_name outdir: Full path to output directory
    :returns int: status. 0 if successful
    """
    if not os.path.isdir(outdir):
        # TODO: warn?
        return 0
    xml_in_dir = {name for name in os.listdir(outdir) if name.endswith('.xml')}
    _write_modmk(outdir, sorted(xml_in_dir))
    # TODO(mereweth) if we want to independently specify the generated XML files
    # generated_xml = [_msg_serializable_xml_name(f) for f in sorted(msg_types)]
    # generated_xml.extend([_port_xml_name(f) for f in sorted(msg_types)]
    # write_msg_modmk(outdir, generated_xml)
    # generated_xml = [_srv_serializable_xml_name(f) for f in sorted(srv_types)]
    # generated_xml.extend([_port_xml_name(f) for f in sorted(srv_types)]
    # write_msg_modmk(outdir, generated_xml)
    return 0
def _write_modmk(outdir, generated_xml):
    """Write the names in *generated_xml* into <outdir>/mod.mk as a SRC list.

    Creates *outdir* (and parents) when missing. Returns 0 on success.

    :raises MsgGenerationException: when *outdir* exists but is a regular
        file, so the directory cannot be created.
    """
    if not os.path.exists(outdir):
        os.makedirs(outdir)
    elif not os.path.isdir(outdir):
        # Bug fix: the message previously interpolated the *builtin* `dir`
        # (a function object) instead of the offending path `outdir`.
        raise MsgGenerationException(
            "file preventing the creating of Fprime directory: %s" % outdir)
    p = os.path.join(outdir, 'mod.mk')
    with open(p, 'w') as f:
        # Every entry but the last gets a trailing backslash continuation.
        f.write('SRC = \\\n')
        if len(generated_xml) != 0:
            for xml in generated_xml[:-1]:
                f.write('%s \\\n' % xml)
            f.write('%s\n' % generated_xml[-1])
    return 0
| 37.206897 | 96 | 0.698332 | 315 | 2,158 | 4.628571 | 0.419048 | 0.090535 | 0.017147 | 0.024005 | 0.240055 | 0.165981 | 0.165981 | 0.123457 | 0.106996 | 0.064472 | 0 | 0.008793 | 0.209453 | 2,158 | 57 | 97 | 37.859649 | 0.845838 | 0.580167 | 0 | 0.130435 | 0 | 0 | 0.09589 | 0 | 0 | 0 | 0 | 0.017544 | 0 | 1 | 0.086957 | false | 0 | 0.130435 | 0 | 0.347826 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
d4f523ec6d8e4a47a69a4a400a7f08b9647af175
| 1,154 |
py
|
Python
|
src/cut_link/utils.py
|
true7/srt
|
d5accd411e73ade4ed40a41759e95cb20fbda98d
|
[
"MIT"
] | null | null | null |
src/cut_link/utils.py
|
true7/srt
|
d5accd411e73ade4ed40a41759e95cb20fbda98d
|
[
"MIT"
] | null | null | null |
src/cut_link/utils.py
|
true7/srt
|
d5accd411e73ade4ed40a41759e95cb20fbda98d
|
[
"MIT"
] | null | null | null |
import string
import random
import json
from calendar import month_name
from django.conf import settings
SHORTLINK_MIN = getattr(settings, "SHORTLINK_MIN", 6)
def code_generator(size=SHORTLINK_MIN):
    """Return a random alphanumeric code of *size* characters."""
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
def create_shortlink(instance):
    """Generate a shortlink code that is unique for *instance*'s model.

    Re-rolls (recursively) while the generated code collides with an
    existing row of the same model class.
    """
    new_link = code_generator()
    class_ = instance.__class__
    query_set = class_.objects.filter(shortlink=new_link)
    if query_set.exists():
        # Bug fix: the retry previously called create_shortlink() without
        # the required `instance` argument, raising TypeError on the
        # first collision instead of generating a new code.
        return create_shortlink(instance)
    return new_link
def json_data_func(instance):
    '''Return json format data, ready for passing into AmCharts.

    Each entry holds 2 items - name of the month and count of distinct
    links, which were cut on the website.
    '''
    model = instance.__class__
    # FIXME. The problem is every next year it will add results above
    monthly_counts = []
    for month_index in range(1, len(month_name)):
        usage = model.objects.filter(pub_date__month=month_index).count()
        monthly_counts.append(dict(month=month_name[month_index], count=usage))
    return json.dumps(monthly_counts)
| 27.47619 | 72 | 0.710572 | 159 | 1,154 | 4.918239 | 0.509434 | 0.034527 | 0.051151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003272 | 0.205373 | 1,154 | 41 | 73 | 28.146341 | 0.849509 | 0.189775 | 0 | 0.08 | 0 | 0 | 0.014396 | 0 | 0 | 0 | 0 | 0.02439 | 0 | 1 | 0.12 | false | 0 | 0.2 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be077745c0ef294c19a02fb08ff66ab17f79fb99
| 898 |
py
|
Python
|
day1/files_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
day1/files_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
day1/files_ex1.py
|
grenn72/pynet-ons-feb19
|
5aff7dfa6a697214dc24818819a60b46a261d0d3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""Demonstrate four ways to read a text file and two ways to write one.

Reads ``my_file.txt`` from the working directory, echoing its contents
with several iteration idioms, then writes and appends ``new_file.txt``.
"""
from __future__ import print_function
# READ ####
f = open("my_file.txt")
# 1) Iterate the file object directly (line by line, memory friendly).
print("\nLoop directly over file")
print("-" * 60)
for line in f:
    print(line.strip())
print("-" * 60)
# 2) readlines(): load all lines into a list first.
f.seek(0)
my_content = f.readlines()
print("\nUse readlines method")
print("-" * 60)
for line in my_content:
    print(line.strip())
print("-" * 60)
# 3) read() + splitlines(): one string, split on line boundaries.
f.seek(0)
my_content = f.read()
print("\nUse read + splitlines")
print("-" * 60)
for line in my_content.splitlines():
    print(line)
print("-" * 60)
f.close()
# 4) Context manager: the file is closed automatically on exit.
with open("my_file.txt") as f:
    print("\nUse with and loop over file")
    print("-" * 60)
    for line in f:
        print(line.strip())
    print("-" * 60)
# WRITE ####
print("\nWriting file.")
f = open("new_file.txt", "w")  # "w" truncates any existing file
f.write("whatever2\n")
f.close()
# APPEND ####
print("\nAppending file.")
with open("new_file.txt", "a") as f:  # "a" appends to the end
    f.write("something else\n")
print()
| 18.708333 | 42 | 0.614699 | 138 | 898 | 3.905797 | 0.333333 | 0.103896 | 0.074212 | 0.103896 | 0.361781 | 0.361781 | 0.361781 | 0.269017 | 0.269017 | 0.269017 | 0 | 0.025641 | 0.174833 | 898 | 47 | 43 | 19.106383 | 0.701754 | 0.044543 | 0 | 0.472222 | 0 | 0 | 0.254459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027778 | 0 | 0.027778 | 0.555556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
0
| 1 |
be10e301876952317779fb802d1ea27b44f1342a
| 2,188 |
py
|
Python
|
ks_engine/variable_scoring.py
|
FilippoRanza/ks.py
|
47d909fb70fec50f8d3174855bf5d0c05527bf03
|
[
"MIT"
] | 2 |
2021-01-29T11:45:39.000Z
|
2022-03-10T03:17:12.000Z
|
ks_engine/variable_scoring.py
|
Optimization-Algorithms/ks.py
|
44890d33a744c5c4865b96f97efc1e5241b719b1
|
[
"MIT"
] | 1 |
2020-05-12T16:18:34.000Z
|
2020-05-12T16:18:34.000Z
|
ks_engine/variable_scoring.py
|
Optimization-Algorithms/ks.py
|
44890d33a744c5c4865b96f97efc1e5241b719b1
|
[
"MIT"
] | 1 |
2021-01-29T11:45:45.000Z
|
2021-01-29T11:45:45.000Z
|
#! /usr/bin/python
from .solution import Solution
try:
import gurobipy
except ImportError:
print("Gurobi not found: error ignored to allow tests")
def variable_score_factory(sol: Solution, base_kernel: dict, config: dict):
    """Build the scoring strategy selected by *config*.

    A truthy ``VARIABLE_RANKING`` entry selects VariableRanking,
    otherwise ReducedCostScoring is used.
    """
    strategy = VariableRanking if config.get("VARIABLE_RANKING") else ReducedCostScoring
    return strategy(sol, base_kernel)
class AbstactVariableScoring:
    """Base class holding a score for every variable of a solution.

    Variables already in the base kernel start at 0; all others start
    at their value in *solution*.
    """

    def __init__(self, solution: Solution, base_kernel: dict):
        scores = {}
        for name, val in solution.vars.items():
            scores[name] = 0 if base_kernel[name] else val
        self.score = scores

    def get_value(self, var_name):
        """Return the current score of *var_name*."""
        return self.score[var_name]

    def success_update_score(self, curr_kernel, curr_bucket):
        """Hook invoked after a successful bucket solve; subclasses decide."""
        raise NotImplementedError

    def failure_update_score(self, curr_kernel, curr_bucket):
        """Hook invoked after a failed bucket solve; subclasses decide."""
        raise NotImplementedError
class ReducedCostScoring(AbstactVariableScoring):
    """Static scoring: the initial reduced-cost scores are never updated."""

    def success_update_score(self, curr_kernel, curr_bucket):
        """No-op: scores stay fixed after a successful solve."""
        pass

    def failure_update_score(self, curr_kernel, curr_bucket):
        """No-op: scores stay fixed after a failed solve."""
        pass
class VariableRanking(AbstactVariableScoring):
    """Dynamic scoring that re-ranks variables after each solve."""

    def cb_update_score(self, name, value):
        """Nudge *name*'s score from a MIP-solution callback: +0.1 when the
        variable is at zero, -0.1 otherwise."""
        delta = 0.1 if value == 0 else -0.1
        self.score[name] += delta

    def success_update_score(self, curr_kernel, curr_bucket):
        """On success: lower kernel members' scores by 15, raise others by 15."""
        for var in curr_bucket:
            self.score[var] += -15 if curr_kernel[var] else 15

    def failure_update_score(self, curr_kernel, curr_bucket):
        """On failure: mild adjustment in the opposite direction (+/-1)."""
        for var in curr_bucket:
            self.score[var] += 1 if curr_kernel[var] else -1
def callback_factory(scoring: AbstactVariableScoring):
    """Return a Gurobi callback for ranking-based scoring, or None.

    Only VariableRanking needs per-solution updates; other scorings
    get no callback.
    """
    if not isinstance(scoring, VariableRanking):
        return None
    return __build_callback__(scoring)
def __build_callback__(scoring):
    """Wrap *scoring* in a Gurobi callback reacting to new MIP solutions."""
    def callback(model, where):
        # Only act when Gurobi reports a new incumbent solution.
        if where != gurobipy.GRB.Callback.MIPSOL:
            return
        for var in model.getVars():
            value = model.cbGetSolution(var)
            scoring.cb_update_score(var.varName, value)
    return callback
| 27.012346 | 86 | 0.65128 | 261 | 2,188 | 5.237548 | 0.279693 | 0.05267 | 0.076811 | 0.083394 | 0.326262 | 0.304316 | 0.304316 | 0.304316 | 0.298464 | 0.117045 | 0 | 0.007435 | 0.26234 | 2,188 | 80 | 87 | 27.35 | 0.839529 | 0.00777 | 0 | 0.375 | 0 | 0 | 0.028571 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.232143 | false | 0.035714 | 0.053571 | 0.017857 | 0.410714 | 0.017857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be18b88ab1937677b7e3d5583d09538c7f91bce2
| 2,460 |
py
|
Python
|
pdf2write.py
|
codeunik/stylus_labs_write_pdf_importer
|
25d7aa037647a86284c24527bda7b222cf95bb62
|
[
"MIT"
] | null | null | null |
pdf2write.py
|
codeunik/stylus_labs_write_pdf_importer
|
25d7aa037647a86284c24527bda7b222cf95bb62
|
[
"MIT"
] | null | null | null |
pdf2write.py
|
codeunik/stylus_labs_write_pdf_importer
|
25d7aa037647a86284c24527bda7b222cf95bb62
|
[
"MIT"
] | null | null | null |
# PDF -> Stylus Labs Write SVG converter.
#
# Usage: pdf2write.py <input.pdf>
# Renders each page of the input PDF to PNG via pdftoppm, embeds the
# images base64-encoded into one Write-style SVG document, then gzips
# the result next to the input file.
import base64
import os
import sys
import PyPDF2
# Static document header/config expected by Stylus Labs Write.
svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect id="write-doc-background" width="100%" height="100%" fill="#808080"/>
<defs id="write-defs">
<script type="text/writeconfig">
<int name="docFormatVersion" value="2" />
<int name="pageColor" value="-1" />
<int name="pageNum" value="0" />
<int name="ruleColor" value="0" />
<float name="marginLeft" value="0" />
<float name="xOffset" value="-380.701752" />
<float name="xRuling" value="0" />
<float name="yOffset" value="1536.84216" />
<float name="yRuling" value="0" />
</script>
</defs>
'''
pdf_path = sys.argv[1]
pdf = PyPDF2.PdfFileReader(pdf_path, "rb")
img_width = 720  # fixed output page width in px
n_pages = pdf.getNumPages() + 1
# Output page height derived from the first page's aspect ratio;
# all pages are assumed to share it.
page = pdf.getPage(0)
width = page.mediaBox.getWidth()
height = page.mediaBox.getHeight()
aspect_ratio = height/width
img_height = int(aspect_ratio * img_width)
os.system('mkdir -p /tmp/pdf2write')  # scratch dir for per-page PNGs
new_page_height = 0  # running y-offset of the next page inside the SVG
# NOTE(review): the loop covers 0..getNumPages(); pdftoppm's -f page
# numbers are normally 1-based, so requesting page 0 looks off — confirm.
# NOTE(review): pdf_path is interpolated unquoted into shell commands;
# file names containing spaces or shell metacharacters will break this.
for page in range(n_pages):
    print(f"Processing {page}/{n_pages}", end='\r')
    os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')
    with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:
        base64_data = base64.b64encode(f.read()).decode('utf-8')
    # One Write page: a white page rect plus the rendered PNG image,
    # stacked 10px below the previous page.
    tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000">
<g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke">
<rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" />
</g>
<image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/>
</g>
</svg>'''
    new_page_height += (img_height+10)
    svg += tmp_svg
svg += '''</svg>'''
os.system('rm -rf /tmp/pdf2write')  # clean up scratch PNGs
# Write the combined SVG next to the input, then gzip it with suffix
# 'z' (producing <name>.svgz — presumably Write's compressed format).
with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f:
    f.write(svg)
os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
| 37.846154 | 230 | 0.667073 | 379 | 2,460 | 4.240106 | 0.350923 | 0.030492 | 0.046671 | 0.029869 | 0.215308 | 0.187928 | 0.170504 | 0.170504 | 0.170504 | 0.170504 | 0 | 0.049564 | 0.114228 | 2,460 | 64 | 231 | 38.4375 | 0.68793 | 0 | 0 | 0.039216 | 0 | 0.196078 | 0.713008 | 0.231301 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.078431 | 0 | 0.078431 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be18cd8c90ebbd40ae9aadcbac8dd9bce504b9ec
| 2,462 |
py
|
Python
|
py_headless_daw/project/having_parameters.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 22 |
2020-06-09T18:46:56.000Z
|
2021-09-28T02:11:42.000Z
|
py_headless_daw/project/having_parameters.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 19 |
2020-06-03T06:34:57.000Z
|
2021-01-26T07:36:17.000Z
|
py_headless_daw/project/having_parameters.py
|
hq9000/py-headless-daw
|
33e08727c25d3f00b2556adf5f25c9f7ff4d4304
|
[
"MIT"
] | 1 |
2020-06-18T09:25:21.000Z
|
2020-06-18T09:25:21.000Z
|
from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
class HavingParameters:
    """Mixin storing named Parameter objects with typed accessors."""

    def __init__(self):
        # Parameters keyed by name, in insertion order.
        self._parameters: Dict[str, Parameter] = {}
        super().__init__()

    def has_parameter(self, name: str) -> bool:
        """Return True when a parameter called *name* is registered."""
        return name in self._parameters

    def add_parameter(self,
                      name: str,
                      value: ParameterValueType,
                      param_type: str,
                      value_range: ParameterRangeType):
        """Create a Parameter and register it; *name* must not exist yet."""
        if name in self._parameters:
            raise Exception('parameter named ' + name + ' already added to this object')
        parameter = Parameter(name, value, param_type, value_range)
        self._parameters[name] = parameter

    def add_parameter_object(self, parameter: Parameter) -> None:
        """Register an already-constructed Parameter under its own name."""
        self._parameters[parameter.name] = parameter

    def get_parameter(self, name: str) -> Parameter:
        """Return the parameter called *name*.

        Improvement: direct O(1) dict lookup instead of the previous
        linear scan over self.parameters (the dict is keyed by name).
        The exception type and message format are unchanged.
        """
        if name not in self._parameters:
            available_names = ', '.join(self._parameters)
            raise Exception('parameter named ' + name + ' not found. Available: ' + available_names)
        return self._parameters[name]

    def get_parameter_value(self, name: str) -> ParameterValueType:
        """Return the raw (untyped) value of parameter *name*."""
        param = self.get_parameter(name)
        return param.value

    def get_float_parameter_value(self, name: str) -> float:
        """Return the value of *name*, verifying it is a float parameter."""
        param = self.get_parameter(name)
        if param.type != Parameter.TYPE_FLOAT:
            raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
        value = self.get_parameter_value(name)
        return cast(float, value)

    def get_enum_parameter_value(self, name: str) -> str:
        """Return the value of *name*, verifying it is an enum parameter."""
        param = self.get_parameter(name)
        if param.type != Parameter.TYPE_ENUM:
            raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
        value = self.get_parameter_value(name)
        return cast(str, value)

    def set_parameter_value(self, name: str, value: ParameterValueType):
        """Assign *value* to the existing parameter *name*."""
        param = self.get_parameter(name)
        param.value = value

    @property
    def parameters(self) -> List[Parameter]:
        """All registered Parameter objects, in insertion order."""
        return list(self._parameters.values())
| 35.681159 | 107 | 0.644598 | 283 | 2,462 | 5.413428 | 0.229682 | 0.076371 | 0.050261 | 0.057441 | 0.399478 | 0.269582 | 0.177546 | 0.177546 | 0.177546 | 0.063969 | 0 | 0.005513 | 0.263201 | 2,462 | 68 | 108 | 36.205882 | 0.83903 | 0.010561 | 0 | 0.163265 | 0 | 0 | 0.083402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.204082 | false | 0 | 0.040816 | 0.040816 | 0.387755 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be21dcede1ec1af84c0ccb9e8297bd042d23271a
| 1,712 |
py
|
Python
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 13 |
2015-11-30T15:49:45.000Z
|
2022-02-08T16:11:30.000Z
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 640 |
2015-02-11T18:55:47.000Z
|
2022-03-31T14:12:23.000Z
|
CondTools/BeamSpot/test/BeamSpotRcdPrinter_cfg.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 51 |
2015-08-11T21:01:40.000Z
|
2022-03-30T07:31:34.000Z
|
# CMSSW configuration: dump BeamSpot conditions records for an IOV range
# using the BeamSpotRcdPrinter analyzer.
import FWCore.ParameterSet.Config as cms
import os
process = cms.Process("summary")
# Route all MessageLogger output (down to DEBUG) to stdout.
process.MessageLogger = cms.Service( "MessageLogger",
                                     debugModules = cms.untracked.vstring( "*" ),
                                     cout = cms.untracked.PSet( threshold = cms.untracked.string( "DEBUG" ) ),
                                     destinations = cms.untracked.vstring( "cout" )
                                     )
# A single empty event is enough: the printer only reads condition DB payloads.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptySource",
    numberEventsInRun = cms.untracked.uint32(1),
    firstRun = cms.untracked.uint32(1)
)
process.load("CondCore.CondDB.CondDB_cfi")
process.load("CondTools.BeamSpot.BeamSpotRcdPrinter_cfi")
# Exactly one of the tag/IOV blocks below should be active at a time.
### 2018 Prompt
process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
process.BeamSpotRcdPrinter.startIOV = 1350646955507767
process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.BeamSpotRcdPrinter.output = "summary2018_Prompt.txt"
### 2017 ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1275820035276801
#process.BeamSpotRcdPrinter.endIOV = 1316235677532161
### 2018 ABC ReReco
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_LumiBased_v4_offline"
#process.BeamSpotRcdPrinter.startIOV = 1354018504835073
#process.BeamSpotRcdPrinter.endIOV = 1374668707594734
### 2018D Prompt
#process.BeamSpotRcdPrinter.tagName = "BeamSpotObjects_PCL_byLumi_v0_prompt"
#process.BeamSpotRcdPrinter.startIOV = 1377280047710242
#process.BeamSpotRcdPrinter.endIOV = 1406876667347162
process.p = cms.Path(process.BeamSpotRcdPrinter)
| 38.044444 | 110 | 0.733645 | 155 | 1,712 | 7.993548 | 0.412903 | 0.282486 | 0.100081 | 0.151735 | 0.421308 | 0.33414 | 0.33414 | 0.33414 | 0.33414 | 0.33414 | 0 | 0.113221 | 0.169393 | 1,712 | 44 | 111 | 38.909091 | 0.758087 | 0.352804 | 0 | 0 | 0 | 0 | 0.152855 | 0.115101 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be237e880ccb11dff8fac9488a75005cce1dd897
| 381 |
py
|
Python
|
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | 2 |
2021-05-13T18:02:00.000Z
|
2022-03-30T19:53:38.000Z
|
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | null | null | null |
django/authentication/api/urls.py
|
NAVANEETHA-BS/Django-Reactjs-Redux-Register-login-logout-Homepage--Project
|
f29ed189b988a2d46d76b3c58cf77d1ed58ca64d
|
[
"MIT"
] | null | null | null |
"""URL routes for JWT authentication (rest_framework_simplejwt views)."""
from django.urls import path
from rest_framework_simplejwt.views import (
    TokenObtainPairView,
    TokenRefreshView,
    TokenVerifyView
)
urlpatterns = [
    # POST credentials -> access/refresh token pair
    path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    # POST refresh token -> new access token
    path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    # POST token -> success response when the token is valid
    path('verify/', TokenVerifyView.as_view(), name='token_verify'),
]
| 29.307692 | 77 | 0.734908 | 40 | 381 | 6.775 | 0.525 | 0.066421 | 0.110701 | 0.166052 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.131234 | 381 | 12 | 78 | 31.75 | 0.818731 | 0 | 0 | 0 | 0 | 0 | 0.167979 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be23b9cced5e521037b8711e7bde05f5d17925f0
| 7,257 |
py
|
Python
|
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | null | null | null |
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | null | null | null |
yue/core/explorer/ftpsource.py
|
nsetzer/YueMusicPlayer
|
feaf6fe5c046b1a7f6b7774d4e86a2fbb1e431cf
|
[
"MIT"
] | 1 |
2019-03-06T14:29:27.000Z
|
2019-03-06T14:29:27.000Z
|
from ftplib import FTP,error_perm, all_errors
import posixpath
from io import BytesIO,SEEK_SET
from .source import DataSource
import sys
import re
# URL shape: (ssh|ftp)://[user[:password]@]host[:port]/path
# Raw string so the \: and \/ escapes reach the regex engine verbatim
# (non-raw unrecognized escapes only worked by accident).
reftp = re.compile(r'(ssh|ftp)\:\/\/(([^@:]+)?:?([^@]+)?@)?([^:]+)(:[0-9]+)?\/(.*)')
def parseFTPurl( url ):
    """Split an ftp:// or ssh:// URL into its components.

    :param url: URL of the form scheme://[user[:pass]@]host[:port]/path
    :returns: dict with keys mode, username, password, hostname, port,
        path. Missing credentials default to "", a missing path to "/",
        and a missing port to 22 (ssh) or 21 (ftp).
    :raises ValueError: when the URL does not match the expected shape.
    """
    m = reftp.match( url )
    if m:
        g = m.groups()
        result = {
            "mode" : g[0],
            "username" : g[2] or "",
            "password" : g[3] or "",
            "hostname" : g[4] or "",
            "port" : int(g[5][1:]) if g[5] else 0,
            "path" : g[6] or "/",
        }
        if result['port'] == 0:
            # Bug fix: this previously compared against the undefined
            # name `ssh`, raising NameError whenever no port was given.
            if result['mode'] == "ssh":
                result['port'] = 22
            else:
                result['port'] = 21  # ftp port default
        return result
    raise ValueError("invalid: %s"%url)
def utf8_fix(s):
    """Replace every non-ASCII character with its two-digit uppercase hex
    code so the result is safe to hand to ftplib command builders."""
    out = []
    for ch in s:
        out.append(ch if ord(ch) < 128 else "%02X"%ord(ch))
    return ''.join(out)
class FTPWriter(object):
    """File-like object buffering writes in memory, uploaded on close().

    Data accumulates in a BytesIO; close() rewinds it and pushes the
    whole buffer to the server with a single STOR command.
    """
    def __init__(self, ftp, path):
        super(FTPWriter, self).__init__()
        self.ftp = ftp
        self.path = path
        self.file = BytesIO()
    def write(self, data):
        """Append *data* to the buffer; returns the number of bytes written."""
        return self.file.write(data)
    def seek(self, pos, whence=SEEK_SET):
        """Reposition the buffer cursor; returns the new position."""
        return self.file.seek(pos, whence)
    def tell(self):
        """Current position in the buffer."""
        return self.file.tell()
    def close(self):
        """Rewind the buffer and upload its contents via STOR."""
        self.file.seek(0)
        command = "STOR " + utf8_fix(self.path)
        self.ftp.storbinary(command, self.file)
    def __enter__(self):
        return self
    def __exit__(self, typ, val, tb):
        # Only upload when the with-block exited without an exception.
        if typ is None:
            self.close()
class FTPReader(object):
    """File-like object that downloads the whole remote file on open.

    __init__ fetches the file with RETR into a BytesIO; read/seek/tell
    then operate purely on the local buffer.
    """
    def __init__(self, ftp, path):
        super(FTPReader, self).__init__()
        self.ftp = ftp
        self.path = path
        self.file = BytesIO()
        # Fetch the complete remote file up front.
        command = "RETR " + utf8_fix(self.path)
        self.ftp.retrbinary(command, self.file.write)
        self.file.seek(0)
    def read(self, n=None):
        """Read up to *n* bytes (all remaining bytes when n is None)."""
        return self.file.read(n)
    def seek(self, pos, whence=SEEK_SET):
        """Reposition the buffer cursor; returns the new position."""
        return self.file.seek(pos, whence)
    def tell(self):
        """Current position in the buffer."""
        return self.file.tell()
    def close(self):
        """Release the in-memory buffer."""
        self.file.close()
    def __enter__(self):
        return self
    def __exit__(self, typ, val, tb):
        if typ is None:
            self.close()
class FTPSource(DataSource):
    """
    DataSource backed by a remote FTP server.

    Encoding note: there is some sort of problem with utf-8/latin-1 and
    ftplib. storbinary must accept a STRING, since it builds a cmd and
    adds the CRLF to the input argument using the plus operator. The
    command fails when given unicode text (ord > 127) and also fails
    when given a byte string — hence utf8_fix() is applied to paths
    before they are sent.
    """
    # TODO: turn this into a directory generator
    # which first loads the directory, then loops over
    # loaded items.
    # TODO: on windows we need a way to view available
    # drive letters
    def __init__(self, host, port, username="", password=""):
        # Connect and log in immediately; the session stays open until close().
        super(FTPSource, self).__init__()
        self.ftp = FTP()
        self.ftp.connect(host,port)
        self.ftp.login(username,password)
        self.hostname = "%s:%d"%(host,port)
    def root(self):
        """Top-most path on the remote server."""
        return "/"
    def close(self):
        """Quit the FTP session; errors are reported to stderr, not raised."""
        try:
            self.ftp.quit()
        except all_errors as e:
            sys.stderr.write("Error Closing FTP connection\n")
            sys.stderr.write("%s\n"%e)
        super().close()
    def fix(self, path):
        """Escape non-ASCII characters so ftplib accepts the path."""
        return utf8_fix(path)
    def join(self,*args):
        """Join path components using POSIX separators."""
        return posixpath.join(*args)
    def breakpath(self,path):
        """Split *path* into its non-empty components (either separator)."""
        return [ x for x in path.replace("/","\\").split("\\") if x ]
    def relpath(self,path,base):
        """Return *path* relative to *base* (POSIX rules)."""
        return posixpath.relpath(path,base)
    def normpath(self,path,root=None):
        """Normalize *path*; a relative path is first joined onto *root*."""
        if root and not path.startswith("/"):
            path = posixpath.join(root,path)
        return posixpath.normpath( path )
    def listdir(self,path):
        """Names of the entries in the remote directory *path* (NLST)."""
        return self.ftp.nlst(path)
    def parent(self,path):
        """Parent directory of *path*."""
        # TODO: if path is C:\\ return empty string ?
        # empty string returns drives
        p,_ = posixpath.split(path)
        return p
    def move(self,oldpath,newpath):
        """Rename/move a remote file."""
        self.ftp.rename(oldpath,newpath)
    def delete(self,path):
        """Remove a remote file or directory; errors are printed, not raised."""
        # todo support removing directory rmdir()
        path = utf8_fix(path)
        if self.exists( path ):
            if self.isdir(path):
                try:
                    self.ftp.rmd(path)
                except Exception as e:
                    print("ftp delete error: %s"%e)
            else:
                try:
                    self.ftp.delete(path)
                except Exception as e:
                    print("ftp delete error: %s"%e)
    def open(self,path,mode):
        """Open a remote file: "wb" -> FTPWriter, "rb" -> FTPReader."""
        if mode=="wb":
            return FTPWriter(self.ftp,path)
        elif mode=="rb":
            return FTPReader(self.ftp,path)
        raise NotImplementedError(mode)
    def exists(self,path):
        """True when *path* appears in its parent's directory listing."""
        path = utf8_fix(path)
        p,n=posixpath.split(path)
        lst = set(self.listdir(p))
        return n in lst
    def isdir(self,path):
        """Heuristic: a path whose SIZE cannot be determined is a directory."""
        path = utf8_fix(path)
        try:
            return self.ftp.size(path) is None
        except error_perm:
            # TODO: to think about more later,
            # under my use-case, I'm only asking if a path is a directory
            # if I Already think it exists. Under the current FTP impl
            # ftp.size() fails for various reasons unless the file exists
            # and is an accessable file. I can infer that a failure to
            # determine the size means that the path is a directory,
            # but this does not hold true under other use cases.
            # I can't cache listdir calls, but if I could, then I could
            # use that to determine if the file exists
            return True#self.exists( path )
    def mkdir(self,path):
        """Create *path* on the server, creating one missing parent if needed.

        Errors are printed, not raised.
        """
        # this is a really ugly quick and dirty solution
        path = utf8_fix(path)
        if not self.exists(path):
            p = self.parent( path )
            try:
                if not self.exists(p):
                    self.ftp.mkd( p )
                self.ftp.mkd(path)
            except Exception as e:
                print("ftp mkd error: %s"%e)
    def split(self,path):
        """posixpath.split wrapper."""
        return posixpath.split(path)
    def splitext(self,path):
        """posixpath.splitext wrapper."""
        return posixpath.splitext(path)
    def stat(self,path):
        """Best-effort stat record; times and mode are not available via FTP."""
        try:
            size = self.ftp.size(path)
        except error_perm:
            size = None
        result = {
            "isDir" : size is None,
            "isLink": False,
            "mtime" : 0,
            "ctime" : 0,
            "size" : size or 0,
            "name" : self.split(path)[1],
            "mode" : 0
        }
        return result
    def stat_fast(self,path):
        """Reduced stat record; still costs a SIZE round-trip on FTP."""
        # not fast for thus file system :(
        try:
            size = self.ftp.size(path)
        except error_perm:
            size = None
        result = {
            "name" : self.split(path)[1],
            "size" : size or 0,
            "isDir" : size is None,
            "isLink" : False,
        }
        return result
    def chmod(self,path,mode):
        """Not supported over FTP here; logs and does nothing."""
        print("chmod not implemented")
    def getExportPath(self,path):
        """Prefix *path* with host:port to form an exportable identifier."""
        return self.hostname+path
| 27.384906 | 83 | 0.539893 | 929 | 7,257 | 4.153929 | 0.276642 | 0.038093 | 0.021767 | 0.015548 | 0.253952 | 0.235812 | 0.193314 | 0.18554 | 0.18554 | 0.18554 | 0 | 0.009011 | 0.342428 | 7,257 | 264 | 84 | 27.488636 | 0.799665 | 0.166598 | 0 | 0.38674 | 0 | 0 | 0.053309 | 0.010194 | 0 | 0 | 0 | 0.003788 | 0 | 1 | 0.209945 | false | 0.016575 | 0.033149 | 0.099448 | 0.414365 | 0.022099 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
be2a32ef4dd37c381a36c7a58f2812962caeb4d5
| 502 |
py
|
Python
|
logger_application/logger.py
|
swatishayna/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | 1 |
2022-03-24T20:26:44.000Z
|
2022-03-24T20:26:44.000Z
|
logger_application/logger.py
|
surajaiswal13/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | null | null | null |
logger_application/logger.py
|
surajaiswal13/OnlineEDAAutomation
|
a1bfe8b1dee51a4872529a98f6e1136922329e3e
|
[
"MIT"
] | 2 |
2022-02-08T16:35:32.000Z
|
2022-03-04T06:56:54.000Z
|
from datetime import datetime
from src.utils import uploaded_file
import os
class App_Logger:
    """Write timestamped, tab-separated log records to a caller-supplied
    writable file object."""

    def __init__(self):
        pass

    def log(self, file_object, email, log_message, log_writer_id):
        """Append one record to *file_object*.

        Record layout (fields separated by double tabs, newline-terminated):
        <email>_eda_<log_writer_id>, <date>/<HH:MM:SS>, <email>, <message>.
        """
        self.now = datetime.now()
        self.date = self.now.date()
        self.current_time = self.now.strftime("%H:%M:%S")
        fields = [
            email + "_eda_" + log_writer_id,
            str(self.date) + "/" + str(self.current_time),
            email,
            log_message,
        ]
        file_object.write("\t\t".join(fields) + "\n")
| 27.888889 | 143 | 0.621514 | 73 | 502 | 4.027397 | 0.465753 | 0.071429 | 0.07483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.227092 | 502 | 17 | 144 | 29.529412 | 0.757732 | 0 | 0 | 0 | 0 | 0 | 0.055888 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.083333 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
076c3b7d76dce4361980237fd24f6e7d24b9f302
| 368 |
py
|
Python
|
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
utils/scripts/OOOlevelGen/src/sprites/__init__.py
|
fullscreennl/monkeyswipe
|
c56192e202674dd5ab18023f6cf14cf51e95fbd0
|
[
"MIT"
] | null | null | null |
# Public sprite classes re-exported by this package (order preserved).
__all__ = [
    'EnemyBucketWithStar', 'Nut', 'Beam', 'Enemy',
    'Friend', 'Hero', 'Launcher', 'Rotor',
    'SpikeyBuddy', 'Star', 'Wizard', 'EnemyEquipedRotor',
    'CyclingEnemyObject', 'Joints', 'Bomb', 'Contacts',
]
| 21.647059 | 33 | 0.366848 | 17 | 368 | 7.705882 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.497283 | 368 | 16 | 34 | 23 | 0.708108 | 0 | 0 | 0 | 0 | 0 | 0.347826 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
076ea8e320bea4958c4967806ffb3361e0b72568
| 2,400 |
py
|
Python
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,755 |
2015-01-03T06:55:00.000Z
|
2022-03-29T05:23:26.000Z
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 29 |
2015-04-23T20:58:30.000Z
|
2022-03-02T16:16:42.000Z
|
Imaging/Core/Testing/Python/TestHSVToRGB.py
|
forestGzh/VTK
|
bc98327275bd5cfa95c5825f80a2755a458b6da8
|
[
"BSD-3-Clause"
] | 1,044 |
2015-01-05T22:48:27.000Z
|
2022-03-31T02:38:26.000Z
|
#!/usr/bin/env python
# Regression test: draw an RGB test pattern, convert it RGB -> HSV -> RGB,
# and display the round-tripped result in an image viewer.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
# Black background.  NOTE(review): the box exceeds the 0..320 extent;
# presumably the canvas clips the draw — confirm against vtkImageCanvasSource2D.
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# Hue scale: six 50px-wide boxes along the bottom row.
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
# Round-trip the canvas through HSV and back to RGB; the displayed image
# should be visually identical to the original pattern.
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
# Cast to float so the viewer's window/level mapping applies cleanly.
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
# --- end of script --
| 34.285714 | 67 | 0.814583 | 325 | 2,400 | 6.009231 | 0.298462 | 0.223758 | 0.048643 | 0.069124 | 0.108039 | 0.023554 | 0 | 0 | 0 | 0 | 0 | 0.153102 | 0.052917 | 2,400 | 69 | 68 | 34.782609 | 0.706115 | 0.11625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.033898 | 0 | 0.033898 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
076eec8de4f676b9d586492c7ab7750df189a96a
| 296 |
py
|
Python
|
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
kelas_2b/echa.py
|
barizraihan/belajarpython
|
57df4c939600dd34a519599d6c78178bfb55063b
|
[
"MIT"
] | null | null | null |
import csv
class echa:
    """Reads the kelas_2b/echa.csv inventory file and prints its rows."""

    def werehousing(self):
        # Echo the first five columns of every record in the CSV file.
        with open('kelas_2b/echa.csv', 'r') as csvfile:
            for record in csv.reader(csvfile, delimiter=','):
                print("menampilkan data barang:", record[0], record[1], record[2], record[3], record[4])
| 32.888889 | 93 | 0.567568 | 41 | 296 | 4.02439 | 0.682927 | 0.163636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028302 | 0.283784 | 296 | 8 | 94 | 37 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0.14527 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
076f84eca9f11a3725b25d5cf7a8fa60fb6dd720
| 3,399 |
py
|
Python
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,971 |
2019-10-16T23:53:16.000Z
|
2022-03-31T20:58:24.000Z
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,851 |
2020-01-10T16:23:44.000Z
|
2022-03-31T22:14:53.000Z
|
tests/test_handler_surface_distance.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 614 |
2020-01-14T19:18:01.000Z
|
2022-03-31T14:06:14.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
    radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
    """
    Return a 3D image with a sphere inside. Voxel values will be
    1 inside the sphere, and 0 elsewhere.

    Args:
        radius: radius of sphere (in terms of number of voxels, can be partial)
        centre: location of sphere centre.
        im_shape: shape of image to create

    See also:
        :py:meth:`~create_test_image_3d`
    """
    volume = np.zeros(im_shape, dtype=np.int32)
    # One open grid per axis, holding signed offsets from the sphere centre.
    offsets = np.ogrid[
        -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
    ]
    # Squared Euclidean distance from the centre, broadcast over the volume.
    squared_distance = sum(axis * axis for axis in offsets)
    volume[squared_distance <= radius * radius] = 1
    return volume
# Prediction volume shaped (1, 1, D, H, W): a sphere offset from the ground truth.
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20)))[None, None]
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20)))[None]]
# An all-zero "empty" prediction with the same shape as the sphere.
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)

TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
    # TODO test multi node Surface Distance
    def test_compute(self):
        """Metric accumulates across updates and matches the known distances."""
        sur_metric = SurfaceDistance(include_background=True)

        def _val_func(engine, batch):
            pass

        engine = Engine(_val_func)
        sur_metric.attach(engine, "surface_distance")

        # (sample, expected running mean, decimal places; None = exact/inf).
        expectations = [
            (TEST_SAMPLE_1, 4.17133, 4),
            (TEST_SAMPLE_2, 2.08566, 4),
            (TEST_SAMPLE_3, float("inf"), None),
            (TEST_SAMPLE_4, float("inf"), None),
        ]
        for (y_pred, y), expected, places in expectations:
            sur_metric.update([y_pred, y])
            if places is None:
                self.assertAlmostEqual(sur_metric.compute(), expected)
            else:
                self.assertAlmostEqual(sur_metric.compute(), expected, places=places)

    def test_shape_mismatch(self):
        """Mismatched prediction/label shapes must raise."""
        sur_metric = SurfaceDistance(include_background=True)
        with self.assertRaises((AssertionError, ValueError)):
            bad_label = torch.ones((1, 1, 10, 10, 10))
            sur_metric.update([TEST_SAMPLE_1[0], bad_label])
# Run the test cases above when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 35.778947 | 120 | 0.692262 | 490 | 3,399 | 4.602041 | 0.344898 | 0.06918 | 0.023947 | 0.035477 | 0.303769 | 0.22306 | 0.213747 | 0.149889 | 0.149889 | 0.109978 | 0 | 0.036229 | 0.204178 | 3,399 | 94 | 121 | 36.159574 | 0.797412 | 0.283024 | 0 | 0.176471 | 0 | 0 | 0.0126 | 0 | 0 | 0 | 0 | 0.010638 | 0.098039 | 1 | 0.078431 | false | 0.019608 | 0.117647 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
0773947b769d5f943efc051b2beaf2ee562da724
| 1,231 |
py
|
Python
|
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
AppImageBuilder/commands/file.py
|
gouchi/appimage-builder
|
40e9851c573179e066af116fb906e9cad8099b59
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
from .command import Command
class FileError(RuntimeError):
    """Raised when the `file` command exits with a non-zero status."""
class File(Command):
    """Thin wrapper around the `file` command for identifying file contents."""

    def __init__(self):
        super().__init__('file')
        # Keep the command runner quiet; callers inspect the output themselves.
        self.log_stdout = False
        self.log_command = False

    def query(self, path):
        """Return `file`'s brief description of `path`; raise FileError on failure."""
        self._run(['file', '-b', '--exclude', 'ascii', path])
        if self.return_code != 0:
            raise FileError('\n'.join(self.stderr))

        return '\n'.join(self.stdout)

    def is_executable_elf(self, path):
        """Return True when `path` is described as an executable ELF binary."""
        description = self.query(path)
        return ('ELF' in description) and ('executable' in description)
| 31.564103 | 80 | 0.685621 | 166 | 1,231 | 5 | 0.554217 | 0.066265 | 0.031325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005225 | 0.222583 | 1,231 | 38 | 81 | 32.394737 | 0.862069 | 0.490658 | 0 | 0 | 0 | 0 | 0.066775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0.055556 | 0.111111 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
077860d7dfef7192b10ddd84d4a9115cb45934f6
| 290 |
py
|
Python
|
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
config.py
|
Pasmikh/quiz_please_bot
|
2b619b359d8021be57b404525013c53403d6cde1
|
[
"MIT"
] | null | null | null |
# Day names used when scheduling quiz games.
days_of_week = ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday', 'Sunday']
# Currently selected operation; mutated at runtime by the bot handlers.
operation = ''
# Top-level menu options presented to users.
options = ['Info', 'Check-in/Out', 'Edit games', 'Back']
# Telegram nicknames granted admin privileges — replace with real nicknames.
admins = ['admin1_telegram_nickname', 'admin2_telegram_nickname']
# Days currently open for check-in; populated at runtime.
avail_days = []
# Telegram bot API token — replace the placeholder before deployment.
TOKEN = 'bot_token'
# NOTE(review): `id_of_group_chat` is undefined, so importing this module raises
# NameError until it is replaced with the actual group chat id — confirm intent.
group_id = id_of_group_chat
| 41.428571 | 88 | 0.713793 | 37 | 290 | 5.27027 | 0.810811 | 0.164103 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.089655 | 290 | 7 | 89 | 41.428571 | 0.731061 | 0 | 0 | 0 | 0 | 0 | 0.47079 | 0.164948 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
077ab159d3a90c5c7c3094919ba408b1a2cadaa4
| 663 |
py
|
Python
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | 1 |
2021-07-31T10:06:50.000Z
|
2021-07-31T10:06:50.000Z
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | 1 |
2021-05-05T18:15:48.000Z
|
2021-05-05T18:15:48.000Z
|
tests/test_missing_process.py
|
ricklupton/sphinx_probs_rdf
|
bcae27a37162c1a4c4b329af6759a0b5b52cab7a
|
[
"MIT"
] | null | null | null |
import pytest
from rdflib import Graph, Namespace, Literal
from rdflib.namespace import RDF, RDFS
from sphinx_probs_rdf.directives import PROBS
# Prefix under which the test system's RDF resources live.
SYS = Namespace("http://example.org/system/")


@pytest.mark.sphinx(
    'probs_rdf', testroot='missing',
    confoverrides={'probs_rdf_system_prefix': str(SYS)})
def test_builder_reports_warning_for_missing_process(app, status, warning):
    """The build must not succeed and must warn about the missing child process."""
    app.builder.build_all()

    build_output = status.getvalue()
    assert "build succeeded" not in build_output

    warnings = warning.getvalue().strip()
    assert 'WARNING: Requested child "http://example.org/system/Missing" of "http://example.org/system/ErrorMissingProcess" is not a Process' in warnings
| 36.833333 | 153 | 0.764706 | 88 | 663 | 5.613636 | 0.534091 | 0.048583 | 0.08502 | 0.121457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120664 | 663 | 17 | 154 | 39 | 0.847341 | 0 | 0 | 0 | 0 | 0.076923 | 0.313725 | 0.034691 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.076923 | false | 0 | 0.307692 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
0785423db820435be27b39e1842db52b66a25a8e
| 2,953 |
py
|
Python
|
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
tiktorch/server/session/process.py
|
FynnBe/tiktorch
|
60c6fa9700e7ff73e44338e8755c56c6e8846f2f
|
[
"MIT"
] | null | null | null |
import dataclasses
import io
import multiprocessing as _mp
import uuid
import zipfile
from concurrent.futures import Future
from multiprocessing.connection import Connection
from typing import List, Optional, Tuple
import numpy
from tiktorch import log
from tiktorch.rpc import Shutdown
from tiktorch.rpc import mp as _mp_rpc
from tiktorch.rpc.mp import MPServer
from tiktorch.server.reader import eval_model_zip
from .backend import base
from .rpc_interface import IRPCModelSession
@dataclasses.dataclass
class ModelInfo:
    """Static description of a loaded model, as exposed to RPC clients."""

    # TODO: Test for model info
    name: str
    input_axes: str   # axis-order string for inputs — presumably e.g. "bcyx"; TODO confirm
    output_axes: str  # axis-order string for outputs
    # One (axis, size) list per valid input shape.
    valid_shapes: List[List[Tuple[str, int]]]
    halo: List[Tuple[str, int]]      # per-axis halo sizes
    offset: List[Tuple[str, int]]    # per-axis output offsets
    scale: List[Tuple[str, float]]   # per-axis output scaling factors
class ModelSessionProcess(IRPCModelSession):
    """RPC-facing session wrapping a loaded model and its worker backend."""

    def __init__(self, model_zip: bytes, devices: List[str]) -> None:
        # The model archive arrives as raw bytes; unpack it in memory.
        with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file:
            self._model = eval_model_zip(model_file, devices)
        self._datasets = {}
        self._worker = base.SessionBackend(self._model)

    def forward(self, input_tensor: numpy.ndarray) -> Future:
        """Queue a forward pass on the worker; resolve via the returned Future."""
        return self._worker.forward(input_tensor)

    def create_dataset(self, mean, stddev):
        """Register normalization stats under a fresh id and return that id."""
        dataset_id = uuid.uuid4().hex
        self._datasets[dataset_id] = {"mean": mean, "stddev": stddev}
        return dataset_id

    def get_model_info(self) -> ModelInfo:
        """Describe the loaded model for clients."""
        model = self._model
        return ModelInfo(
            model.name,
            model.input_axes,
            model.output_axes,
            valid_shapes=[model.input_shape],
            halo=model.halo,
            scale=model.scale,
            offset=model.offset,
        )

    def shutdown(self) -> Shutdown:
        """Stop the worker backend and acknowledge shutdown to the caller."""
        self._worker.shutdown()
        return Shutdown()
def _run_model_session_process(
    conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
):
    """Entry point of the model-session child process: serve RPC over `conn`."""
    try:
        # from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667
        import resource

        _soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        resource.setrlimit(resource.RLIMIT_NOFILE, (4096, hard))
    except ModuleNotFoundError:
        pass  # probably running on windows

    if log_queue:
        log.configure(log_queue)

    MPServer(ModelSessionProcess(model_zip, devices), conn).listen()
def start_model_session_process(
    model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
) -> Tuple[_mp.Process, IRPCModelSession]:
    """Spawn the model-session child process and return (process, RPC client)."""
    client_conn, server_conn = _mp.Pipe()
    worker = _mp.Process(
        target=_run_model_session_process,
        name="ModelSessionProcess",
        kwargs={"conn": server_conn, "devices": devices, "log_queue": log_queue, "model_zip": model_zip},
    )
    worker.start()
    client = _mp_rpc.create_client(IRPCModelSession, client_conn)
    return worker, client
| 30.132653 | 105 | 0.691162 | 364 | 2,953 | 5.379121 | 0.318681 | 0.045965 | 0.024515 | 0.022983 | 0.068948 | 0.068948 | 0.055158 | 0.055158 | 0.055158 | 0.055158 | 0 | 0.007735 | 0.211988 | 2,953 | 97 | 106 | 30.443299 | 0.833691 | 0.043007 | 0 | 0 | 0 | 0 | 0.02056 | 0 | 0 | 0 | 0 | 0.010309 | 0 | 1 | 0.092105 | false | 0.013158 | 0.223684 | 0.013158 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
0797199eb44c9067c6481782c2b094efbd8e10a6
| 3,917 |
py
|
Python
|
G5/DerivedData/ParameterProbing/checkme.py
|
shooking/ZoomPedalFun
|
7b9f5f4441cfe42e988e06cf6b98603c21ac2466
|
[
"CC0-1.0"
] | 9 |
2021-02-15T00:05:32.000Z
|
2022-01-24T14:01:46.000Z
|
G5/DerivedData/ParameterProbing/checkme.py
|
shooking/ZoomPedalFun
|
7b9f5f4441cfe42e988e06cf6b98603c21ac2466
|
[
"CC0-1.0"
] | 13 |
2021-08-23T02:07:26.000Z
|
2022-02-16T16:55:00.000Z
|
G5/DerivedData/ParameterProbing/checkme.py
|
shooking/ZoomPedalFun
|
7b9f5f4441cfe42e988e06cf6b98603c21ac2466
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: ascii -*-
import sys
import json
def check(data):
    """Extract the effect-parameter table following an "OnOff" marker.

    Zoom firmware stores one 0x30-byte record per parameter slot:
      - bytes 0..11:  NUL-padded ASCII parameter name
      - bytes 12..13: little-endian maximum value
      - bytes 16..17: little-endian default value
      - byte  0x2C:   non-zero when the parameter is pedal-assignable
    Slot 0 is the "OnOff" switch itself and slot 1 holds the effect name,
    so real parameters start at slot 2.

    Writes '<effect name>.json' describing the parameters and returns
    '<effect name>.OnOff'.  Returns None when no "OnOff" marker is found.
    """
    OnOffstart = data.find(b"OnOff")
    if OnOffstart == -1:
        return None

    OnOffblockSize = 0x30

    # The effect name lives in slot 1, immediately after the OnOff slot.
    fxName = ""
    for j in range(12):
        if data[OnOffstart + j + OnOffblockSize] == 0x00:
            break
        fxName = fxName + chr(data[OnOffstart + j + OnOffblockSize])

    tD = {"fxname": fxName}

    mmax = []
    mdefault = []
    name = []
    mpedal = []
    numParameters = 0
    try:
        # The bound is intentionally far too large: running off the end of
        # `data` (IndexError) or hitting a non-ASCII byte ends the scan.
        for j in range(0, 2000):
            base = OnOffstart + j * OnOffblockSize
            currName = ""
            for i in range(12):
                byte = data[base + i]
                if byte == 0x00:
                    break
                currName = currName + chr(byte)
                if byte & 0x80:
                    raise ValueError("Non binary char")
            if currName == "":
                break
            name.append(currName)
            mmax.append(data[base + 12] + data[base + 13] * 256)
            mdefault.append(data[base + 16] + data[base + 17] * 256)
            mpedal.append(bool(data[base + 0x2C]))
            numParameters = numParameters + 1
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; the IndexError here is the expected loop terminator.
        pass

    # Slot 0 is the OnOff state and slot 1 the effect name, so there are two
    # fewer real parameters than populated slots.
    tD['Parameters'] = []
    for i in range(numParameters - 2):
        tD['Parameters'].append({'name': name[i + 2], 'mmax': mmax[i + 2], 'mdefault': mdefault[i + 2], 'pedal': mpedal[i + 2]})

    # Dump the table next to the script, one JSON file per effect.
    with open(fxName + '.json', "w") as f:
        json.dump(tD, f, indent=4)
    return fxName + '.OnOff'
# handles a zoom firmware
if __name__ == "__main__":
    if len(sys.argv) == 2:
        with open(sys.argv[1], "rb") as firmware:
            payload = firmware.read()
        check(payload)
| 40.381443 | 129 | 0.451876 | 370 | 3,917 | 4.762162 | 0.324324 | 0.166856 | 0.170261 | 0.23042 | 0.363791 | 0.251419 | 0.097616 | 0.097616 | 0.052213 | 0.052213 | 0 | 0.042115 | 0.430176 | 3,917 | 96 | 130 | 40.802083 | 0.747312 | 0.100332 | 0 | 0.090909 | 0 | 0 | 0.039259 | 0 | 0 | 0 | 0.008822 | 0 | 0 | 1 | 0.018182 | false | 0.018182 | 0.036364 | 0 | 0.072727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
07abd88c1750bfa23ce141be4914e78e9e578d95
| 316 |
py
|
Python
|
sqlakeyset/__init__.py
|
jhihruei/sqlakeyset
|
0aa0f6e041dc37bc5f918303578875ad334cad6c
|
[
"Unlicense"
] | null | null | null |
sqlakeyset/__init__.py
|
jhihruei/sqlakeyset
|
0aa0f6e041dc37bc5f918303578875ad334cad6c
|
[
"Unlicense"
] | null | null | null |
sqlakeyset/__init__.py
|
jhihruei/sqlakeyset
|
0aa0f6e041dc37bc5f918303578875ad334cad6c
|
[
"Unlicense"
] | null | null | null |
from .columns import OC
from .paging import get_page, select_page, process_args
from .results import serialize_bookmark, unserialize_bookmark, Page, Paging
# Public API re-exported by `from sqlakeyset import *`.
__all__ = [
    'OC',
    'get_page', 'select_page',
    'serialize_bookmark', 'unserialize_bookmark',
    'Page', 'Paging',
    'process_args',
]
| 19.75 | 75 | 0.693038 | 36 | 316 | 5.694444 | 0.416667 | 0.068293 | 0.126829 | 0.165854 | 0.44878 | 0.44878 | 0 | 0 | 0 | 0 | 0 | 0 | 0.199367 | 316 | 15 | 76 | 21.066667 | 0.810277 | 0 | 0 | 0 | 0 | 0 | 0.257143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.230769 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
07abdc1f2ef1ad7ab554d9cccaa9f73782091369
| 6,609 |
py
|
Python
|
low_rank_local_connectivity/models/simple_model.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1 |
2022-03-13T21:48:52.000Z
|
2022-03-13T21:48:52.000Z
|
low_rank_local_connectivity/models/simple_model.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | null | null | null |
low_rank_local_connectivity/models/simple_model.py
|
shaun95/google-research
|
d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5
|
[
"Apache-2.0"
] | 1 |
2022-03-30T07:20:29.000Z
|
2022-03-30T07:20:29.000Z
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple model for image classification.
The model is multiple
conv/locally_connected/wide_conv/low_rank_locally_connected layers followed
by a fully connected layer. Changes to the model architecture can be made by
modifying simple_model_config.py file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tensorflow.compat.v1 as tf
from low_rank_local_connectivity import layers
from low_rank_local_connectivity import utils
# Batch-normalization hyper-parameters shared by all BatchNormalization layers.
MOMENTUM = 0.9
EPS = 1e-5
class SimpleNetwork(tf.keras.Model):
  """Locally Connected Network.

  Builds a stack of conv / locally-connected / wide-conv /
  low-rank-locally-connected layers (per `config.layer_types`), each optionally
  followed by batch norm, then ReLU, and finishes with pooling/flatten plus a
  dense logits layer.
  """

  def __init__(self, config, variable_scope='simple_network'):
    super(SimpleNetwork, self).__init__()
    self.variable_scope = variable_scope
    # Deep-copy so mutations below (e.g. extending layer_types) do not leak
    # back into the caller's config object.
    self.config = copy.deepcopy(config)
    filters_list = self.config.num_filters_list
    depth = len(filters_list)
    # Parallel lists: the layers, and whether each layer takes `training=`.
    self.pass_is_training_list = []
    self.layers_list = []

    if self.config.num_channels < 1:
      raise ValueError('num_channels should be > 0')
    input_channels = self.config.num_channels
    if self.config.coord_conv:
      # Add two coordinate conv channels.
      input_channels = input_channels + 2

    # Pad layer_types with plain conv2d so every level has a type.
    if len(self.config.layer_types) < depth:
      self.config.layer_types.extend(
          ['conv2d'] * (depth - len(self.config.layer_types)))

    chin = input_channels
    for i, (kernel_size, num_filters, strides, layer_type) in enumerate(zip(
        self.config.kernel_size_list,
        filters_list,
        self.config.strides_list,
        self.config.layer_types)):
      padding = 'valid'
      if layer_type == 'conv2d':
        chout = num_filters
        layer = tf.keras.layers.Conv2D(
            filters=chout,
            kernel_size=kernel_size,
            strides=(strides, strides),
            padding=padding,
            activation=None,
            use_bias=not self.config.batch_norm,
            kernel_initializer=self.config.kernel_initializer,
            name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type))
      elif layer_type == 'wide_conv2d':
        # Conv. layer with equivalent params to low rank locally connected.
        if self.config.rank < 1:
          raise ValueError('rank should be > 0 for %s layer.' % layer_type)
        # Channel count chosen so parameter count matches the low-rank
        # locally-connected layer of the same rank.
        chout = int((self.config.rank * chin + num_filters) / float(
            chin + num_filters) * num_filters)
        layer = tf.keras.layers.Conv2D(
            filters=chout if i < (depth-1)
            else int(num_filters * self.config.rank),
            kernel_size=kernel_size, strides=(strides, strides),
            padding=padding,
            activation=None,
            use_bias=not self.config.batch_norm,
            kernel_initializer=self.config.kernel_initializer,
            name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type))
      elif layer_type == 'locally_connected2d':
        # Full locally connected layer.
        chout = num_filters
        layer = tf.keras.layers.LocallyConnected2D(
            filters=chout,
            kernel_size=(kernel_size, kernel_size),
            strides=(strides, strides),
            padding=padding,
            activation=None,
            use_bias=True,  # not self.config.batch_norm,
            name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type),
            kernel_initializer=self.config.kernel_initializer)
      elif layer_type == 'low_rank_locally_connected2d':
        if self.config.rank < 1:
          raise ValueError('rank should be > 0 for %s layer.' % layer_type)
        chout = num_filters
        layer = layers.LowRankLocallyConnected2D(
            filters=chout,
            kernel_size=(kernel_size, kernel_size),
            strides=(strides, strides),
            padding=padding,
            activation=None,
            use_bias=not self.config.batch_norm,
            name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type),
            kernel_initializer=self.config.kernel_initializer,
            combining_weights_initializer=(
                self.config.combining_weights_initializer),
            spatial_rank=self.config.rank,
            normalize_weights=self.config.normalize_weights,
            input_dependent=config.input_dependent,
            share_row_combining_weights=self.config.share_row_combining_weights,
            share_col_combining_weights=self.config.share_col_combining_weights)
      else:
        raise ValueError('Can not recognize layer %s type.' % layer_type)

      chin = chout
      self.layers_list.append(layer)
      self.pass_is_training_list.append(False)

      if self.config.batch_norm:
        # Only BatchNormalization needs the `training=` flag at call time.
        layer = tf.keras.layers.BatchNormalization(
            trainable=True, momentum=MOMENTUM, epsilon=EPS)
        self.layers_list.append(layer)
        self.pass_is_training_list.append(True)

      layer = tf.keras.layers.ReLU()
      self.layers_list.append(layer)
      self.pass_is_training_list.append(False)

    if self.config.global_avg_pooling:
      self.layers_list.append(tf.keras.layers.GlobalAveragePooling2D())
    else:
      self.layers_list.append(tf.keras.layers.Flatten())
    self.pass_is_training_list.append(False)

    self.layers_list.append(tf.keras.layers.Dense(
        units=self.config.num_classes, activation=None, use_bias=True,
        name='logits'))
    self.pass_is_training_list.append(False)

  def __call__(self, images, is_training):
    """Runs the network; returns (logits, dict of per-layer activations)."""
    endpoints = {}
    if self.config.coord_conv:
      # Append position channels.
      net = tf.concat([images, utils.position_channels(images)], axis=3)
    else:
      net = images

    for i, (pass_is_training, layer) in enumerate(
        zip(self.pass_is_training_list, self.layers_list)):
      net = layer(net, training=is_training) if pass_is_training else layer(net)
      endpoints['layer%d' % i] = net
      # Keep batch-norm moving-average updates reachable (TF1-style graph
      # collection plus Keras-style add_update).
      tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, layer.updates)
      self.add_update(layer.updates)

    logits = net
    return logits, endpoints
| 37.982759 | 80 | 0.681192 | 843 | 6,609 | 5.11981 | 0.252669 | 0.07646 | 0.029194 | 0.029194 | 0.40987 | 0.368397 | 0.352641 | 0.305607 | 0.288925 | 0.267609 | 0 | 0.006463 | 0.227417 | 6,609 | 173 | 81 | 38.202312 | 0.838817 | 0.157966 | 0 | 0.373016 | 0 | 0 | 0.045528 | 0.005059 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0.071429 | 0.063492 | 0 | 0.095238 | 0.007937 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
07c0c2bb274ab76681ad18763446d5b0c976c985
| 242 |
py
|
Python
|
pixelate_task_1.py
|
Swayamshu/Pixelate_Sample_Arena
|
d8e8b4614987f9302a19ec1e20a922618e67b943
|
[
"MIT"
] | null | null | null |
pixelate_task_1.py
|
Swayamshu/Pixelate_Sample_Arena
|
d8e8b4614987f9302a19ec1e20a922618e67b943
|
[
"MIT"
] | null | null | null |
pixelate_task_1.py
|
Swayamshu/Pixelate_Sample_Arena
|
d8e8b4614987f9302a19ec1e20a922618e67b943
|
[
"MIT"
] | null | null | null |
import gym
import pix_sample_arena
import time
import pybullet as p
import pybullet_data
import cv2
# Entry point: create the sample arena and step the physics engine forever.
if __name__ == "__main__":
    env = gym.make("pix_sample_arena-v0")
    while True:
        p.stepSimulation()
        # NOTE(review): sleeps 100 seconds between physics steps — possibly
        # meant to be a much smaller value (e.g. 1/100 s); confirm intent.
        time.sleep(100)
| 18.615385 | 41 | 0.702479 | 36 | 242 | 4.361111 | 0.666667 | 0.11465 | 0.178344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031579 | 0.214876 | 242 | 13 | 42 | 18.615385 | 0.794737 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
07c638a7630e99e901331aada0e29b538ff7310d
| 1,482 |
py
|
Python
|
forms/QRGenerator.py
|
Rono-Barto-Co/Project-QR
|
e80fc5a41f25542038c090311844912790cb1478
|
[
"MIT"
] | 3 |
2019-07-04T03:27:06.000Z
|
2019-09-06T08:52:35.000Z
|
forms/QRGenerator.py
|
Rono-Barto-Co/Project-QR
|
e80fc5a41f25542038c090311844912790cb1478
|
[
"MIT"
] | null | null | null |
forms/QRGenerator.py
|
Rono-Barto-Co/Project-QR
|
e80fc5a41f25542038c090311844912790cb1478
|
[
"MIT"
] | null | null | null |
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired
class QRGenerator(FlaskForm):
    """Form collecting the settings used to generate a QR code."""

    code_content = StringField('Content', validators=[DataRequired()])
    # The first tuple of each choice list is a placeholder entry whose label
    # names the field; it reuses a real value so submitting it stays valid.
    code_size = SelectField('Size', choices=[
        ('15', 'Size'), ('5', '5'), ('10', '10'), ('15', '15'),
        ('20', '20'), ('25', '25'), ('30', '30'),
    ])
    code_color = SelectField('Colour', choices=[
        ('white', 'Colour'), ('white', 'White'), ('yellow', 'Yellow'),
        ('lime', 'Green'), ('#ffa500', 'Orange'),
    ])
    code_correction = SelectField('Error Correction', choices=[
        ('H', 'Error Correction'), ('H', 'H'), ('L', 'L'),
        ('M', 'M'), ('Q', 'Q'),
    ])
    code_image = StringField('Image URL')
    generate_code = SubmitField('Generate QR Code')
| 54.888889 | 89 | 0.323887 | 87 | 1,482 | 5.436782 | 0.471264 | 0.046512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040419 | 0.549258 | 1,482 | 26 | 90 | 57 | 0.667665 | 0 | 0 | 0 | 0 | 0 | 0.112011 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
07c6a477f6bfebee04a539e1d02b2df95226ab91
| 1,259 |
py
|
Python
|
Quiz/m2_advanced_quants/l5_volatility/volatility_estimation.py
|
jcrangel/AI-for-Trading
|
c3b865e992f8eb8deda91e7641428eef1d343636
|
[
"Apache-2.0"
] | null | null | null |
Quiz/m2_advanced_quants/l5_volatility/volatility_estimation.py
|
jcrangel/AI-for-Trading
|
c3b865e992f8eb8deda91e7641428eef1d343636
|
[
"Apache-2.0"
] | null | null | null |
Quiz/m2_advanced_quants/l5_volatility/volatility_estimation.py
|
jcrangel/AI-for-Trading
|
c3b865e992f8eb8deda91e7641428eef1d343636
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
import numpy as np
def estimate_volatility(prices, l):
    """Create an exponential moving average model of the volatility of a stock
    price, and return the most recent (last) volatility estimate.

    Parameters
    ----------
    prices : pandas.Series
        A series of adjusted closing prices for a stock.
    l : float
        The 'lambda' parameter of the exponential moving average model. Making
        this value smaller will cause the model to weight older terms less
        relative to more recent terms.

    Returns
    -------
    last_vol : float
        The last element of your exponential moving average volatility model
        series.
    """
    # Volatility is modelled on squared log returns, not on the raw prices:
    # the EWMA of r_t**2 (decay lambda, i.e. alpha = 1 - lambda) estimates
    # the variance, and its square root the volatility.  The first return is
    # NaN (no previous price) and is ignored by ewm's weighting.
    log_returns = np.log(prices / prices.shift(1))
    ewma_variance = (log_returns ** 2).ewm(alpha=1 - l).mean()
    # Use .iloc[-1]: positional indexing with [] on a Series is deprecated.
    return np.sqrt(ewma_variance).iloc[-1]
def test_run(filename='data.csv'):
    """Test run estimate_volatility() with stock prices from a file.

    Parameters
    ----------
    filename : str
        Path to a CSV file with a 'date' column and one price column.
    """
    # squeeze=True was removed from read_csv in pandas 2.0; squeeze the
    # single-column DataFrame into a Series explicitly instead.
    prices = pd.read_csv(
        filename, parse_dates=['date'], index_col='date'
    ).squeeze('columns')
    print("Most recent volatility estimate: {:.6f}".format(
        estimate_volatility(prices, 0.7)))
| 31.475 | 96 | 0.659253 | 165 | 1,259 | 4.915152 | 0.515152 | 0.083847 | 0.088779 | 0.071517 | 0.064118 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007376 | 0.246227 | 1,259 | 39 | 97 | 32.282051 | 0.847208 | 0.586974 | 0 | 0 | 0 | 0 | 0.145161 | 0 | 0 | 0 | 0 | 0.025641 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.5 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
07d5b427e69bdc09287f1c66c02797e0db0b274b
| 1,218 |
py
|
Python
|
examples/question_answering/qa_sparse_train.py
|
ebell495/nn_pruning
|
41263ab898117a639f3f219c23a4cecc8bc0e3f3
|
[
"Apache-2.0"
] | 250 |
2021-02-22T15:50:04.000Z
|
2022-03-31T08:12:02.000Z
|
examples/question_answering/qa_sparse_train.py
|
vuiseng9/nn_pruning
|
8f4a14dd63d621483cbc1bc4eb34600d66e9e71b
|
[
"Apache-2.0"
] | 28 |
2021-02-22T15:54:34.000Z
|
2022-03-17T08:57:38.000Z
|
examples/question_answering/qa_sparse_train.py
|
vuiseng9/nn_pruning
|
8f4a14dd63d621483cbc1bc4eb34600d66e9e71b
|
[
"Apache-2.0"
] | 31 |
2021-02-22T16:07:17.000Z
|
2022-03-28T09:17:24.000Z
|
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sparse Fine-tuning the library models for question answering.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
from nn_pruning.sparse_trainer import SparseTrainer
from .qa_train import QATrainer
# SparseTrainer should appear first in the base classes, as its functions must override QATrainer and its base classes (Trainer)
class QASparseTrainer(SparseTrainer, QATrainer):
    """Question-answering trainer with sparse fine-tuning support.

    MRO note: ``SparseTrainer`` is listed first so its method overrides
    take precedence over ``QATrainer`` and its base classes (Trainer).
    """

    def __init__(self, sparse_args, *args, **kwargs):
        # Each base is initialized explicitly rather than cooperatively
        # via super(): QATrainer receives the ordinary Trainer arguments,
        # SparseTrainer only its own sparse_args.
        # NOTE(review): presumably SparseTrainer.__init__ does not
        # forward to the next class in the MRO — confirm in nn_pruning.
        QATrainer.__init__(self, *args, **kwargs)
        SparseTrainer.__init__(self, sparse_args)
| 43.5 | 128 | 0.769294 | 177 | 1,218 | 5.19774 | 0.632768 | 0.065217 | 0.028261 | 0.034783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008824 | 0.162562 | 1,218 | 27 | 129 | 45.111111 | 0.893137 | 0.729885 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 |
0
| 1 |
07d7992d7ae8299b452c378aa6d4664a38bab354
| 1,252 |
py
|
Python
|
src/petronia/aid/bootstrap/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 19 |
2017-06-21T10:28:24.000Z
|
2021-12-31T11:49:28.000Z
|
src/petronia/aid/bootstrap/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 10 |
2016-11-11T18:57:57.000Z
|
2021-02-01T15:33:43.000Z
|
src/petronia/aid/bootstrap/__init__.py
|
groboclown/petronia
|
486338023d19cee989e92f0c5692680f1a37811f
|
[
"MIT"
] | 3 |
2017-09-17T03:29:35.000Z
|
2019-06-03T10:43:08.000Z
|
"""
Common Petronia imports for bootstrap parts of an extension.
This should be imported along with the `simp` module.
"""
from ...base.bus import (
EventBus,
ListenerRegistrar,
ListenerSetup,
QueuePriority,
ExtensionMetadataStruct,
register_event,
EVENT_WILDCARD,
TARGET_WILDCARD,
QUEUE_EVENT_NORMAL,
QUEUE_EVENT_HIGH,
QUEUE_EVENT_IO,
QUEUE_EVENT_TYPES
)
from ...base.participant import (
create_singleton_identity,
NOT_PARTICIPANT,
)
from ...base.events import (
# These are generally just bootstrap events.
DisposeCompleteEvent,
as_dispose_complete_listener,
RequestDisposeEvent,
as_request_dispose_listener,
SystemStartedEvent,
as_system_started_listener,
)
from ...base.events.bus import (
EventProtectionModel,
GLOBAL_EVENT_PROTECTION,
INTERNAL_EVENT_PROTECTION,
PRODUCE_EVENT_PROTECTION,
CONSUME_EVENT_PROTECTION,
REQUEST_EVENT_PROTECTION,
RESPONSE_EVENT_PROTECTION,
)
from ...core.extensions.api import ANY_VERSION
from ...core.shutdown.api import (
SystemShutdownEvent,
as_system_shutdown_listener,
SystemShutdownFinalizeEvent,
as_system_shutdown_finalize_listener,
TARGET_ID_SYSTEM_SHUTDOWN,
)
| 19.261538 | 60 | 0.747604 | 130 | 1,252 | 6.846154 | 0.561538 | 0.101124 | 0.031461 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.188498 | 1,252 | 64 | 61 | 19.5625 | 0.875984 | 0.126997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.139535 | 0 | 0.139535 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 |
0
| 1 |
07dab8d1754575bc1f3f83e4e0cadea3c8dcd3af
| 8,104 |
py
|
Python
|
src/biotite/application/application.py
|
claudejrogers/biotite
|
3635bc9071506ecb85ddd9b1dbe6a430295e060e
|
[
"BSD-3-Clause"
] | null | null | null |
src/biotite/application/application.py
|
claudejrogers/biotite
|
3635bc9071506ecb85ddd9b1dbe6a430295e060e
|
[
"BSD-3-Clause"
] | null | null | null |
src/biotite/application/application.py
|
claudejrogers/biotite
|
3635bc9071506ecb85ddd9b1dbe6a430295e060e
|
[
"BSD-3-Clause"
] | null | null | null |
# This source code is part of the Biotite package and is distributed
# under the 3-Clause BSD License. Please see 'LICENSE.rst' for further
# information.
# NOTE(review): __name__ is deliberately reassigned so that members defined
# here report "biotite.application" as their module — presumably for the
# documentation tooling; confirm against the package's conventions.
__name__ = "biotite.application"
__author__ = "Patrick Kunzmann"
# Public API of this module.  Note that "TimeoutError" shadows the builtin
# of the same name for anyone doing a star import.
__all__ = ["Application", "AppStateError", "TimeoutError", "VersionError",
           "AppState", "requires_state"]
import abc
import time
from functools import wraps
from enum import Flag, auto
class AppState(Flag):
    """
    This enum type represents the app states of an application.

    Being a :class:`Flag`, members can be combined with ``|`` so that
    :func:`requires_state` can accept several permitted states at once.
    """
    # Instantiated; run parameters may still be set.
    CREATED = auto()
    # start() was called; the application is running.
    RUNNING = auto()
    # The application finished; results not yet evaluated.
    FINISHED = auto()
    # join() completed successfully; results are accessible.
    JOINED = auto()
    # cancel() was called, or evaluation failed; no accessible results.
    CANCELLED = auto()
def requires_state(app_state):
    """
    A decorator for methods of :class:`Application` subclasses that
    raises an :class:`AppStateError` in case the method is called, when
    the :class:`Application` is not in the specified :class:`AppState`
    `app_state`.

    Parameters
    ----------
    app_state : AppState
        The required app state.

    Examples
    --------
    Raises :class:`AppStateError` when `function` is called,
    if :class:`Application` is not in one of the specified states:

    >>> @requires_state(AppState.RUNNING | AppState.FINISHED)
    ... def function(self):
    ...     pass
    """
    def state_guard(method):
        @wraps(method)
        def guarded(*call_args, **call_kwargs):
            # Bound methods always receive the instance as the first
            # positional argument.
            instance = call_args[0]
            # Bitwise AND on the Flag enum: a falsy result means the
            # current state is not among the permitted ones.
            if instance._state & app_state:
                return method(*call_args, **call_kwargs)
            raise AppStateError(
                f"The application is in {instance.get_app_state()} state, "
                f"but {app_state} state is required"
            )
        return guarded
    return state_guard
class Application(metaclass=abc.ABCMeta):
    """
    This class is a wrapper around an external piece of runnable
    software in any sense. Subclasses of this abstract base class
    specify the respective kind of software and the way of interacting
    with it.

    Every :class:`Application` runs through a different app states
    (instances of enum :class:`AppState`) from its creation until its
    termination:
    Directly after its instantiation the app is in the *CREATED* state.
    In this state further parameters can be set for the application run.
    After the user calls the :func:`start()` method, the app state is
    set to *RUNNING* and the :class:`Application` type specific
    :func:`run()` method is called.
    When the application finishes the AppState changes to *FINISHED*.
    This is checked via the :class:`Application` type specific
    :func:`is_finished()` method.
    The user can now call the :func:`join()` method, concluding the
    application in the *JOINED* state and making the results of the
    application accessible by executing the :class:`Application`
    type specific :func:`evaluate()` method.
    Furthermore this executes the :class:`Application` type specific
    :func:`clean_up()` method.
    :func:`join()` can even be called in the *RUNNING* state:
    This will constantly check :func:`is_finished()` and will directly
    go into the *JOINED* state as soon as the application reaches the
    *FINISHED* state.
    Calling the :func:`cancel()` method while the application is
    *RUNNING* or *FINISHED* leaves the application in the *CANCELLED*
    state.
    This triggers the :func:`clean_up()` method, too, but there are no
    accessible results.
    If a method is called in an unsuitable app state, an
    :class:`AppStateError` is called.

    The application run behaves like an additional thread: Between the
    call of :func:`start()` and :func:`join()` other Python code can be
    executed, while the application runs in the background.
    """
    def __init__(self):
        # Every application starts its lifecycle in the CREATED state.
        self._state = AppState.CREATED
    @requires_state(AppState.CREATED)
    def start(self):
        """
        Start the application run and set its state to *RUNNING*.
        This can only be done from the *CREATED* state.
        """
        self.run()
        # The timeout clock used by join() starts here, i.e. after
        # run() has commenced the application.
        self._start_time = time.time()
        self._state = AppState.RUNNING
    @requires_state(AppState.RUNNING | AppState.FINISHED)
    def join(self, timeout=None):
        """
        Conclude the application run and set its state to *JOINED*.
        This can only be done from the *RUNNING* or *FINISHED* state.
        If the application is *FINISHED* the joining process happens
        immediately, if otherwise the application is *RUNNING*, this
        method waits until the application is *FINISHED*.

        Parameters
        ----------
        timeout : float, optional
            If this parameter is specified, the :class:`Application`
            only waits for finishing until this value (in seconds) runs
            out.
            After this time is exceeded a :class:`TimeoutError` is
            raised and the application is cancelled.

        Raises
        ------
        TimeoutError
            If the joining process exceeds the `timeout` value.
        """
        # Initial wait before the first poll of the app state.
        time.sleep(self.wait_interval())
        while self.get_app_state() != AppState.FINISHED:
            if timeout is not None and time.time()-self._start_time > timeout:
                self.cancel()
                raise TimeoutError(
                    f"The application expired its timeout "
                    f"({timeout:.1f} s)"
                )
            else:
                time.sleep(self.wait_interval())
        # NOTE(review): one more wait after FINISHED has been detected,
        # presumably to let the application settle before evaluation —
        # confirm whether this extra sleep is required.
        time.sleep(self.wait_interval())
        try:
            self.evaluate()
        except AppStateError:
            # Lifecycle violations propagate unchanged, without
            # cancelling the application.
            raise
        except:
            # NOTE(review): bare 'except' also catches BaseException
            # (e.g. KeyboardInterrupt).  Any failure during evaluation
            # moves the application to CANCELLED before re-raising.
            self._state = AppState.CANCELLED
            raise
        else:
            self._state = AppState.JOINED
        # Only reached on success; the except branches re-raise.
        self.clean_up()
    @requires_state(AppState.RUNNING | AppState.FINISHED)
    def cancel(self):
        """
        Cancel the application when in *RUNNING* or *FINISHED* state.
        """
        self._state = AppState.CANCELLED
        self.clean_up()
    def get_app_state(self):
        """
        Get the current app state.

        Returns
        -------
        app_state : AppState
            The current app state.
        """
        if self._state == AppState.RUNNING:
            # Lazily promote RUNNING to FINISHED as soon as the
            # subclass reports completion.
            if self.is_finished():
                self._state = AppState.FINISHED
        return self._state
    @abc.abstractmethod
    def run(self):
        """
        Commence the application run. Called in :func:`start()`.

        PROTECTED: Override when inheriting.
        """
        pass
    @abc.abstractmethod
    def is_finished(self):
        """
        Check if the application has finished.

        PROTECTED: Override when inheriting.

        Returns
        -------
        finished : bool
            True if the application has finished, false otherwise
        """
        pass
    @abc.abstractmethod
    def wait_interval(self):
        """
        The time interval of :func:`is_finished()` calls in the joining
        process.

        PROTECTED: Override when inheriting.

        Returns
        -------
        interval : float
            Time (in seconds) between calls of :func:`is_finished()` in
            :func:`join()`
        """
        pass
    @abc.abstractmethod
    def evaluate(self):
        """
        Evaluate application results. Called in :func:`join()`.

        PROTECTED: Override when inheriting.
        """
        pass
    def clean_up(self):
        """
        Do clean up work after the application terminates.

        PROTECTED: Optionally override when inheriting.
        """
        pass
# Raised by requires_state() when a method is invoked in a wrong AppState.
class AppStateError(Exception):
    """
    Indicate that the application lifecycle was violated.
    """
    pass
class TimeoutError(Exception):
    """
    Indicate that the application's timeout expired.

    NOTE(review): this shadows the builtin ``TimeoutError`` (it is also
    exported via ``__all__``) — confirm the shadowing is intentional.
    """
    pass
# Raised when a wrapped application's version does not meet requirements.
class VersionError(Exception):
    """
    Indicate that the application's version is invalid.
    """
    pass
| 31.169231 | 79 | 0.604516 | 918 | 8,104 | 5.269063 | 0.248366 | 0.072359 | 0.024602 | 0.01902 | 0.167666 | 0.096547 | 0.052719 | 0.013645 | 0 | 0 | 0 | 0.000534 | 0.307132 | 8,104 | 260 | 80 | 31.169231 | 0.860908 | 0.538129 | 0 | 0.287356 | 0 | 0 | 0.086515 | 0.009107 | 0 | 0 | 0 | 0 | 0 | 1 | 0.149425 | false | 0.091954 | 0.045977 | 0 | 0.356322 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 |
0
| 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.