| text | code_tokens | avg_line_len | score | texts | scores | num_lines | avg_score |
| --- | --- | --- | --- | --- | --- | --- | --- |
| string (75 - 104k chars) | sequence | float64 (7.91 - 980) | float64 (0 - 0.18) | sequence | sequence | int64 (3 - 2.77k) | float64 (0 - 0.37) |
def get_session(username, password, cookie_path=COOKIE_PATH, cache=True,
                cache_expiry=300, cache_path=CACHE_PATH, driver='phantomjs'):
    """Get session, existing or new."""
    class USPSAuth(AuthBase):  # pylint: disable=too-few-public-methods
        """USPS authorization storage."""

        def __init__(self, username, password, cookie_path, driver):
            """Init."""
            self.username = username
            self.password = password
            self.cookie_path = cookie_path
            self.driver = driver

        def __call__(self, r):
            """Call is no-op."""
            return r

    session = requests.Session()
    if cache:
        session = requests_cache.core.CachedSession(cache_name=cache_path,
                                                    expire_after=cache_expiry)
    session.auth = USPSAuth(username, password, cookie_path, driver)
    session.headers.update({'User-Agent': USER_AGENT})
    if os.path.exists(cookie_path):
        _LOGGER.debug("cookie found at: %s", cookie_path)
        session.cookies = _load_cookies(cookie_path)
    else:
        _login(session)
    return session
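A minimal usage sketch, assuming the surrounding module supplies COOKIE_PATH, CACHE_PATH, USER_AGENT, and the _login/_load_cookies helpers; the credentials and URL below are placeholders.

# Hypothetical usage; credentials and the endpoint are illustrative only.
session = get_session('my_username', 'my_password', cache=True, cache_expiry=300)
resp = session.get('https://reg.usps.com/portal/selfcare')  # placeholder endpoint
print(resp.status_code)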
"def",
"get_session",
"(",
"username",
",",
"password",
",",
"cookie_path",
"=",
"COOKIE_PATH",
",",
"cache",
"=",
"True",
",",
"cache_expiry",
"=",
"300",
",",
"cache_path",
"=",
"CACHE_PATH",
",",
"driver",
"=",
"'phantomjs'",
")",
":",
"class",
"USPSAuth",
"(",
"AuthBase",
")",
":",
"# pylint: disable=too-few-public-methods",
"\"\"\"USPS authorization storage.\"\"\"",
"def",
"__init__",
"(",
"self",
",",
"username",
",",
"password",
",",
"cookie_path",
",",
"driver",
")",
":",
"\"\"\"Init.\"\"\"",
"self",
".",
"username",
"=",
"username",
"self",
".",
"password",
"=",
"password",
"self",
".",
"cookie_path",
"=",
"cookie_path",
"self",
".",
"driver",
"=",
"driver",
"def",
"__call__",
"(",
"self",
",",
"r",
")",
":",
"\"\"\"Call is no-op.\"\"\"",
"return",
"r",
"session",
"=",
"requests",
".",
"Session",
"(",
")",
"if",
"cache",
":",
"session",
"=",
"requests_cache",
".",
"core",
".",
"CachedSession",
"(",
"cache_name",
"=",
"cache_path",
",",
"expire_after",
"=",
"cache_expiry",
")",
"session",
".",
"auth",
"=",
"USPSAuth",
"(",
"username",
",",
"password",
",",
"cookie_path",
",",
"driver",
")",
"session",
".",
"headers",
".",
"update",
"(",
"{",
"'User-Agent'",
":",
"USER_AGENT",
"}",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"cookie_path",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"cookie found at: %s\"",
",",
"cookie_path",
")",
"session",
".",
"cookies",
"=",
"_load_cookies",
"(",
"cookie_path",
")",
"else",
":",
"_login",
"(",
"session",
")",
"return",
"session"
] | 39.034483 | 0.000862 | [
"def get_session(username, password, cookie_path=COOKIE_PATH, cache=True,\n",
" cache_expiry=300, cache_path=CACHE_PATH, driver='phantomjs'):\n",
" \"\"\"Get session, existing or new.\"\"\"\n",
" class USPSAuth(AuthBase): # pylint: disable=too-few-public-methods\n",
" \"\"\"USPS authorization storage.\"\"\"\n",
"\n",
" def __init__(self, username, password, cookie_path, driver):\n",
" \"\"\"Init.\"\"\"\n",
" self.username = username\n",
" self.password = password\n",
" self.cookie_path = cookie_path\n",
" self.driver = driver\n",
"\n",
" def __call__(self, r):\n",
" \"\"\"Call is no-op.\"\"\"\n",
" return r\n",
"\n",
" session = requests.Session()\n",
" if cache:\n",
" session = requests_cache.core.CachedSession(cache_name=cache_path,\n",
" expire_after=cache_expiry)\n",
" session.auth = USPSAuth(username, password, cookie_path, driver)\n",
" session.headers.update({'User-Agent': USER_AGENT})\n",
" if os.path.exists(cookie_path):\n",
" _LOGGER.debug(\"cookie found at: %s\", cookie_path)\n",
" session.cookies = _load_cookies(cookie_path)\n",
" else:\n",
" _login(session)\n",
" return session"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 29 | 0.001916 |
def get_apkid(apkfile):
    """Read (appid, versionCode, versionName) from an APK

    This first tries to do quick binary XML parsing to just get the
    values that are needed. It will fall back to full androguard
    parsing, which is slow, if it can't find the versionName value or
    versionName is set to an Android String Resource (e.g. an integer
    hex value that starts with @).

    """
    if not os.path.exists(apkfile):
        log.error("'{apkfile}' does not exist!".format(apkfile=apkfile))

    appid = None
    versionCode = None
    versionName = None
    with zipfile.ZipFile(apkfile) as apk:
        with apk.open('AndroidManifest.xml') as manifest:
            axml = AXMLParser(manifest.read())
            count = 0
            while axml.is_valid():
                _type = next(axml)
                count += 1
                if _type == START_TAG:
                    for i in range(0, axml.getAttributeCount()):
                        name = axml.getAttributeName(i)
                        _type = axml.getAttributeValueType(i)
                        _data = axml.getAttributeValueData(i)
                        value = format_value(_type, _data, lambda _: axml.getAttributeValue(i))
                        if appid is None and name == 'package':
                            appid = value
                        elif versionCode is None and name == 'versionCode':
                            if value.startswith('0x'):
                                versionCode = str(int(value, 16))
                            else:
                                versionCode = value
                        elif versionName is None and name == 'versionName':
                            versionName = value

                    if axml.name == 'manifest':
                        break
                elif _type == END_TAG or _type == TEXT or _type == END_DOCUMENT:
                    raise RuntimeError('{path}: <manifest> must be the first element in AndroidManifest.xml'
                                       .format(path=apkfile))

    if not versionName or versionName[0] == '@':
        a = APK(apkfile)
        versionName = ensure_final_value(a.package, a.get_android_resources(), a.get_androidversion_name())
    if not versionName:
        versionName = ''  # versionName is expected to always be a str

    return appid, versionCode, versionName.strip('\0')
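A usage sketch; 'app.apk' is a placeholder path, and the AXML/androguard helpers imported by the surrounding module are assumed to be available.

appid, version_code, version_name = get_apkid('app.apk')  # hypothetical APK path
print(appid, version_code, version_name)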
"def",
"get_apkid",
"(",
"apkfile",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"apkfile",
")",
":",
"log",
".",
"error",
"(",
"\"'{apkfile}' does not exist!\"",
".",
"format",
"(",
"apkfile",
"=",
"apkfile",
")",
")",
"appid",
"=",
"None",
"versionCode",
"=",
"None",
"versionName",
"=",
"None",
"with",
"zipfile",
".",
"ZipFile",
"(",
"apkfile",
")",
"as",
"apk",
":",
"with",
"apk",
".",
"open",
"(",
"'AndroidManifest.xml'",
")",
"as",
"manifest",
":",
"axml",
"=",
"AXMLParser",
"(",
"manifest",
".",
"read",
"(",
")",
")",
"count",
"=",
"0",
"while",
"axml",
".",
"is_valid",
"(",
")",
":",
"_type",
"=",
"next",
"(",
"axml",
")",
"count",
"+=",
"1",
"if",
"_type",
"==",
"START_TAG",
":",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"axml",
".",
"getAttributeCount",
"(",
")",
")",
":",
"name",
"=",
"axml",
".",
"getAttributeName",
"(",
"i",
")",
"_type",
"=",
"axml",
".",
"getAttributeValueType",
"(",
"i",
")",
"_data",
"=",
"axml",
".",
"getAttributeValueData",
"(",
"i",
")",
"value",
"=",
"format_value",
"(",
"_type",
",",
"_data",
",",
"lambda",
"_",
":",
"axml",
".",
"getAttributeValue",
"(",
"i",
")",
")",
"if",
"appid",
"is",
"None",
"and",
"name",
"==",
"'package'",
":",
"appid",
"=",
"value",
"elif",
"versionCode",
"is",
"None",
"and",
"name",
"==",
"'versionCode'",
":",
"if",
"value",
".",
"startswith",
"(",
"'0x'",
")",
":",
"versionCode",
"=",
"str",
"(",
"int",
"(",
"value",
",",
"16",
")",
")",
"else",
":",
"versionCode",
"=",
"value",
"elif",
"versionName",
"is",
"None",
"and",
"name",
"==",
"'versionName'",
":",
"versionName",
"=",
"value",
"if",
"axml",
".",
"name",
"==",
"'manifest'",
":",
"break",
"elif",
"_type",
"==",
"END_TAG",
"or",
"_type",
"==",
"TEXT",
"or",
"_type",
"==",
"END_DOCUMENT",
":",
"raise",
"RuntimeError",
"(",
"'{path}: <manifest> must be the first element in AndroidManifest.xml'",
".",
"format",
"(",
"path",
"=",
"apkfile",
")",
")",
"if",
"not",
"versionName",
"or",
"versionName",
"[",
"0",
"]",
"==",
"'@'",
":",
"a",
"=",
"APK",
"(",
"apkfile",
")",
"versionName",
"=",
"ensure_final_value",
"(",
"a",
".",
"package",
",",
"a",
".",
"get_android_resources",
"(",
")",
",",
"a",
".",
"get_androidversion_name",
"(",
")",
")",
"if",
"not",
"versionName",
":",
"versionName",
"=",
"''",
"# versionName is expected to always be a str",
"return",
"appid",
",",
"versionCode",
",",
"versionName",
".",
"strip",
"(",
"'\\0'",
")"
] | 45.076923 | 0.002088 | [
"def get_apkid(apkfile):\n",
" \"\"\"Read (appid, versionCode, versionName) from an APK\n",
"\n",
" This first tries to do quick binary XML parsing to just get the\n",
" values that are needed. It will fallback to full androguard\n",
" parsing, which is slow, if it can't find the versionName value or\n",
" versionName is set to a Android String Resource (e.g. an integer\n",
" hex value that starts with @).\n",
"\n",
" \"\"\"\n",
" if not os.path.exists(apkfile):\n",
" log.error(\"'{apkfile}' does not exist!\".format(apkfile=apkfile))\n",
"\n",
" appid = None\n",
" versionCode = None\n",
" versionName = None\n",
" with zipfile.ZipFile(apkfile) as apk:\n",
" with apk.open('AndroidManifest.xml') as manifest:\n",
" axml = AXMLParser(manifest.read())\n",
" count = 0\n",
" while axml.is_valid():\n",
" _type = next(axml)\n",
" count += 1\n",
" if _type == START_TAG:\n",
" for i in range(0, axml.getAttributeCount()):\n",
" name = axml.getAttributeName(i)\n",
" _type = axml.getAttributeValueType(i)\n",
" _data = axml.getAttributeValueData(i)\n",
" value = format_value(_type, _data, lambda _: axml.getAttributeValue(i))\n",
" if appid is None and name == 'package':\n",
" appid = value\n",
" elif versionCode is None and name == 'versionCode':\n",
" if value.startswith('0x'):\n",
" versionCode = str(int(value, 16))\n",
" else:\n",
" versionCode = value\n",
" elif versionName is None and name == 'versionName':\n",
" versionName = value\n",
"\n",
" if axml.name == 'manifest':\n",
" break\n",
" elif _type == END_TAG or _type == TEXT or _type == END_DOCUMENT:\n",
" raise RuntimeError('{path}: <manifest> must be the first element in AndroidManifest.xml'\n",
" .format(path=apkfile))\n",
"\n",
" if not versionName or versionName[0] == '@':\n",
" a = APK(apkfile)\n",
" versionName = ensure_final_value(a.package, a.get_android_resources(), a.get_androidversion_name())\n",
" if not versionName:\n",
" versionName = '' # versionName is expected to always be a str\n",
"\n",
" return appid, versionCode, versionName.strip('\\0')"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.009174311926605505,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0.018518518518518517
] | 52 | 0.001148 |
def _getNextArticleBatch(self):
    """Download the next batch of articles, based on the article URIs in the URI list."""
    # try to get more uris, if none
    self._articlePage += 1
    # if we have already obtained all pages, then exit
    if self._totalPages is not None and self._articlePage > self._totalPages:
        return
    self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,
        sortBy=self._sortBy, sortByAsc=self._sortByAsc,
        returnInfo=self._returnInfo))
    if self._er._verboseOutput:
        print("Downloading article page %d..." % (self._articlePage))
    res = self._er.execQuery(self)
    if "error" in res:
        print("Error while obtaining a list of articles: " + res["error"])
    else:
        self._totalPages = res.get("articles", {}).get("pages", 0)
        results = res.get("articles", {}).get("results", [])
        self._articleList.extend(results)
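A sketch of the driving loop this method supports, assuming a query object whose _articlePage starts at 0 and _totalPages at None, as used above.

# Drain all pages; the method itself returns early once all pages are fetched.
while query._totalPages is None or query._articlePage < query._totalPages:
    query._getNextArticleBatch()
print(len(query._articleList), 'articles collected')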
"def",
"_getNextArticleBatch",
"(",
"self",
")",
":",
"# try to get more uris, if none",
"self",
".",
"_articlePage",
"+=",
"1",
"# if we have already obtained all pages, then exit",
"if",
"self",
".",
"_totalPages",
"!=",
"None",
"and",
"self",
".",
"_articlePage",
">",
"self",
".",
"_totalPages",
":",
"return",
"self",
".",
"setRequestedResult",
"(",
"RequestArticlesInfo",
"(",
"page",
"=",
"self",
".",
"_articlePage",
",",
"sortBy",
"=",
"self",
".",
"_sortBy",
",",
"sortByAsc",
"=",
"self",
".",
"_sortByAsc",
",",
"returnInfo",
"=",
"self",
".",
"_returnInfo",
")",
")",
"if",
"self",
".",
"_er",
".",
"_verboseOutput",
":",
"print",
"(",
"\"Downloading article page %d...\"",
"%",
"(",
"self",
".",
"_articlePage",
")",
")",
"res",
"=",
"self",
".",
"_er",
".",
"execQuery",
"(",
"self",
")",
"if",
"\"error\"",
"in",
"res",
":",
"print",
"(",
"\"Error while obtaining a list of articles: \"",
"+",
"res",
"[",
"\"error\"",
"]",
")",
"else",
":",
"self",
".",
"_totalPages",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"pages\"",
",",
"0",
")",
"results",
"=",
"res",
".",
"get",
"(",
"\"articles\"",
",",
"{",
"}",
")",
".",
"get",
"(",
"\"results\"",
",",
"[",
"]",
")",
"self",
".",
"_articleList",
".",
"extend",
"(",
"results",
")"
] | 50.052632 | 0.008256 | [
"def _getNextArticleBatch(self):\n",
" \"\"\"download next batch of articles based on the article uris in the uri list\"\"\"\n",
" # try to get more uris, if none\n",
" self._articlePage += 1\n",
" # if we have already obtained all pages, then exit\n",
" if self._totalPages != None and self._articlePage > self._totalPages:\n",
" return\n",
" self.setRequestedResult(RequestArticlesInfo(page=self._articlePage,\n",
" sortBy=self._sortBy, sortByAsc=self._sortByAsc,\n",
" returnInfo = self._returnInfo))\n",
" if self._er._verboseOutput:\n",
" print(\"Downloading article page %d...\" % (self._articlePage))\n",
" res = self._er.execQuery(self)\n",
" if \"error\" in res:\n",
" print(\"Error while obtaining a list of articles: \" + res[\"error\"])\n",
" else:\n",
" self._totalPages = res.get(\"articles\", {}).get(\"pages\", 0)\n",
" results = res.get(\"articles\", {}).get(\"results\", [])\n",
" self._articleList.extend(results)"
] | [
0,
0.022727272727272728,
0,
0,
0,
0.01282051282051282,
0,
0,
0.016666666666666666,
0.06818181818181818,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025
] | 19 | 0.00762 |
def _add_missing_rows(self, indexes):
    """
    Given a list of indexes, find all the indexes that are not currently in the Series and make a new row for
    that index by appending to the Series. This does not maintain sorted order for the index.

    :param indexes: list of indexes
    :return: nothing
    """
    new_indexes = [x for x in indexes if x not in self._index]
    for x in new_indexes:
        self._add_row(x)
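An illustration of the contract, assuming a Series-like object exposing the _index list and _add_row method used above.

series._index = ['a', 'b']            # hypothetical existing index
series._add_missing_rows(['a', 'c'])  # only 'c' gets a new row appended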
"def",
"_add_missing_rows",
"(",
"self",
",",
"indexes",
")",
":",
"new_indexes",
"=",
"[",
"x",
"for",
"x",
"in",
"indexes",
"if",
"x",
"not",
"in",
"self",
".",
"_index",
"]",
"for",
"x",
"in",
"new_indexes",
":",
"self",
".",
"_add_row",
"(",
"x",
")"
] | 41.363636 | 0.008602 | [
"def _add_missing_rows(self, indexes):\n",
" \"\"\"\n",
" Given a list of indexes, find all the indexes that are not currently in the Series and make a new row for\n",
" that index by appending to the Series. This does not maintain sorted order for the index.\n",
"\n",
" :param indexes: list of indexes\n",
" :return: nothing\n",
" \"\"\"\n",
" new_indexes = [x for x in indexes if x not in self._index]\n",
" for x in new_indexes:\n",
" self._add_row(x)"
] | [
0,
0.08333333333333333,
0.008771929824561403,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 11 | 0.012548 |
def collect_blame_info(cls, matches):
    """Runs git blame on files, for the specified sets of line ranges.

    If no line range tuples are provided, it will do all lines.
    """
    old_area = None
    for filename, ranges in matches:
        area, name = os.path.split(filename)
        if not area:
            area = '.'
        if area != old_area:
            print("\n\n%s/\n" % area)
            old_area = area
        print("%s " % name, end="")
        filter = cls.build_line_range_filter(ranges)
        command = ['git', 'blame', '--line-porcelain'] + filter + [name]
        os.chdir(area)
        p = subprocess.Popen(command, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if err:
            print(" <<<<<<<<<< Unable to collect 'git blame' info:", err)
        else:
            yield out
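A consumption sketch; Blamer is a hypothetical class exposing this classmethod, and the path and line ranges are placeholders.

matches = [('src/app.py', [(1, 20)])]  # (filename, line-range tuples)
for blame in Blamer.collect_blame_info(matches):
    print(blame.decode('utf-8', errors='replace')[:200])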
"def",
"collect_blame_info",
"(",
"cls",
",",
"matches",
")",
":",
"old_area",
"=",
"None",
"for",
"filename",
",",
"ranges",
"in",
"matches",
":",
"area",
",",
"name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"if",
"not",
"area",
":",
"area",
"=",
"'.'",
"if",
"area",
"!=",
"old_area",
":",
"print",
"(",
"\"\\n\\n%s/\\n\"",
"%",
"area",
")",
"old_area",
"=",
"area",
"print",
"(",
"\"%s \"",
"%",
"name",
",",
"end",
"=",
"\"\"",
")",
"filter",
"=",
"cls",
".",
"build_line_range_filter",
"(",
"ranges",
")",
"command",
"=",
"[",
"'git'",
",",
"'blame'",
",",
"'--line-porcelain'",
"]",
"+",
"filter",
"+",
"[",
"name",
"]",
"os",
".",
"chdir",
"(",
"area",
")",
"p",
"=",
"subprocess",
".",
"Popen",
"(",
"command",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"PIPE",
")",
"out",
",",
"err",
"=",
"p",
".",
"communicate",
"(",
")",
"if",
"err",
":",
"print",
"(",
"\" <<<<<<<<<< Unable to collect 'git blame' info:\"",
",",
"err",
")",
"else",
":",
"yield",
"out"
] | 39.5 | 0.00206 | [
"def collect_blame_info(cls, matches):\n",
" \"\"\"Runs git blame on files, for the specified sets of line ranges.\n",
"\n",
" If no line range tuples are provided, it will do all lines.\n",
" \"\"\"\n",
" old_area = None\n",
" for filename, ranges in matches:\n",
" area, name = os.path.split(filename)\n",
" if not area:\n",
" area = '.'\n",
" if area != old_area:\n",
" print(\"\\n\\n%s/\\n\" % area)\n",
" old_area = area\n",
" print(\"%s \" % name, end=\"\")\n",
" filter = cls.build_line_range_filter(ranges)\n",
" command = ['git', 'blame', '--line-porcelain'] + filter + [name]\n",
" os.chdir(area)\n",
" p = subprocess.Popen(command, stdout=subprocess.PIPE,\n",
" stderr=subprocess.PIPE)\n",
" out, err = p.communicate()\n",
" if err:\n",
" print(\" <<<<<<<<<< Unable to collect 'git blame' info:\", err)\n",
" else:\n",
" yield out"
] | [
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04
] | 24 | 0.002222 |
def find_non_contiguous(all_items):
    """Find any items that have slots that aren't contiguous"""
    non_contiguous = []
    for item in all_items:
        if item.slots.count() < 2:
            # No point in checking
            continue
        last_slot = None
        for slot in item.slots.all().order_by('end_time'):
            if last_slot:
                if last_slot.end_time != slot.get_start_time():
                    non_contiguous.append(item)
                    break
            last_slot = slot
    return non_contiguous
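A hypothetical Django-style call; ScheduleItem is a placeholder model whose slots relation provides the end_time and get_start_time() used above.

for item in find_non_contiguous(ScheduleItem.objects.all()):
    print('gap in schedule for', item)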
"def",
"find_non_contiguous",
"(",
"all_items",
")",
":",
"non_contiguous",
"=",
"[",
"]",
"for",
"item",
"in",
"all_items",
":",
"if",
"item",
".",
"slots",
".",
"count",
"(",
")",
"<",
"2",
":",
"# No point in checking",
"continue",
"last_slot",
"=",
"None",
"for",
"slot",
"in",
"item",
".",
"slots",
".",
"all",
"(",
")",
".",
"order_by",
"(",
"'end_time'",
")",
":",
"if",
"last_slot",
":",
"if",
"last_slot",
".",
"end_time",
"!=",
"slot",
".",
"get_start_time",
"(",
")",
":",
"non_contiguous",
".",
"append",
"(",
"item",
")",
"break",
"last_slot",
"=",
"slot",
"return",
"non_contiguous"
] | 35.333333 | 0.001838 | [
"def find_non_contiguous(all_items):\n",
" \"\"\"Find any items that have slots that aren't contiguous\"\"\"\n",
" non_contiguous = []\n",
" for item in all_items:\n",
" if item.slots.count() < 2:\n",
" # No point in checking\n",
" continue\n",
" last_slot = None\n",
" for slot in item.slots.all().order_by('end_time'):\n",
" if last_slot:\n",
" if last_slot.end_time != slot.get_start_time():\n",
" non_contiguous.append(item)\n",
" break\n",
" last_slot = slot\n",
" return non_contiguous"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04
] | 15 | 0.002667 |
def addRow(self, *row):
    """
    Add a row to the table. All items are converted to strings.

    @type row: tuple
    @keyword row: Each argument is a cell in the table.
    """
    row = [ str(item) for item in row ]
    len_row = [ len(item) for item in row ]
    width = self.__width
    len_old = len(width)
    len_new = len(row)
    known = min(len_old, len_new)
    missing = len_new - len_old
    if missing > 0:
        width.extend( len_row[ -missing : ] )
    elif missing < 0:
        len_row.extend( [0] * (-missing) )
    self.__width = [ max( width[i], len_row[i] ) for i in compat.xrange(len(len_row)) ]
    self.__cols.append(row)
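A usage sketch, assuming a Table class that exposes addRow as above.

table = Table()
table.addRow('PID', 'Name')  # header row
table.addRow(1234, 'init')   # non-strings are converted via str()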
"def",
"addRow",
"(",
"self",
",",
"*",
"row",
")",
":",
"row",
"=",
"[",
"str",
"(",
"item",
")",
"for",
"item",
"in",
"row",
"]",
"len_row",
"=",
"[",
"len",
"(",
"item",
")",
"for",
"item",
"in",
"row",
"]",
"width",
"=",
"self",
".",
"__width",
"len_old",
"=",
"len",
"(",
"width",
")",
"len_new",
"=",
"len",
"(",
"row",
")",
"known",
"=",
"min",
"(",
"len_old",
",",
"len_new",
")",
"missing",
"=",
"len_new",
"-",
"len_old",
"if",
"missing",
">",
"0",
":",
"width",
".",
"extend",
"(",
"len_row",
"[",
"-",
"missing",
":",
"]",
")",
"elif",
"missing",
"<",
"0",
":",
"len_row",
".",
"extend",
"(",
"[",
"0",
"]",
"*",
"(",
"-",
"missing",
")",
")",
"self",
".",
"__width",
"=",
"[",
"max",
"(",
"width",
"[",
"i",
"]",
",",
"len_row",
"[",
"i",
"]",
")",
"for",
"i",
"in",
"compat",
".",
"xrange",
"(",
"len",
"(",
"len_row",
")",
")",
"]",
"self",
".",
"__cols",
".",
"append",
"(",
"row",
")"
] | 35.75 | 0.02861 | [
"def addRow(self, *row):\n",
" \"\"\"\n",
" Add a row to the table. All items are converted to strings.\n",
"\n",
" @type row: tuple\n",
" @keyword row: Each argument is a cell in the table.\n",
" \"\"\"\n",
" row = [ str(item) for item in row ]\n",
" len_row = [ len(item) for item in row ]\n",
" width = self.__width\n",
" len_old = len(width)\n",
" len_new = len(row)\n",
" known = min(len_old, len_new)\n",
" missing = len_new - len_old\n",
" if missing > 0:\n",
" width.extend( len_row[ -missing : ] )\n",
" elif missing < 0:\n",
" len_row.extend( [0] * (-missing) )\n",
" self.__width = [ max( width[i], len_row[i] ) for i in compat.xrange(len(len_row)) ]\n",
" self.__cols.append(row)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.0625,
0.041666666666666664,
0.03225806451612903,
0,
0,
0.025,
0,
0,
0.1,
0,
0.0425531914893617,
0.05434782608695652,
0.03225806451612903
] | 20 | 0.023696 |
def form(value):
    """
    Format numbers in a nice way.

    >>> form(0)
    '0'
    >>> form(0.0)
    '0.0'
    >>> form(0.0001)
    '1.000E-04'
    >>> form(1003.4)
    '1,003'
    >>> form(103.4)
    '103'
    >>> form(9.3)
    '9.30000'
    >>> form(-1.2)
    '-1.2'
    """
    if isinstance(value, FLOAT + INT):
        if value <= 0:
            return str(value)
        elif value < .001:
            return '%.3E' % value
        elif value < 10 and isinstance(value, FLOAT):
            return '%.5f' % value
        elif value > 1000:
            return '{:,d}'.format(int(round(value)))
        elif numpy.isnan(value):
            return 'NaN'
        else:  # in the range 10-1000
            return str(int(value))
    elif isinstance(value, bytes):
        return decode(value)
    elif isinstance(value, str):
        return value
    elif isinstance(value, numpy.object_):
        return str(value)
    elif hasattr(value, '__len__') and len(value) > 1:
        return ' '.join(map(form, value))
    return str(value)
"def",
"form",
"(",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"FLOAT",
"+",
"INT",
")",
":",
"if",
"value",
"<=",
"0",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"value",
"<",
".001",
":",
"return",
"'%.3E'",
"%",
"value",
"elif",
"value",
"<",
"10",
"and",
"isinstance",
"(",
"value",
",",
"FLOAT",
")",
":",
"return",
"'%.5f'",
"%",
"value",
"elif",
"value",
">",
"1000",
":",
"return",
"'{:,d}'",
".",
"format",
"(",
"int",
"(",
"round",
"(",
"value",
")",
")",
")",
"elif",
"numpy",
".",
"isnan",
"(",
"value",
")",
":",
"return",
"'NaN'",
"else",
":",
"# in the range 10-1000",
"return",
"str",
"(",
"int",
"(",
"value",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"bytes",
")",
":",
"return",
"decode",
"(",
"value",
")",
"elif",
"isinstance",
"(",
"value",
",",
"str",
")",
":",
"return",
"value",
"elif",
"isinstance",
"(",
"value",
",",
"numpy",
".",
"object_",
")",
":",
"return",
"str",
"(",
"value",
")",
"elif",
"hasattr",
"(",
"value",
",",
"'__len__'",
")",
"and",
"len",
"(",
"value",
")",
">",
"1",
":",
"return",
"' '",
".",
"join",
"(",
"map",
"(",
"form",
",",
"value",
")",
")",
"return",
"str",
"(",
"value",
")"
] | 24.365854 | 0.000962 | [
"def form(value):\n",
" \"\"\"\n",
" Format numbers in a nice way.\n",
"\n",
" >>> form(0)\n",
" '0'\n",
" >>> form(0.0)\n",
" '0.0'\n",
" >>> form(0.0001)\n",
" '1.000E-04'\n",
" >>> form(1003.4)\n",
" '1,003'\n",
" >>> form(103.4)\n",
" '103'\n",
" >>> form(9.3)\n",
" '9.30000'\n",
" >>> form(-1.2)\n",
" '-1.2'\n",
" \"\"\"\n",
" if isinstance(value, FLOAT + INT):\n",
" if value <= 0:\n",
" return str(value)\n",
" elif value < .001:\n",
" return '%.3E' % value\n",
" elif value < 10 and isinstance(value, FLOAT):\n",
" return '%.5f' % value\n",
" elif value > 1000:\n",
" return '{:,d}'.format(int(round(value)))\n",
" elif numpy.isnan(value):\n",
" return 'NaN'\n",
" else: # in the range 10-1000\n",
" return str(int(value))\n",
" elif isinstance(value, bytes):\n",
" return decode(value)\n",
" elif isinstance(value, str):\n",
" return value\n",
" elif isinstance(value, numpy.object_):\n",
" return str(value)\n",
" elif hasattr(value, '__len__') and len(value) > 1:\n",
" return ' '.join(map(form, value))\n",
" return str(value)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 41 | 0.001161 |
def get_module_names(path_dir, exclude=None):
    "Search a given `path_dir` and return all the modules contained inside except those in `exclude`"
    if exclude is None: exclude = _default_exclude
    files = sorted(path_dir.glob('*'), key=lambda x: (x.is_dir(), x.name), reverse=True)  # directories first
    res = [f'{path_dir.name}']
    for f in files:
        if f.is_dir() and f.name in exclude: continue  # exclude directories
        if any([f.name.endswith(ex) for ex in exclude]): continue  # exclude extensions

        if f.suffix == '.py': res.append(f'{path_dir.name}.{f.stem}')
        elif f.is_dir(): res += [f'{path_dir.name}.{name}' for name in get_module_names(f)]
    return res
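A usage sketch with pathlib; 'fastai' is an illustrative package directory, and _default_exclude comes from the surrounding module.

from pathlib import Path
print(get_module_names(Path('fastai')))
# e.g. ['fastai', 'fastai.vision', 'fastai.vision.models', 'fastai.core', ...]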
"def",
"get_module_names",
"(",
"path_dir",
",",
"exclude",
"=",
"None",
")",
":",
"if",
"exclude",
"is",
"None",
":",
"exclude",
"=",
"_default_exclude",
"files",
"=",
"sorted",
"(",
"path_dir",
".",
"glob",
"(",
"'*'",
")",
",",
"key",
"=",
"lambda",
"x",
":",
"(",
"x",
".",
"is_dir",
"(",
")",
",",
"x",
".",
"name",
")",
",",
"reverse",
"=",
"True",
")",
"# directories first",
"res",
"=",
"[",
"f'{path_dir.name}'",
"]",
"for",
"f",
"in",
"files",
":",
"if",
"f",
".",
"is_dir",
"(",
")",
"and",
"f",
".",
"name",
"in",
"exclude",
":",
"continue",
"# exclude directories",
"if",
"any",
"(",
"[",
"f",
".",
"name",
".",
"endswith",
"(",
"ex",
")",
"for",
"ex",
"in",
"exclude",
"]",
")",
":",
"continue",
"# exclude extensions",
"if",
"f",
".",
"suffix",
"==",
"'.py'",
":",
"res",
".",
"append",
"(",
"f'{path_dir.name}.{f.stem}'",
")",
"elif",
"f",
".",
"is_dir",
"(",
")",
":",
"res",
"+=",
"[",
"f'{path_dir.name}.{name}'",
"for",
"name",
"in",
"get_module_names",
"(",
"f",
")",
"]",
"return",
"res"
] | 57.333333 | 0.018598 | [
"def get_module_names(path_dir, exclude=None):\n",
" if exclude is None: exclude = _default_exclude\n",
" \"Search a given `path_dir` and return all the modules contained inside except those in `exclude`\"\n",
" files = sorted(path_dir.glob('*'), key=lambda x: (x.is_dir(), x.name), reverse=True) # directories first\n",
" res = [f'{path_dir.name}']\n",
" for f in files:\n",
" if f.is_dir() and f.name in exclude: continue # exclude directories\n",
" if any([f.name.endswith(ex) for ex in exclude]): continue # exclude extensions\n",
"\n",
" if f.suffix == '.py': res.append(f'{path_dir.name}.{f.stem}')\n",
" elif f.is_dir(): res += [f'{path_dir.name}.{name}' for name in get_module_names(f)]\n",
" return res"
] | [
0,
0.0196078431372549,
0.00980392156862745,
0.01834862385321101,
0,
0,
0.02631578947368421,
0.034482758620689655,
0,
0.014285714285714285,
0.021739130434782608,
0.07142857142857142
] | 12 | 0.018001 |
def two_swap_helper(j, k, num_qubits, qubit_map):
    """
    Generate the permutation matrix that permutes two single-particle Hilbert
    spaces into adjacent positions.

    ALWAYS swaps j TO k. Recall that Hilbert spaces are ordered in decreasing
    qubit index order. Hence, j > k implies that j is to the left of k.

    End results:
        j == k: nothing happens
        j > k: Swap j right to k, until j at ind (k) and k at ind (k+1).
        j < k: Swap j left to k, until j at ind (k) and k at ind (k-1).

    Done in preparation for arbitrary 2-qubit gate application on ADJACENT
    qubits.

    :param int j: starting qubit index
    :param int k: ending qubit index
    :param int num_qubits: number of qubits in Hilbert space
    :param np.array qubit_map: current index mapping of qubits

    :return: tuple of swap matrix for the specified permutation,
             and the new qubit_map, after permutation is made
    :rtype: tuple (np.array, np.array)
    """
    if not (0 <= j < num_qubits and 0 <= k < num_qubits):
        raise ValueError("Permutation SWAP index not valid")

    perm = np.eye(2 ** num_qubits, dtype=np.complex128)
    new_qubit_map = np.copy(qubit_map)

    if j == k:
        # nothing happens
        return perm, new_qubit_map
    elif j > k:
        # swap j right to k, until j at ind (k) and k at ind (k+1)
        for i in range(j, k, -1):
            perm = qubit_adjacent_lifted_gate(i - 1, SWAP, num_qubits).dot(perm)
            new_qubit_map[i - 1], new_qubit_map[i] = new_qubit_map[i], new_qubit_map[i - 1]
    elif j < k:
        # swap j left to k, until j at ind (k) and k at ind (k-1)
        for i in range(j, k, 1):
            perm = qubit_adjacent_lifted_gate(i, SWAP, num_qubits).dot(perm)
            new_qubit_map[i], new_qubit_map[i + 1] = new_qubit_map[i + 1], new_qubit_map[i]

    return perm, new_qubit_map
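A small sketch moving qubit 2 next to qubit 0 in a 3-qubit register; SWAP and qubit_adjacent_lifted_gate are assumed from the surrounding module.

import numpy as np
perm, new_map = two_swap_helper(j=2, k=0, num_qubits=3, qubit_map=np.arange(3))
print(perm.shape)  # (8, 8) permutation on the full Hilbert space
print(new_map)     # updated qubit index mapping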
"def",
"two_swap_helper",
"(",
"j",
",",
"k",
",",
"num_qubits",
",",
"qubit_map",
")",
":",
"if",
"not",
"(",
"0",
"<=",
"j",
"<",
"num_qubits",
"and",
"0",
"<=",
"k",
"<",
"num_qubits",
")",
":",
"raise",
"ValueError",
"(",
"\"Permutation SWAP index not valid\"",
")",
"perm",
"=",
"np",
".",
"eye",
"(",
"2",
"**",
"num_qubits",
",",
"dtype",
"=",
"np",
".",
"complex128",
")",
"new_qubit_map",
"=",
"np",
".",
"copy",
"(",
"qubit_map",
")",
"if",
"j",
"==",
"k",
":",
"# nothing happens",
"return",
"perm",
",",
"new_qubit_map",
"elif",
"j",
">",
"k",
":",
"# swap j right to k, until j at ind (k) and k at ind (k+1)",
"for",
"i",
"in",
"range",
"(",
"j",
",",
"k",
",",
"-",
"1",
")",
":",
"perm",
"=",
"qubit_adjacent_lifted_gate",
"(",
"i",
"-",
"1",
",",
"SWAP",
",",
"num_qubits",
")",
".",
"dot",
"(",
"perm",
")",
"new_qubit_map",
"[",
"i",
"-",
"1",
"]",
",",
"new_qubit_map",
"[",
"i",
"]",
"=",
"new_qubit_map",
"[",
"i",
"]",
",",
"new_qubit_map",
"[",
"i",
"-",
"1",
"]",
"elif",
"j",
"<",
"k",
":",
"# swap j left to k, until j at ind (k) and k at ind (k-1)",
"for",
"i",
"in",
"range",
"(",
"j",
",",
"k",
",",
"1",
")",
":",
"perm",
"=",
"qubit_adjacent_lifted_gate",
"(",
"i",
",",
"SWAP",
",",
"num_qubits",
")",
".",
"dot",
"(",
"perm",
")",
"new_qubit_map",
"[",
"i",
"]",
",",
"new_qubit_map",
"[",
"i",
"+",
"1",
"]",
"=",
"new_qubit_map",
"[",
"i",
"+",
"1",
"]",
",",
"new_qubit_map",
"[",
"i",
"]",
"return",
"perm",
",",
"new_qubit_map"
] | 39.869565 | 0.002129 | [
"def two_swap_helper(j, k, num_qubits, qubit_map):\n",
" \"\"\"\n",
" Generate the permutation matrix that permutes two single-particle Hilbert\n",
" spaces into adjacent positions.\n",
"\n",
" ALWAYS swaps j TO k. Recall that Hilbert spaces are ordered in decreasing\n",
" qubit index order. Hence, j > k implies that j is to the left of k.\n",
"\n",
" End results:\n",
" j == k: nothing happens\n",
" j > k: Swap j right to k, until j at ind (k) and k at ind (k+1).\n",
" j < k: Swap j left to k, until j at ind (k) and k at ind (k-1).\n",
"\n",
" Done in preparation for arbitrary 2-qubit gate application on ADJACENT\n",
" qubits.\n",
"\n",
" :param int j: starting qubit index\n",
" :param int k: ending qubit index\n",
" :param int num_qubits: number of qubits in Hilbert space\n",
" :param np.array qubit_map: current index mapping of qubits\n",
"\n",
" :return: tuple of swap matrix for the specified permutation,\n",
" and the new qubit_map, after permutation is made\n",
" :rtype: tuple (np.array, np.array)\n",
" \"\"\"\n",
" if not (0 <= j < num_qubits and 0 <= k < num_qubits):\n",
" raise ValueError(\"Permutation SWAP index not valid\")\n",
"\n",
" perm = np.eye(2 ** num_qubits, dtype=np.complex128)\n",
" new_qubit_map = np.copy(qubit_map)\n",
"\n",
" if j == k:\n",
" # nothing happens\n",
" return perm, new_qubit_map\n",
" elif j > k:\n",
" # swap j right to k, until j at ind (k) and k at ind (k+1)\n",
" for i in range(j, k, -1):\n",
" perm = qubit_adjacent_lifted_gate(i - 1, SWAP, num_qubits).dot(perm)\n",
" new_qubit_map[i - 1], new_qubit_map[i] = new_qubit_map[i], new_qubit_map[i - 1]\n",
" elif j < k:\n",
" # swap j left to k, until j at ind (k) and k at ind (k-1)\n",
" for i in range(j, k, 1):\n",
" perm = qubit_adjacent_lifted_gate(i, SWAP, num_qubits).dot(perm)\n",
" new_qubit_map[i], new_qubit_map[i + 1] = new_qubit_map[i + 1], new_qubit_map[i]\n",
"\n",
" return perm, new_qubit_map"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.010869565217391304,
0,
0,
0,
0,
0.010869565217391304,
0,
0.03333333333333333
] | 46 | 0.001466 |
def clean_code(code, comments=True, macros=False, pragmas=False):
    """
    Naive comment and macro stripping from source code

    :param comments: If True, all comments are stripped from code
    :param macros: If True, all macros are stripped from code
    :param pragmas: If True, all pragmas are stripped from code

    :return: cleaned code. Line numbers are preserved with blank lines,
        and multiline comments and macros are supported. BUT comment-like
        strings are (wrongfully) treated as comments.
    """
    if macros or pragmas:
        lines = code.split('\n')
        in_macro = False
        in_pragma = False
        for i in range(len(lines)):
            l = lines[i].strip()

            if macros and (l.startswith('#') and not l.startswith('#pragma') or in_macro):
                lines[i] = ''
                in_macro = l.endswith('\\')
            if pragmas and (l.startswith('#pragma') or in_pragma):
                lines[i] = ''
                in_pragma = l.endswith('\\')
        code = '\n'.join(lines)

    if comments:
        idx = 0
        comment_start = None
        while idx < len(code) - 1:
            if comment_start is None and code[idx:idx + 2] == '//':
                end_idx = code.find('\n', idx)
                code = code[:idx] + code[end_idx:]
                idx -= end_idx - idx
            elif comment_start is None and code[idx:idx + 2] == '/*':
                comment_start = idx
            elif comment_start is not None and code[idx:idx + 2] == '*/':
                code = (code[:comment_start] +
                        '\n' * code[comment_start:idx].count('\n') +
                        code[idx + 2:])
                idx -= idx - comment_start
                comment_start = None
            idx += 1

    return code
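A quick check on a small C snippet, showing that line count is preserved while comments and macros are blanked.

src = '#define N 4\nint x; // counter\n/* multi\n   line */ int y;\n'
print(clean_code(src, comments=True, macros=True))
# -> '\nint x; \n\n int y;\n'  (same number of lines, comments/macros gone)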
"def",
"clean_code",
"(",
"code",
",",
"comments",
"=",
"True",
",",
"macros",
"=",
"False",
",",
"pragmas",
"=",
"False",
")",
":",
"if",
"macros",
"or",
"pragmas",
":",
"lines",
"=",
"code",
".",
"split",
"(",
"'\\n'",
")",
"in_macro",
"=",
"False",
"in_pragma",
"=",
"False",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lines",
")",
")",
":",
"l",
"=",
"lines",
"[",
"i",
"]",
".",
"strip",
"(",
")",
"if",
"macros",
"and",
"(",
"l",
".",
"startswith",
"(",
"'#'",
")",
"and",
"not",
"l",
".",
"startswith",
"(",
"'#pragma'",
")",
"or",
"in_macro",
")",
":",
"lines",
"[",
"i",
"]",
"=",
"''",
"in_macro",
"=",
"l",
".",
"endswith",
"(",
"'\\\\'",
")",
"if",
"pragmas",
"and",
"(",
"l",
".",
"startswith",
"(",
"'#pragma'",
")",
"or",
"in_pragma",
")",
":",
"lines",
"[",
"i",
"]",
"=",
"''",
"in_pragma",
"=",
"l",
".",
"endswith",
"(",
"'\\\\'",
")",
"code",
"=",
"'\\n'",
".",
"join",
"(",
"lines",
")",
"if",
"comments",
":",
"idx",
"=",
"0",
"comment_start",
"=",
"None",
"while",
"idx",
"<",
"len",
"(",
"code",
")",
"-",
"1",
":",
"if",
"comment_start",
"is",
"None",
"and",
"code",
"[",
"idx",
":",
"idx",
"+",
"2",
"]",
"==",
"'//'",
":",
"end_idx",
"=",
"code",
".",
"find",
"(",
"'\\n'",
",",
"idx",
")",
"code",
"=",
"code",
"[",
":",
"idx",
"]",
"+",
"code",
"[",
"end_idx",
":",
"]",
"idx",
"-=",
"end_idx",
"-",
"idx",
"elif",
"comment_start",
"is",
"None",
"and",
"code",
"[",
"idx",
":",
"idx",
"+",
"2",
"]",
"==",
"'/*'",
":",
"comment_start",
"=",
"idx",
"elif",
"comment_start",
"is",
"not",
"None",
"and",
"code",
"[",
"idx",
":",
"idx",
"+",
"2",
"]",
"==",
"'*/'",
":",
"code",
"=",
"(",
"code",
"[",
":",
"comment_start",
"]",
"+",
"'\\n'",
"*",
"code",
"[",
"comment_start",
":",
"idx",
"]",
".",
"count",
"(",
"'\\n'",
")",
"+",
"code",
"[",
"idx",
"+",
"2",
":",
"]",
")",
"idx",
"-=",
"idx",
"-",
"comment_start",
"comment_start",
"=",
"None",
"idx",
"+=",
"1",
"return",
"code"
] | 38.043478 | 0.001671 | [
"def clean_code(code, comments=True, macros=False, pragmas=False):\n",
" \"\"\"\n",
" Naive comment and macro striping from source code\n",
"\n",
" :param comments: If True, all comments are stripped from code\n",
" :param macros: If True, all macros are stripped from code\n",
" :param pragmas: If True, all pragmas are stripped from code\n",
"\n",
" :return: cleaned code. Line numbers are preserved with blank lines,\n",
" and multiline comments and macros are supported. BUT comment-like\n",
" strings are (wrongfully) treated as comments.\n",
" \"\"\"\n",
" if macros or pragmas:\n",
" lines = code.split('\\n')\n",
" in_macro = False\n",
" in_pragma = False\n",
" for i in range(len(lines)):\n",
" l = lines[i].strip()\n",
"\n",
" if macros and (l.startswith('#') and not l.startswith('#pragma') or in_macro):\n",
" lines[i] = ''\n",
" in_macro = l.endswith('\\\\')\n",
" if pragmas and (l.startswith('#pragma') or in_pragma):\n",
" lines[i] = ''\n",
" in_pragma = l.endswith('\\\\')\n",
" code = '\\n'.join(lines)\n",
"\n",
" if comments:\n",
" idx = 0\n",
" comment_start = None\n",
" while idx < len(code) - 1:\n",
" if comment_start is None and code[idx:idx + 2] == '//':\n",
" end_idx = code.find('\\n', idx)\n",
" code = code[:idx] + code[end_idx:]\n",
" idx -= end_idx - idx\n",
" elif comment_start is None and code[idx:idx + 2] == '/*':\n",
" comment_start = idx\n",
" elif comment_start is not None and code[idx:idx + 2] == '*/':\n",
" code = (code[:comment_start] +\n",
" '\\n' * code[comment_start:idx].count('\\n') +\n",
" code[idx + 2:])\n",
" idx -= idx - comment_start\n",
" comment_start = None\n",
" idx += 1\n",
"\n",
" return code"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 46 | 0.002347 |
def get_by_identifier(self, identifier):
    """Gets blocks by identifier

    Args:
        identifier (str): Should be any of: username, phone_number, email.

    See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks
    """

    params = {'identifier': identifier}

    return self.client.get(self._url(), params=params)
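A hedged usage sketch with the auth0-python management client; the import path is assumed from the v3 client, and domain/token are placeholders.

from auth0.v3.management import Blocks  # import path assumed for auth0-python v3
blocks = Blocks('my-tenant.auth0.com', 'MGMT_API_TOKEN')  # placeholder credentials
print(blocks.get_by_identifier('user@example.com'))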
"def",
"get_by_identifier",
"(",
"self",
",",
"identifier",
")",
":",
"params",
"=",
"{",
"'identifier'",
":",
"identifier",
"}",
"return",
"self",
".",
"client",
".",
"get",
"(",
"self",
".",
"_url",
"(",
")",
",",
"params",
"=",
"params",
")"
] | 30.083333 | 0.008065 | [
"def get_by_identifier(self, identifier):\n",
" \"\"\"Gets blocks by identifier\n",
"\n",
" Args:\n",
" identifier (str): Should be any of: username, phone_number, email.\n",
"\n",
" See: https://auth0.com/docs/api/management/v2#!/User_Blocks/get_user_blocks\n",
" \"\"\"\n",
"\n",
" params = {'identifier': identifier}\n",
"\n",
" return self.client.get(self._url(), params=params)"
] | [
0,
0.02702702702702703,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0.017241379310344827
] | 12 | 0.004681 |
def new_workunit(self, name, labels=None, cmd='', log_config=None):
  """Creates a (hierarchical) subunit of work for the purpose of timing and reporting.

  - name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
  - labels: An optional iterable of labels. The reporters can use this to decide how to
            display information about this work.
  - cmd: An optional longer string representing this work.
         E.g., the cmd line of a compiler invocation.
  - log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.

  Use like this:

  with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:
    <do scoped work here>
    <set the outcome on workunit if necessary>

  Note that the outcome will automatically be set to failure if an exception is raised
  in a workunit, and to success otherwise, so usually you only need to set the
  outcome explicitly if you want to set it to warning.

  :API: public
  """
  parent = self._threadlocal.current_workunit
  with self.new_workunit_under_parent(name, parent=parent, labels=labels, cmd=cmd,
                                      log_config=log_config) as workunit:
    self._threadlocal.current_workunit = workunit
    try:
      yield workunit
    finally:
      self._threadlocal.current_workunit = parent
"def",
"new_workunit",
"(",
"self",
",",
"name",
",",
"labels",
"=",
"None",
",",
"cmd",
"=",
"''",
",",
"log_config",
"=",
"None",
")",
":",
"parent",
"=",
"self",
".",
"_threadlocal",
".",
"current_workunit",
"with",
"self",
".",
"new_workunit_under_parent",
"(",
"name",
",",
"parent",
"=",
"parent",
",",
"labels",
"=",
"labels",
",",
"cmd",
"=",
"cmd",
",",
"log_config",
"=",
"log_config",
")",
"as",
"workunit",
":",
"self",
".",
"_threadlocal",
".",
"current_workunit",
"=",
"workunit",
"try",
":",
"yield",
"workunit",
"finally",
":",
"self",
".",
"_threadlocal",
".",
"current_workunit",
"=",
"parent"
] | 45.833333 | 0.008547 | [
"def new_workunit(self, name, labels=None, cmd='', log_config=None):\n",
" \"\"\"Creates a (hierarchical) subunit of work for the purpose of timing and reporting.\n",
"\n",
" - name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.\n",
" - labels: An optional iterable of labels. The reporters can use this to decide how to\n",
" display information about this work.\n",
" - cmd: An optional longer string representing this work.\n",
" E.g., the cmd line of a compiler invocation.\n",
" - log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.\n",
"\n",
" Use like this:\n",
"\n",
" with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:\n",
" <do scoped work here>\n",
" <set the outcome on workunit if necessary>\n",
"\n",
" Note that the outcome will automatically be set to failure if an exception is raised\n",
" in a workunit, and to success otherwise, so usually you only need to set the\n",
" outcome explicitly if you want to set it to warning.\n",
"\n",
" :API: public\n",
" \"\"\"\n",
" parent = self._threadlocal.current_workunit\n",
" with self.new_workunit_under_parent(name, parent=parent, labels=labels, cmd=cmd,\n",
" log_config=log_config) as workunit:\n",
" self._threadlocal.current_workunit = workunit\n",
" try:\n",
" yield workunit\n",
" finally:\n",
" self._threadlocal.current_workunit = parent"
] | [
0,
0.011235955056179775,
0,
0.011764705882352941,
0.011111111111111112,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0.011235955056179775,
0.012345679012345678,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0.019230769230769232,
0.09090909090909091,
0,
0.06666666666666667,
0.0196078431372549
] | 30 | 0.009565 |
def start_host(session=None):
    """Promote the current process into python plugin host for Nvim.

    Start msgpack-rpc event loop for `session`, listening for Nvim requests
    and notifications. It registers Nvim commands for loading/unloading
    python plugins.

    The sys.stdout and sys.stderr streams are redirected to Nvim through
    `session`. That means print statements probably won't work as expected
    while this function doesn't return.

    This function is normally called at program startup and could have been
    defined as a separate executable. It is exposed as a library function for
    testing purposes only.
    """
    plugins = []
    for arg in sys.argv:
        _, ext = os.path.splitext(arg)
        if ext == '.py':
            plugins.append(arg)
        elif os.path.isdir(arg):
            init = os.path.join(arg, '__init__.py')
            if os.path.isfile(init):
                plugins.append(arg)

    # This is a special case to support the old workaround of
    # adding an empty .py file to make a package directory
    # visible, and it should be removed soon.
    for path in list(plugins):
        dup = path + ".py"
        if os.path.isdir(path) and dup in plugins:
            plugins.remove(dup)

    # Special case: the legacy scripthost receives a single relative filename
    # while the rplugin host will receive absolute paths.
    if plugins == ["script_host.py"]:
        name = "script"
    else:
        name = "rplugin"

    setup_logging(name)

    if not session:
        session = stdio_session()
    nvim = Nvim.from_session(session)

    if nvim.version.api_level < 1:
        sys.stderr.write("This version of pynvim "
                         "requires nvim 0.1.6 or later")
        sys.exit(1)

    host = Host(nvim)
    host.start(plugins)
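A sketch of how this entry point is typically reached: Nvim spawns the host process with plugin paths in sys.argv, so a __main__ guard is enough.

if __name__ == '__main__':
    # plugin paths arrive via sys.argv; the msgpack-rpc session uses stdio
    start_host()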
"def",
"start_host",
"(",
"session",
"=",
"None",
")",
":",
"plugins",
"=",
"[",
"]",
"for",
"arg",
"in",
"sys",
".",
"argv",
":",
"_",
",",
"ext",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"arg",
")",
"if",
"ext",
"==",
"'.py'",
":",
"plugins",
".",
"append",
"(",
"arg",
")",
"elif",
"os",
".",
"path",
".",
"isdir",
"(",
"arg",
")",
":",
"init",
"=",
"os",
".",
"path",
".",
"join",
"(",
"arg",
",",
"'__init__.py'",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"init",
")",
":",
"plugins",
".",
"append",
"(",
"arg",
")",
"# This is a special case to support the old workaround of",
"# adding an empty .py file to make a package directory",
"# visible, and it should be removed soon.",
"for",
"path",
"in",
"list",
"(",
"plugins",
")",
":",
"dup",
"=",
"path",
"+",
"\".py\"",
"if",
"os",
".",
"path",
".",
"isdir",
"(",
"path",
")",
"and",
"dup",
"in",
"plugins",
":",
"plugins",
".",
"remove",
"(",
"dup",
")",
"# Special case: the legacy scripthost receives a single relative filename",
"# while the rplugin host will receive absolute paths.",
"if",
"plugins",
"==",
"[",
"\"script_host.py\"",
"]",
":",
"name",
"=",
"\"script\"",
"else",
":",
"name",
"=",
"\"rplugin\"",
"setup_logging",
"(",
"name",
")",
"if",
"not",
"session",
":",
"session",
"=",
"stdio_session",
"(",
")",
"nvim",
"=",
"Nvim",
".",
"from_session",
"(",
"session",
")",
"if",
"nvim",
".",
"version",
".",
"api_level",
"<",
"1",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"\"This version of pynvim \"",
"\"requires nvim 0.1.6 or later\"",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"host",
"=",
"Host",
"(",
"nvim",
")",
"host",
".",
"start",
"(",
"plugins",
")"
] | 33.245283 | 0.000551 | [
"def start_host(session=None):\n",
" \"\"\"Promote the current process into python plugin host for Nvim.\n",
"\n",
" Start msgpack-rpc event loop for `session`, listening for Nvim requests\n",
" and notifications. It registers Nvim commands for loading/unloading\n",
" python plugins.\n",
"\n",
" The sys.stdout and sys.stderr streams are redirected to Nvim through\n",
" `session`. That means print statements probably won't work as expected\n",
" while this function doesn't return.\n",
"\n",
" This function is normally called at program startup and could have been\n",
" defined as a separate executable. It is exposed as a library function for\n",
" testing purposes only.\n",
" \"\"\"\n",
" plugins = []\n",
" for arg in sys.argv:\n",
" _, ext = os.path.splitext(arg)\n",
" if ext == '.py':\n",
" plugins.append(arg)\n",
" elif os.path.isdir(arg):\n",
" init = os.path.join(arg, '__init__.py')\n",
" if os.path.isfile(init):\n",
" plugins.append(arg)\n",
"\n",
" # This is a special case to support the old workaround of\n",
" # adding an empty .py file to make a package directory\n",
" # visible, and it should be removed soon.\n",
" for path in list(plugins):\n",
" dup = path + \".py\"\n",
" if os.path.isdir(path) and dup in plugins:\n",
" plugins.remove(dup)\n",
"\n",
" # Special case: the legacy scripthost receives a single relative filename\n",
" # while the rplugin host will receive absolute paths.\n",
" if plugins == [\"script_host.py\"]:\n",
" name = \"script\"\n",
" else:\n",
" name = \"rplugin\"\n",
"\n",
" setup_logging(name)\n",
"\n",
" if not session:\n",
" session = stdio_session()\n",
" nvim = Nvim.from_session(session)\n",
"\n",
" if nvim.version.api_level < 1:\n",
" sys.stderr.write(\"This version of pynvim \"\n",
" \"requires nvim 0.1.6 or later\")\n",
" sys.exit(1)\n",
"\n",
" host = Host(nvim)\n",
" host.start(plugins)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216
] | 53 | 0.00082 |
def getBackgroundRange(fitParams):
    '''
    return minimum, average, maximum of the background peak
    '''
    smn, _, _ = getSignalParameters(fitParams)

    bg = fitParams[0]
    _, avg, std = bg
    bgmn = max(0, avg - 3 * std)

    if avg + 4 * std < smn:
        bgmx = avg + 4 * std
    if avg + 3 * std < smn:
        bgmx = avg + 3 * std
    if avg + 2 * std < smn:
        bgmx = avg + 2 * std
    else:
        bgmx = avg + std
    return bgmn, avg, bgmx
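An illustrative call, assuming fitParams[0] is the background (amplitude, mean, std) triple and getSignalParameters is provided by the surrounding module.

fit_params = [(1.0, 10.0, 2.0), (0.5, 60.0, 3.0)]  # hypothetical background + signal peaks
bg_min, bg_avg, bg_max = getBackgroundRange(fit_params)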
"def",
"getBackgroundRange",
"(",
"fitParams",
")",
":",
"smn",
",",
"_",
",",
"_",
"=",
"getSignalParameters",
"(",
"fitParams",
")",
"bg",
"=",
"fitParams",
"[",
"0",
"]",
"_",
",",
"avg",
",",
"std",
"=",
"bg",
"bgmn",
"=",
"max",
"(",
"0",
",",
"avg",
"-",
"3",
"*",
"std",
")",
"if",
"avg",
"+",
"4",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"4",
"*",
"std",
"if",
"avg",
"+",
"3",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"3",
"*",
"std",
"if",
"avg",
"+",
"2",
"*",
"std",
"<",
"smn",
":",
"bgmx",
"=",
"avg",
"+",
"2",
"*",
"std",
"else",
":",
"bgmx",
"=",
"avg",
"+",
"std",
"return",
"bgmn",
",",
"avg",
",",
"bgmx"
] | 24.631579 | 0.002058 | [
"def getBackgroundRange(fitParams):\r\n",
" '''\r\n",
" return minimum, average, maximum of the background peak\r\n",
" '''\r\n",
" smn, _, _ = getSignalParameters(fitParams)\r\n",
"\r\n",
" bg = fitParams[0]\r\n",
" _, avg, std = bg\r\n",
" bgmn = max(0, avg - 3 * std)\r\n",
"\r\n",
" if avg + 4 * std < smn:\r\n",
" bgmx = avg + 4 * std\r\n",
" if avg + 3 * std < smn:\r\n",
" bgmx = avg + 3 * std\r\n",
" if avg + 2 * std < smn:\r\n",
" bgmx = avg + 2 * std\r\n",
" else:\r\n",
" bgmx = avg + std\r\n",
" return bgmn, avg, bgmx"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464
] | 19 | 0.002024 |
def cmd(send, msg, args):
    """Gets a definition from urban dictionary.

    Syntax: {command} <[#<num>] <term>|--blacklist (word)|--unblacklist (word)>

    """
    key = args['config']['api']['bitlykey']
    parser = arguments.ArgParser(args['config'])
    parser.add_argument('--blacklist')
    parser.add_argument('--unblacklist')

    try:
        cmdargs, msg = parser.parse_known_args(msg)
        msg = ' '.join(msg)
    except arguments.ArgumentException as e:
        send(str(e))
        return
    if cmdargs.blacklist:
        if args['is_admin'](args['nick']):
            send(blacklist_word(args['db'], cmdargs.blacklist))
        else:
            send("Blacklisting is admin-only")
    elif cmdargs.unblacklist:
        if args['is_admin'](args['nick']):
            send(unblacklist_word(args['db'], cmdargs.unblacklist))
        else:
            send("Unblacklisting is admin-only")
    else:
        defn, url = get_urban(msg, args['db'], key)
        send(defn)
        if url:
            send("See full definition at %s" % url)
"def",
"cmd",
"(",
"send",
",",
"msg",
",",
"args",
")",
":",
"key",
"=",
"args",
"[",
"'config'",
"]",
"[",
"'api'",
"]",
"[",
"'bitlykey'",
"]",
"parser",
"=",
"arguments",
".",
"ArgParser",
"(",
"args",
"[",
"'config'",
"]",
")",
"parser",
".",
"add_argument",
"(",
"'--blacklist'",
")",
"parser",
".",
"add_argument",
"(",
"'--unblacklist'",
")",
"try",
":",
"cmdargs",
",",
"msg",
"=",
"parser",
".",
"parse_known_args",
"(",
"msg",
")",
"msg",
"=",
"' '",
".",
"join",
"(",
"msg",
")",
"except",
"arguments",
".",
"ArgumentException",
"as",
"e",
":",
"send",
"(",
"str",
"(",
"e",
")",
")",
"return",
"if",
"cmdargs",
".",
"blacklist",
":",
"if",
"args",
"[",
"'is_admin'",
"]",
"(",
"args",
"[",
"'nick'",
"]",
")",
":",
"send",
"(",
"blacklist_word",
"(",
"args",
"[",
"'db'",
"]",
",",
"cmdargs",
".",
"blacklist",
")",
")",
"else",
":",
"send",
"(",
"\"Blacklisting is admin-only\"",
")",
"elif",
"cmdargs",
".",
"unblacklist",
":",
"if",
"args",
"[",
"'is_admin'",
"]",
"(",
"args",
"[",
"'nick'",
"]",
")",
":",
"send",
"(",
"unblacklist_word",
"(",
"args",
"[",
"'db'",
"]",
",",
"cmdargs",
".",
"unblacklist",
")",
")",
"else",
":",
"send",
"(",
"\"Unblacklisting is admin-only\"",
")",
"else",
":",
"defn",
",",
"url",
"=",
"get_urban",
"(",
"msg",
",",
"args",
"[",
"'db'",
"]",
",",
"key",
")",
"send",
"(",
"defn",
")",
"if",
"url",
":",
"send",
"(",
"\"See full definition at %s\"",
"%",
"url",
")"
] | 31.96875 | 0.000949 | [
"def cmd(send, msg, args):\n",
" \"\"\"Gets a definition from urban dictionary.\n",
"\n",
" Syntax: {command} <[#<num>] <term>|--blacklist (word)|--unblacklist (word)>\n",
"\n",
" \"\"\"\n",
" key = args['config']['api']['bitlykey']\n",
" parser = arguments.ArgParser(args['config'])\n",
" parser.add_argument('--blacklist')\n",
" parser.add_argument('--unblacklist')\n",
"\n",
" try:\n",
" cmdargs, msg = parser.parse_known_args(msg)\n",
" msg = ' '.join(msg)\n",
" except arguments.ArgumentException as e:\n",
" send(str(e))\n",
" return\n",
" if cmdargs.blacklist:\n",
" if args['is_admin'](args['nick']):\n",
" send(blacklist_word(args['db'], cmdargs.blacklist))\n",
" else:\n",
" send(\"Blacklisting is admin-only\")\n",
" elif cmdargs.unblacklist:\n",
" if args['is_admin'](args['nick']):\n",
" send(unblacklist_word(args['db'], cmdargs.unblacklist))\n",
" else:\n",
" send(\"Unblacklisting is admin-only\")\n",
" else:\n",
" defn, url = get_urban(msg, args['db'], key)\n",
" send(defn)\n",
" if url:\n",
" send(\"See full definition at %s\" % url)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549
] | 32 | 0.000613 |
def computePWCorrelations(spikeTrains, removeAutoCorr):
  """
  Computes pairwise correlations from spikeTrains

  @param spikeTrains (array) spike trains obtained from the activation of cells in the TM
                     the array dimensions are: numCells x timeSteps
  @param removeAutoCorr (boolean) if true, auto-correlations are removed by subtracting
                        the diagonal of the correlation matrix
  @return corrMatrix (array) numCells x numCells matrix containing the Pearson correlation
                     coefficient of spike trains of cell i and cell j
  @return numNegPCC (int) number of negative pairwise correlations (PCC(i,j) < 0)
  """
  numCells = np.shape(spikeTrains)[0]
  corrMatrix = np.zeros((numCells, numCells))
  numNegPCC = 0
  for i in range(numCells):
    for j in range(numCells):
      if i == j and removeAutoCorr == True:
        continue
      if not all(spikeTrains[i,:] == 0) and not all(spikeTrains[j,:] == 0):
        corrMatrix[i,j] = np.corrcoef(spikeTrains[i,:], spikeTrains[j,:])[0,1]
        if corrMatrix[i,j] < 0:
          numNegPCC += 1
  return (corrMatrix, numNegPCC)
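A toy check: identical spike trains correlate at 1.0 off the diagonal, and anti-correlated trains add to the negative-PCC count.

import numpy as np
trains = np.array([[0, 1, 0, 1, 1],
                   [0, 1, 0, 1, 1],
                   [1, 0, 1, 0, 0]], dtype=float)
corr, n_neg = computePWCorrelations(trains, removeAutoCorr=True)
print(corr[0, 1], n_neg)  # 1.0, with the anti-correlated pairs counted in n_neg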
"def",
"computePWCorrelations",
"(",
"spikeTrains",
",",
"removeAutoCorr",
")",
":",
"numCells",
"=",
"np",
".",
"shape",
"(",
"spikeTrains",
")",
"[",
"0",
"]",
"corrMatrix",
"=",
"np",
".",
"zeros",
"(",
"(",
"numCells",
",",
"numCells",
")",
")",
"numNegPCC",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"numCells",
")",
":",
"for",
"j",
"in",
"range",
"(",
"numCells",
")",
":",
"if",
"i",
"==",
"j",
"and",
"removeAutoCorr",
"==",
"True",
":",
"continue",
"if",
"not",
"all",
"(",
"spikeTrains",
"[",
"i",
",",
":",
"]",
"==",
"0",
")",
"and",
"not",
"all",
"(",
"spikeTrains",
"[",
"j",
",",
":",
"]",
"==",
"0",
")",
":",
"corrMatrix",
"[",
"i",
",",
"j",
"]",
"=",
"np",
".",
"corrcoef",
"(",
"spikeTrains",
"[",
"i",
",",
":",
"]",
",",
"spikeTrains",
"[",
"j",
",",
":",
"]",
")",
"[",
"0",
",",
"1",
"]",
"if",
"corrMatrix",
"[",
"i",
",",
"j",
"]",
"<",
"0",
":",
"numNegPCC",
"+=",
"1",
"return",
"(",
"corrMatrix",
",",
"numNegPCC",
")"
] | 45.375 | 0.022482 | [
"def computePWCorrelations(spikeTrains, removeAutoCorr):\n",
" \"\"\"\n",
" Computes pairwise correlations from spikeTrains\n",
" \n",
" @param spikeTrains (array) spike trains obtained from the activation of cells in the TM\n",
" the array dimensions are: numCells x timeSteps\n",
" @param removeAutoCorr (boolean) if true, auto-correlations are removed by substracting\n",
" the diagonal of the correlation matrix \n",
" @return corrMatrix (array) numCells x numCells matrix containing the Pearson correlation\n",
" coefficient of spike trains of cell i and cell j\n",
" @return numNegPCC (int) number of negative pairwise correlations (PCC(i,j) < 0)\n",
" \"\"\"\n",
" numCells = np.shape(spikeTrains)[0]\n",
" corrMatrix = np.zeros((numCells, numCells))\n",
" numNegPCC = 0\n",
" for i in range(numCells):\n",
" for j in range(numCells):\n",
" if i == j and removeAutoCorr == True:\n",
" continue\n",
" if not all(spikeTrains[i,:] == 0) and not all(spikeTrains[j,:] == 0):\n",
" corrMatrix[i,j] = np.corrcoef(spikeTrains[i,:], spikeTrains[j,:])[0,1] \n",
" if corrMatrix[i,j] < 0:\n",
" numNegPCC += 1\n",
" return (corrMatrix, numNegPCC)"
] | [
0,
0.16666666666666666,
0,
0.3333333333333333,
0.011111111111111112,
0,
0.011235955056179775,
0.017543859649122806,
0.01098901098901099,
0,
0.012195121951219513,
0,
0.02631578947368421,
0.021739130434782608,
0.0625,
0.03571428571428571,
0,
0.045454545454545456,
0,
0.039473684210526314,
0.060240963855421686,
0.03125,
0.04,
0.0625
] | 24 | 0.041178 |
def run(file, access_key, secret_key, **kwargs):
"""命令行运行huobitrade"""
if file:
import sys
file_path, file_name = os.path.split(file)
sys.path.append(file_path)
strategy_module = importlib.import_module(os.path.splitext(file_name)[0])
init = getattr(strategy_module, 'init', None)
handle_func = getattr(strategy_module, 'handle_func', None)
schedule = getattr(strategy_module, 'schedule', None)
else:
        init, handle_func, schedule = [None] * 3
setKey(access_key, secret_key)
url = kwargs.get('url')
hostname = 'api.huobi.br.com'
if url:
hostname = urlparse(url).hostname
setUrl('https://' + hostname, 'https://' + hostname)
reconn = kwargs.get('reconn', -1)
from huobitrade import HBWebsocket, HBRestAPI
from huobitrade.datatype import HBMarket, HBAccount, HBMargin
restapi = HBRestAPI(get_acc=True)
ws = HBWebsocket(host=hostname, reconn=reconn)
auth_ws = HBWebsocket(host=hostname, auth=True, reconn=reconn)
data = HBMarket()
account = HBAccount()
margin = HBMargin()
ws_open = False
ws_auth = False
@ws.after_open
def _open():
nonlocal ws_open
click.echo('行情接口连接成功')
ws_open = True
@auth_ws.after_auth
def _auth():
nonlocal ws_auth
click.echo('鉴权接口鉴权成功')
ws_auth = True
ws.run()
auth_ws.run()
for i in range(10):
time.sleep(3)
click.echo(f'连接:第{i+1}次连接')
        if ws_open & ws_auth:
break
else:
ws.stop()
auth_ws.stop()
raise Exception('连接失败')
if init:
init(restapi, ws, auth_ws)
if handle_func:
for k, v in handle_func.items():
if k.split('.')[0].lower() == 'market':
ws.register_handle_func(k)(v)
else:
auth_ws.register_handle_func(k)(v)
if schedule:
print('testing')
from huobitrade.handler import TimeHandler
        interval = schedule.__kwdefaults__['interval']
timerhandler = TimeHandler('scheduler', interval)
timerhandler.handle = lambda msg: schedule(restapi, ws, auth_ws)
timerhandler.start()
while True:
try:
code = click.prompt('huobitrade>>')
if code == 'exit':
if click.confirm('是否要退出huobitrade'):
break
else:
continue
else:
result = eval(code)
click.echo(result)
except Exception as e:
click.echo(traceback.format_exc())
ws.stop()
auth_ws.stop() | [
"def",
"run",
"(",
"file",
",",
"access_key",
",",
"secret_key",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"file",
":",
"import",
"sys",
"file_path",
",",
"file_name",
"=",
"os",
".",
"path",
".",
"split",
"(",
"file",
")",
"sys",
".",
"path",
".",
"append",
"(",
"file_path",
")",
"strategy_module",
"=",
"importlib",
".",
"import_module",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"file_name",
")",
"[",
"0",
"]",
")",
"init",
"=",
"getattr",
"(",
"strategy_module",
",",
"'init'",
",",
"None",
")",
"handle_func",
"=",
"getattr",
"(",
"strategy_module",
",",
"'handle_func'",
",",
"None",
")",
"schedule",
"=",
"getattr",
"(",
"strategy_module",
",",
"'schedule'",
",",
"None",
")",
"else",
":",
"init",
",",
"handle_func",
",",
"scedule",
"=",
"[",
"None",
"]",
"*",
"3",
"setKey",
"(",
"access_key",
",",
"secret_key",
")",
"url",
"=",
"kwargs",
".",
"get",
"(",
"'url'",
")",
"hostname",
"=",
"'api.huobi.br.com'",
"if",
"url",
":",
"hostname",
"=",
"urlparse",
"(",
"url",
")",
".",
"hostname",
"setUrl",
"(",
"'https://'",
"+",
"hostname",
",",
"'https://'",
"+",
"hostname",
")",
"reconn",
"=",
"kwargs",
".",
"get",
"(",
"'reconn'",
",",
"-",
"1",
")",
"from",
"huobitrade",
"import",
"HBWebsocket",
",",
"HBRestAPI",
"from",
"huobitrade",
".",
"datatype",
"import",
"HBMarket",
",",
"HBAccount",
",",
"HBMargin",
"restapi",
"=",
"HBRestAPI",
"(",
"get_acc",
"=",
"True",
")",
"ws",
"=",
"HBWebsocket",
"(",
"host",
"=",
"hostname",
",",
"reconn",
"=",
"reconn",
")",
"auth_ws",
"=",
"HBWebsocket",
"(",
"host",
"=",
"hostname",
",",
"auth",
"=",
"True",
",",
"reconn",
"=",
"reconn",
")",
"data",
"=",
"HBMarket",
"(",
")",
"account",
"=",
"HBAccount",
"(",
")",
"margin",
"=",
"HBMargin",
"(",
")",
"ws_open",
"=",
"False",
"ws_auth",
"=",
"False",
"@",
"ws",
".",
"after_open",
"def",
"_open",
"(",
")",
":",
"nonlocal",
"ws_open",
"click",
".",
"echo",
"(",
"'行情接口连接成功')",
"",
"ws_open",
"=",
"True",
"@",
"auth_ws",
".",
"after_auth",
"def",
"_auth",
"(",
")",
":",
"nonlocal",
"ws_auth",
"click",
".",
"echo",
"(",
"'鉴权接口鉴权成功')",
"",
"ws_auth",
"=",
"True",
"ws",
".",
"run",
"(",
")",
"auth_ws",
".",
"run",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"10",
")",
":",
"time",
".",
"sleep",
"(",
"3",
")",
"click",
".",
"echo",
"(",
"f'连接:第{i+1}次连接')",
"",
"if",
"ws_open",
"&",
"ws_auth",
":",
"break",
"else",
":",
"ws",
".",
"stop",
"(",
")",
"auth_ws",
".",
"stop",
"(",
")",
"raise",
"Exception",
"(",
"'连接失败')",
"",
"if",
"init",
":",
"init",
"(",
"restapi",
",",
"ws",
",",
"auth_ws",
")",
"if",
"handle_func",
":",
"for",
"k",
",",
"v",
"in",
"handle_func",
".",
"items",
"(",
")",
":",
"if",
"k",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
".",
"lower",
"(",
")",
"==",
"'market'",
":",
"ws",
".",
"register_handle_func",
"(",
"k",
")",
"(",
"v",
")",
"else",
":",
"auth_ws",
".",
"register_handle_func",
"(",
"k",
")",
"(",
"v",
")",
"if",
"schedule",
":",
"print",
"(",
"'testing'",
")",
"from",
"huobitrade",
".",
"handler",
"import",
"TimeHandler",
"interval",
"=",
"scedule",
".",
"__kwdefaults__",
"[",
"'interval'",
"]",
"timerhandler",
"=",
"TimeHandler",
"(",
"'scheduler'",
",",
"interval",
")",
"timerhandler",
".",
"handle",
"=",
"lambda",
"msg",
":",
"schedule",
"(",
"restapi",
",",
"ws",
",",
"auth_ws",
")",
"timerhandler",
".",
"start",
"(",
")",
"while",
"True",
":",
"try",
":",
"code",
"=",
"click",
".",
"prompt",
"(",
"'huobitrade>>'",
")",
"if",
"code",
"==",
"'exit'",
":",
"if",
"click",
".",
"confirm",
"(",
"'是否要退出huobitrade'):",
"",
"",
"break",
"else",
":",
"continue",
"else",
":",
"result",
"=",
"eval",
"(",
"code",
")",
"click",
".",
"echo",
"(",
"result",
")",
"except",
"Exception",
"as",
"e",
":",
"click",
".",
"echo",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
"ws",
".",
"stop",
"(",
")",
"auth_ws",
".",
"stop",
"(",
")"
] | 28.230769 | 0.001504 | [
"def run(file, access_key, secret_key, **kwargs):\n",
" \"\"\"命令行运行huobitrade\"\"\"\n",
" if file:\n",
" import sys\n",
" file_path, file_name = os.path.split(file)\n",
" sys.path.append(file_path)\n",
" strategy_module = importlib.import_module(os.path.splitext(file_name)[0])\n",
" init = getattr(strategy_module, 'init', None)\n",
" handle_func = getattr(strategy_module, 'handle_func', None)\n",
" schedule = getattr(strategy_module, 'schedule', None)\n",
" else:\n",
" init, handle_func, scedule = [None] * 3\n",
"\n",
" setKey(access_key, secret_key)\n",
" url = kwargs.get('url')\n",
" hostname = 'api.huobi.br.com'\n",
" if url:\n",
" hostname = urlparse(url).hostname\n",
" setUrl('https://' + hostname, 'https://' + hostname)\n",
"\n",
" reconn = kwargs.get('reconn', -1)\n",
" from huobitrade import HBWebsocket, HBRestAPI\n",
" from huobitrade.datatype import HBMarket, HBAccount, HBMargin\n",
" restapi = HBRestAPI(get_acc=True)\n",
" ws = HBWebsocket(host=hostname, reconn=reconn)\n",
" auth_ws = HBWebsocket(host=hostname, auth=True, reconn=reconn)\n",
" data = HBMarket()\n",
" account = HBAccount()\n",
" margin = HBMargin()\n",
" ws_open = False\n",
" ws_auth = False\n",
"\n",
" @ws.after_open\n",
" def _open():\n",
" nonlocal ws_open\n",
" click.echo('行情接口连接成功')\n",
" ws_open = True\n",
"\n",
" @auth_ws.after_auth\n",
" def _auth():\n",
" nonlocal ws_auth\n",
" click.echo('鉴权接口鉴权成功')\n",
" ws_auth = True\n",
"\n",
" ws.run()\n",
" auth_ws.run()\n",
"\n",
" for i in range(10):\n",
" time.sleep(3)\n",
" click.echo(f'连接:第{i+1}次连接')\n",
" if ws_open&ws_auth:\n",
" break\n",
" else:\n",
" ws.stop()\n",
" auth_ws.stop()\n",
" raise Exception('连接失败')\n",
" if init:\n",
" init(restapi, ws, auth_ws)\n",
"\n",
" if handle_func:\n",
" for k, v in handle_func.items():\n",
" if k.split('.')[0].lower() == 'market':\n",
" ws.register_handle_func(k)(v)\n",
" else:\n",
" auth_ws.register_handle_func(k)(v)\n",
"\n",
" if schedule:\n",
" print('testing')\n",
" from huobitrade.handler import TimeHandler\n",
" interval = scedule.__kwdefaults__['interval']\n",
" timerhandler = TimeHandler('scheduler', interval)\n",
" timerhandler.handle = lambda msg: schedule(restapi, ws, auth_ws)\n",
" timerhandler.start()\n",
"\n",
"\n",
" while True:\n",
" try:\n",
" code = click.prompt('huobitrade>>')\n",
" if code == 'exit':\n",
" if click.confirm('是否要退出huobitrade'):\n",
" break\n",
" else:\n",
" continue\n",
" else:\n",
" result = eval(code)\n",
" click.echo(result)\n",
" except Exception as e:\n",
" click.echo(traceback.format_exc())\n",
"\n",
" ws.stop()\n",
" auth_ws.stop()"
] | [
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 91 | 0.001824 |
def grid_list(data):
'''
#=================================================
/process the grid data
/convert to list data for poly fitting
#=================================================
'''
a = []
b = []
M = []
for i in data:
a.append(i[0]) # np.array([i[1] for i in data], dtype=np.float64)
b.append(i[1]) # np.array([i[0] for i in data], dtype=np.float64)
M.append(i[2]) # np.array([i[2] for i in data], dtype=np.float64)
a = np.array(a, dtype=np.float64).tolist()
b = np.array(b, dtype=np.float64).tolist()
M = np.array(M, dtype=np.float64).tolist()
a = list(set(a))
b = list(set(b))
return a, b, M | [
"def",
"grid_list",
"(",
"data",
")",
":",
"a",
"=",
"[",
"]",
"b",
"=",
"[",
"]",
"M",
"=",
"[",
"]",
"for",
"i",
"in",
"data",
":",
"a",
".",
"append",
"(",
"i",
"[",
"0",
"]",
")",
"# np.array([i[1] for i in data], dtype=np.float64)",
"b",
".",
"append",
"(",
"i",
"[",
"1",
"]",
")",
"# np.array([i[0] for i in data], dtype=np.float64)",
"M",
".",
"append",
"(",
"i",
"[",
"2",
"]",
")",
"# np.array([i[2] for i in data], dtype=np.float64)",
"a",
"=",
"np",
".",
"array",
"(",
"a",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
".",
"tolist",
"(",
")",
"b",
"=",
"np",
".",
"array",
"(",
"b",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
".",
"tolist",
"(",
")",
"M",
"=",
"np",
".",
"array",
"(",
"M",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
".",
"tolist",
"(",
")",
"a",
"=",
"list",
"(",
"set",
"(",
"a",
")",
")",
"b",
"=",
"list",
"(",
"set",
"(",
"b",
")",
")",
"return",
"a",
",",
"b",
",",
"M"
] | 33.8 | 0.001439 | [
"def grid_list(data):\n",
" '''\n",
" #=================================================\n",
" /process the grid data\n",
" /convert to list data for poly fitting\n",
" #=================================================\n",
" '''\n",
" a = []\n",
" b = []\n",
" M = []\n",
" for i in data:\n",
" a.append(i[0]) # np.array([i[1] for i in data], dtype=np.float64)\n",
" b.append(i[1]) # np.array([i[0] for i in data], dtype=np.float64)\n",
" M.append(i[2]) # np.array([i[2] for i in data], dtype=np.float64)\n",
" a = np.array(a, dtype=np.float64).tolist()\n",
" b = np.array(b, dtype=np.float64).tolist()\n",
" M = np.array(M, dtype=np.float64).tolist()\n",
" a = list(set(a))\n",
" b = list(set(b))\n",
" return a, b, M"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 20 | 0.002778 |
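A usage sketch for grid_list, assuming numpy is imported as np in its module and data holds (x, y, value) triples over a regular grid:

data = [(0.0, 0.0, 1.5), (0.0, 1.0, 2.5),
        (1.0, 0.0, 3.5), (1.0, 1.0, 4.5)]
a, b, M = grid_list(data)
print(sorted(a), sorted(b))  # [0.0, 1.0] [0.0, 1.0] -- unique axis values
print(M)                     # [1.5, 2.5, 3.5, 4.5] -- one value per grid point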
def to_rgba(colors, alpha):
"""
    Convert hex colors to rgba values.
Parameters
----------
colors : iterable | str
colors to convert
alphas : iterable | float
alpha values
Returns
-------
out : ndarray | tuple
rgba color(s)
Notes
-----
Matplotlib plotting functions only accept scalar
alpha values. Hence no two objects with different
alpha values may be plotted in one call. This would
    make plots with continuous alpha values inefficient.
However :), the colors can be rgba hex values or
list-likes and the alpha dimension will be respected.
"""
def is_iterable(var):
return cbook.iterable(var) and not is_string(var)
def has_alpha(c):
if isinstance(c, tuple):
if len(c) == 4:
return True
elif isinstance(c, str):
if c[0] == '#' and len(c) == 9:
return True
return False
def no_color(c):
return c is None or c == '' or c.lower() == 'none'
def to_rgba_hex(c, a):
"""
        Convert rgb color to rgba hex value
If color c has an alpha channel, then alpha value
a is ignored
"""
_has_alpha = has_alpha(c)
c = mcolors.to_hex(c, keep_alpha=_has_alpha)
if not _has_alpha:
arr = colorConverter.to_rgba(c, a)
return mcolors.to_hex(arr, keep_alpha=True)
return c
if is_iterable(colors):
if all(no_color(c) for c in colors):
return 'none'
if is_iterable(alpha):
return [to_rgba_hex(c, a) for c, a in zip(colors, alpha)]
else:
return [to_rgba_hex(c, alpha) for c in colors]
else:
if no_color(colors):
return colors
if is_iterable(alpha):
return [to_rgba_hex(colors, a) for a in alpha]
else:
return to_rgba_hex(colors, alpha) | [
"def",
"to_rgba",
"(",
"colors",
",",
"alpha",
")",
":",
"def",
"is_iterable",
"(",
"var",
")",
":",
"return",
"cbook",
".",
"iterable",
"(",
"var",
")",
"and",
"not",
"is_string",
"(",
"var",
")",
"def",
"has_alpha",
"(",
"c",
")",
":",
"if",
"isinstance",
"(",
"c",
",",
"tuple",
")",
":",
"if",
"len",
"(",
"c",
")",
"==",
"4",
":",
"return",
"True",
"elif",
"isinstance",
"(",
"c",
",",
"str",
")",
":",
"if",
"c",
"[",
"0",
"]",
"==",
"'#'",
"and",
"len",
"(",
"c",
")",
"==",
"9",
":",
"return",
"True",
"return",
"False",
"def",
"no_color",
"(",
"c",
")",
":",
"return",
"c",
"is",
"None",
"or",
"c",
"==",
"''",
"or",
"c",
".",
"lower",
"(",
")",
"==",
"'none'",
"def",
"to_rgba_hex",
"(",
"c",
",",
"a",
")",
":",
"\"\"\"\n Conver rgb color to rgba hex value\n\n If color c has an alpha channel, then alpha value\n a is ignored\n \"\"\"",
"_has_alpha",
"=",
"has_alpha",
"(",
"c",
")",
"c",
"=",
"mcolors",
".",
"to_hex",
"(",
"c",
",",
"keep_alpha",
"=",
"_has_alpha",
")",
"if",
"not",
"_has_alpha",
":",
"arr",
"=",
"colorConverter",
".",
"to_rgba",
"(",
"c",
",",
"a",
")",
"return",
"mcolors",
".",
"to_hex",
"(",
"arr",
",",
"keep_alpha",
"=",
"True",
")",
"return",
"c",
"if",
"is_iterable",
"(",
"colors",
")",
":",
"if",
"all",
"(",
"no_color",
"(",
"c",
")",
"for",
"c",
"in",
"colors",
")",
":",
"return",
"'none'",
"if",
"is_iterable",
"(",
"alpha",
")",
":",
"return",
"[",
"to_rgba_hex",
"(",
"c",
",",
"a",
")",
"for",
"c",
",",
"a",
"in",
"zip",
"(",
"colors",
",",
"alpha",
")",
"]",
"else",
":",
"return",
"[",
"to_rgba_hex",
"(",
"c",
",",
"alpha",
")",
"for",
"c",
"in",
"colors",
"]",
"else",
":",
"if",
"no_color",
"(",
"colors",
")",
":",
"return",
"colors",
"if",
"is_iterable",
"(",
"alpha",
")",
":",
"return",
"[",
"to_rgba_hex",
"(",
"colors",
",",
"a",
")",
"for",
"a",
"in",
"alpha",
"]",
"else",
":",
"return",
"to_rgba_hex",
"(",
"colors",
",",
"alpha",
")"
] | 26.43662 | 0.000514 | [
"def to_rgba(colors, alpha):\n",
" \"\"\"\n",
" Covert hex colors to rgba values.\n",
"\n",
" Parameters\n",
" ----------\n",
" colors : iterable | str\n",
" colors to convert\n",
" alphas : iterable | float\n",
" alpha values\n",
"\n",
" Returns\n",
" -------\n",
" out : ndarray | tuple\n",
" rgba color(s)\n",
"\n",
" Notes\n",
" -----\n",
" Matplotlib plotting functions only accept scalar\n",
" alpha values. Hence no two objects with different\n",
" alpha values may be plotted in one call. This would\n",
" make plots with continuous alpha values innefficient.\n",
" However :), the colors can be rgba hex values or\n",
" list-likes and the alpha dimension will be respected.\n",
" \"\"\"\n",
" def is_iterable(var):\n",
" return cbook.iterable(var) and not is_string(var)\n",
"\n",
" def has_alpha(c):\n",
" if isinstance(c, tuple):\n",
" if len(c) == 4:\n",
" return True\n",
" elif isinstance(c, str):\n",
" if c[0] == '#' and len(c) == 9:\n",
" return True\n",
" return False\n",
"\n",
" def no_color(c):\n",
" return c is None or c == '' or c.lower() == 'none'\n",
"\n",
" def to_rgba_hex(c, a):\n",
" \"\"\"\n",
" Conver rgb color to rgba hex value\n",
"\n",
" If color c has an alpha channel, then alpha value\n",
" a is ignored\n",
" \"\"\"\n",
" _has_alpha = has_alpha(c)\n",
" c = mcolors.to_hex(c, keep_alpha=_has_alpha)\n",
"\n",
" if not _has_alpha:\n",
" arr = colorConverter.to_rgba(c, a)\n",
" return mcolors.to_hex(arr, keep_alpha=True)\n",
"\n",
" return c\n",
"\n",
" if is_iterable(colors):\n",
" if all(no_color(c) for c in colors):\n",
" return 'none'\n",
"\n",
" if is_iterable(alpha):\n",
" return [to_rgba_hex(c, a) for c, a in zip(colors, alpha)]\n",
" else:\n",
" return [to_rgba_hex(c, alpha) for c in colors]\n",
" else:\n",
" if no_color(colors):\n",
" return colors\n",
" if is_iterable(alpha):\n",
" return [to_rgba_hex(colors, a) for a in alpha]\n",
" else:\n",
" return to_rgba_hex(colors, alpha)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.022222222222222223
] | 71 | 0.000313 |
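A sketch of calling to_rgba, assuming the matplotlib and six helpers it references are importable; the exact hex strings depend on matplotlib's rounding:

print(to_rgba('red', 0.5))            # expected '#ff000080'
print(to_rgba(['red', 'blue'], 0.5))  # expected ['#ff000080', '#0000ff80']
print(to_rgba('none', 1.0))           # 'none' -- empty colors pass through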
def tmp_file(suffix=u"", root=None):
"""
Return a (handler, path) tuple
for a temporary file with given suffix created by ``tempfile``.
:param string suffix: the suffix (e.g., the extension) of the file
:param string root: path to the root temporary directory;
if ``None``, the default temporary directory
will be used instead
:rtype: tuple
"""
if root is None:
root = custom_tmp_dir()
return tempfile.mkstemp(suffix=suffix, dir=root) | [
"def",
"tmp_file",
"(",
"suffix",
"=",
"u\"\"",
",",
"root",
"=",
"None",
")",
":",
"if",
"root",
"is",
"None",
":",
"root",
"=",
"custom_tmp_dir",
"(",
")",
"return",
"tempfile",
".",
"mkstemp",
"(",
"suffix",
"=",
"suffix",
",",
"dir",
"=",
"root",
")"
] | 36.714286 | 0.001898 | [
"def tmp_file(suffix=u\"\", root=None):\n",
" \"\"\"\n",
" Return a (handler, path) tuple\n",
" for a temporary file with given suffix created by ``tempfile``.\n",
"\n",
" :param string suffix: the suffix (e.g., the extension) of the file\n",
" :param string root: path to the root temporary directory;\n",
" if ``None``, the default temporary directory\n",
" will be used instead\n",
" :rtype: tuple\n",
" \"\"\"\n",
" if root is None:\n",
" root = custom_tmp_dir()\n",
" return tempfile.mkstemp(suffix=suffix, dir=root)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.019230769230769232
] | 14 | 0.001374 |
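A short sketch of tmp_file in use; mkstemp returns an OS-level file descriptor, so the caller must close it:

import os

handler, path = tmp_file(suffix=u".wav")
try:
    os.write(handler, b"data")
finally:
    os.close(handler)
    os.remove(path)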
def get_client(self, client_type):
"""get_client.
"""
if client_type not in self._client_cache:
client_class = self._get_class(client_type)
self._client_cache[client_type] = self._get_client_instance(client_class)
return self._client_cache[client_type] | [
"def",
"get_client",
"(",
"self",
",",
"client_type",
")",
":",
"if",
"client_type",
"not",
"in",
"self",
".",
"_client_cache",
":",
"client_class",
"=",
"self",
".",
"_get_class",
"(",
"client_type",
")",
"self",
".",
"_client_cache",
"[",
"client_type",
"]",
"=",
"self",
".",
"_get_client_instance",
"(",
"client_class",
")",
"return",
"self",
".",
"_client_cache",
"[",
"client_type",
"]"
] | 43.142857 | 0.00974 | [
"def get_client(self, client_type):\n",
" \"\"\"get_client.\n",
" \"\"\"\n",
" if client_type not in self._client_cache:\n",
" client_class = self._get_class(client_type)\n",
" self._client_cache[client_type] = self._get_client_instance(client_class)\n",
" return self._client_cache[client_type]"
] | [
0,
0.043478260869565216,
0,
0,
0,
0.011627906976744186,
0.021739130434782608
] | 7 | 0.010978 |
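A sketch of the caching behavior of get_client, assuming connection is an instance of the surrounding class and using a hypothetical fully qualified client type name:

c1 = connection.get_client('vsts.git.v4_1.git_client.GitClient')  # hypothetical type name
c2 = connection.get_client('vsts.git.v4_1.git_client.GitClient')
assert c1 is c2  # the second call is served from _client_cache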
def build_api_url(
cls, path, query_params=None, api_base_url=None, api_version=None
):
"""Construct an API url given a few components, some optional.
Typically, you shouldn't need to use this method.
:type path: str
:param path: The path to the resource (ie, ``'/b/bucket-name'``).
:type query_params: dict or list
:param query_params: A dictionary of keys and values (or list of
key-value pairs) to insert into the query
string of the URL.
:type api_base_url: str
:param api_base_url: The base URL for the API endpoint.
Typically you won't have to provide this.
:type api_version: str
:param api_version: The version of the API to call.
Typically you shouldn't provide this and instead
use the default for the library.
:rtype: str
:returns: The URL assembled from the pieces provided.
"""
url = cls.API_URL_TEMPLATE.format(
api_base_url=(api_base_url or cls.API_BASE_URL),
api_version=(api_version or cls.API_VERSION),
path=path,
)
query_params = query_params or {}
if query_params:
url += "?" + urlencode(query_params, doseq=True)
return url | [
"def",
"build_api_url",
"(",
"cls",
",",
"path",
",",
"query_params",
"=",
"None",
",",
"api_base_url",
"=",
"None",
",",
"api_version",
"=",
"None",
")",
":",
"url",
"=",
"cls",
".",
"API_URL_TEMPLATE",
".",
"format",
"(",
"api_base_url",
"=",
"(",
"api_base_url",
"or",
"cls",
".",
"API_BASE_URL",
")",
",",
"api_version",
"=",
"(",
"api_version",
"or",
"cls",
".",
"API_VERSION",
")",
",",
"path",
"=",
"path",
",",
")",
"query_params",
"=",
"query_params",
"or",
"{",
"}",
"if",
"query_params",
":",
"url",
"+=",
"\"?\"",
"+",
"urlencode",
"(",
"query_params",
",",
"doseq",
"=",
"True",
")",
"return",
"url"
] | 35.842105 | 0.002144 | [
"def build_api_url(\n",
" cls, path, query_params=None, api_base_url=None, api_version=None\n",
" ):\n",
" \"\"\"Construct an API url given a few components, some optional.\n",
"\n",
" Typically, you shouldn't need to use this method.\n",
"\n",
" :type path: str\n",
" :param path: The path to the resource (ie, ``'/b/bucket-name'``).\n",
"\n",
" :type query_params: dict or list\n",
" :param query_params: A dictionary of keys and values (or list of\n",
" key-value pairs) to insert into the query\n",
" string of the URL.\n",
"\n",
" :type api_base_url: str\n",
" :param api_base_url: The base URL for the API endpoint.\n",
" Typically you won't have to provide this.\n",
"\n",
" :type api_version: str\n",
" :param api_version: The version of the API to call.\n",
" Typically you shouldn't provide this and instead\n",
" use the default for the library.\n",
"\n",
" :rtype: str\n",
" :returns: The URL assembled from the pieces provided.\n",
" \"\"\"\n",
" url = cls.API_URL_TEMPLATE.format(\n",
" api_base_url=(api_base_url or cls.API_BASE_URL),\n",
" api_version=(api_version or cls.API_VERSION),\n",
" path=path,\n",
" )\n",
"\n",
" query_params = query_params or {}\n",
" if query_params:\n",
" url += \"?\" + urlencode(query_params, doseq=True)\n",
"\n",
" return url"
] | [
0,
0,
0.14285714285714285,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 38 | 0.005592 |
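A hedged sketch of build_api_url: the subclass attributes below are hypothetical stand-ins for whatever the real connection class defines:

class DemoConnection(Connection):  # Connection assumed to be the class above, with build_api_url a classmethod
    API_BASE_URL = 'https://www.googleapis.com'
    API_VERSION = 'v1'
    API_URL_TEMPLATE = '{api_base_url}/storage/{api_version}{path}'

url = DemoConnection.build_api_url('/b/bucket-name', query_params={'fields': 'items'})
# -> 'https://www.googleapis.com/storage/v1/b/bucket-name?fields=items'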
def shuffle_genome(genome, cat, fraction = float(100), plot = True, \
alpha = 0.1, beta = 100000, \
min_length = 1000, max_length = 200000):
"""
randomly shuffle genome
"""
header = '>randomized_%s' % (genome.name)
sequence = list(''.join([i[1] for i in parse_fasta(genome)]))
length = len(sequence)
shuffled = []
# break genome into pieces
while sequence is not False:
s = int(random.gammavariate(alpha, beta))
if s <= min_length or s >= max_length:
continue
if len(sequence) < s:
seq = sequence[0:]
else:
seq = sequence[0:s]
sequence = sequence[s:]
# if bool(random.getrandbits(1)) is True:
# seq = rev_c(seq)
# print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr)
# else:
# print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr)
shuffled.append(''.join(seq))
if sequence == []:
break
# shuffle pieces
random.shuffle(shuffled)
# subset fragments
if fraction == float(100):
subset = shuffled
else:
max_pieces = int(length * fraction/100)
subset, total = [], 0
for fragment in shuffled:
length = len(fragment)
if total + length <= max_pieces:
subset.append(fragment)
total += length
else:
diff = max_pieces - total
subset.append(fragment[0:diff])
break
# combine sequences, if requested
if cat is True:
yield [header, ''.join(subset)]
else:
for i, seq in enumerate(subset):
yield ['%s fragment:%s' % (header, i), seq] | [
"def",
"shuffle_genome",
"(",
"genome",
",",
"cat",
",",
"fraction",
"=",
"float",
"(",
"100",
")",
",",
"plot",
"=",
"True",
",",
"alpha",
"=",
"0.1",
",",
"beta",
"=",
"100000",
",",
"min_length",
"=",
"1000",
",",
"max_length",
"=",
"200000",
")",
":",
"header",
"=",
"'>randomized_%s'",
"%",
"(",
"genome",
".",
"name",
")",
"sequence",
"=",
"list",
"(",
"''",
".",
"join",
"(",
"[",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"parse_fasta",
"(",
"genome",
")",
"]",
")",
")",
"length",
"=",
"len",
"(",
"sequence",
")",
"shuffled",
"=",
"[",
"]",
"# break genome into pieces",
"while",
"sequence",
"is",
"not",
"False",
":",
"s",
"=",
"int",
"(",
"random",
".",
"gammavariate",
"(",
"alpha",
",",
"beta",
")",
")",
"if",
"s",
"<=",
"min_length",
"or",
"s",
">=",
"max_length",
":",
"continue",
"if",
"len",
"(",
"sequence",
")",
"<",
"s",
":",
"seq",
"=",
"sequence",
"[",
"0",
":",
"]",
"else",
":",
"seq",
"=",
"sequence",
"[",
"0",
":",
"s",
"]",
"sequence",
"=",
"sequence",
"[",
"s",
":",
"]",
"# if bool(random.getrandbits(1)) is True:",
"# seq = rev_c(seq)",
"# print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr)",
"# else:",
"# print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr)",
"shuffled",
".",
"append",
"(",
"''",
".",
"join",
"(",
"seq",
")",
")",
"if",
"sequence",
"==",
"[",
"]",
":",
"break",
"# shuffle pieces",
"random",
".",
"shuffle",
"(",
"shuffled",
")",
"# subset fragments",
"if",
"fraction",
"==",
"float",
"(",
"100",
")",
":",
"subset",
"=",
"shuffled",
"else",
":",
"max_pieces",
"=",
"int",
"(",
"length",
"*",
"fraction",
"/",
"100",
")",
"subset",
",",
"total",
"=",
"[",
"]",
",",
"0",
"for",
"fragment",
"in",
"shuffled",
":",
"length",
"=",
"len",
"(",
"fragment",
")",
"if",
"total",
"+",
"length",
"<=",
"max_pieces",
":",
"subset",
".",
"append",
"(",
"fragment",
")",
"total",
"+=",
"length",
"else",
":",
"diff",
"=",
"max_pieces",
"-",
"total",
"subset",
".",
"append",
"(",
"fragment",
"[",
"0",
":",
"diff",
"]",
")",
"break",
"# combine sequences, if requested",
"if",
"cat",
"is",
"True",
":",
"yield",
"[",
"header",
",",
"''",
".",
"join",
"(",
"subset",
")",
"]",
"else",
":",
"for",
"i",
",",
"seq",
"in",
"enumerate",
"(",
"subset",
")",
":",
"yield",
"[",
"'%s fragment:%s'",
"%",
"(",
"header",
",",
"i",
")",
",",
"seq",
"]"
] | 34.411765 | 0.010526 | [
"def shuffle_genome(genome, cat, fraction = float(100), plot = True, \\\n",
" alpha = 0.1, beta = 100000, \\\n",
" min_length = 1000, max_length = 200000):\n",
" \"\"\"\n",
" randomly shuffle genome\n",
" \"\"\"\n",
" header = '>randomized_%s' % (genome.name)\n",
" sequence = list(''.join([i[1] for i in parse_fasta(genome)]))\n",
" length = len(sequence)\n",
" shuffled = []\n",
" # break genome into pieces\n",
" while sequence is not False:\n",
" s = int(random.gammavariate(alpha, beta))\n",
" if s <= min_length or s >= max_length:\n",
" continue\n",
" if len(sequence) < s:\n",
" seq = sequence[0:]\n",
" else:\n",
" seq = sequence[0:s]\n",
" sequence = sequence[s:]\n",
"# if bool(random.getrandbits(1)) is True:\n",
"# seq = rev_c(seq)\n",
"# print('fragment length: %s reverse complement: True' % ('{:,}'.format(s)), file=sys.stderr)\n",
"# else:\n",
"# print('fragment length: %s reverse complement: False' % ('{:,}'.format(s)), file=sys.stderr)\n",
" shuffled.append(''.join(seq))\n",
" if sequence == []:\n",
" break\n",
" # shuffle pieces\n",
" random.shuffle(shuffled)\n",
" # subset fragments\n",
" if fraction == float(100):\n",
" subset = shuffled\n",
" else:\n",
" max_pieces = int(length * fraction/100)\n",
" subset, total = [], 0\n",
" for fragment in shuffled:\n",
" length = len(fragment)\n",
" if total + length <= max_pieces:\n",
" subset.append(fragment)\n",
" total += length\n",
" else:\n",
" diff = max_pieces - total\n",
" subset.append(fragment[0:diff])\n",
" break\n",
" # combine sequences, if requested\n",
" if cat is True:\n",
" yield [header, ''.join(subset)]\n",
" else:\n",
" for i, seq in enumerate(subset):\n",
" yield ['%s fragment:%s' % (header, i), seq]"
] | [
0.07142857142857142,
0.15789473684210525,
0.10204081632653061,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0.009433962264150943,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01818181818181818
] | 51 | 0.007226 |
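A usage sketch for shuffle_genome, assuming parse_fasta from the same module and a FASTA file handle (open() provides the .name attribute the header uses); the filename is hypothetical:

with open('genome.fasta') as genome:
    for header, seq in shuffle_genome(genome, cat=True, fraction=float(100)):
        print(header, len(seq))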
def getlayer(self, cls, nb=1, _track=None, _subclass=None, **flt):
"""Return the nb^th layer that is an instance of cls, matching flt
values.
"""
if _subclass is None:
_subclass = self.match_subclass or None
if _subclass:
match = lambda cls1, cls2: issubclass(cls1, cls2)
else:
match = lambda cls1, cls2: cls1 == cls2
if isinstance(cls, int):
nb = cls + 1
cls = None
if isinstance(cls, str) and "." in cls:
ccls, fld = cls.split(".", 1)
else:
ccls, fld = cls, None
if cls is None or match(self.__class__, cls) \
or ccls in [self.__class__.__name__, self._name]:
if all(self.getfieldval(fldname) == fldvalue
for fldname, fldvalue in six.iteritems(flt)):
if nb == 1:
if fld is None:
return self
else:
return self.getfieldval(fld)
else:
nb -= 1
for f in self.packetfields:
fvalue_gen = self.getfieldval(f.name)
if fvalue_gen is None:
continue
if not f.islist:
fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)
for fvalue in fvalue_gen:
if isinstance(fvalue, Packet):
track = []
ret = fvalue.getlayer(cls, nb=nb, _track=track,
_subclass=_subclass, **flt)
if ret is not None:
return ret
nb = track[0]
return self.payload.getlayer(cls, nb=nb, _track=_track,
_subclass=_subclass, **flt) | [
"def",
"getlayer",
"(",
"self",
",",
"cls",
",",
"nb",
"=",
"1",
",",
"_track",
"=",
"None",
",",
"_subclass",
"=",
"None",
",",
"*",
"*",
"flt",
")",
":",
"if",
"_subclass",
"is",
"None",
":",
"_subclass",
"=",
"self",
".",
"match_subclass",
"or",
"None",
"if",
"_subclass",
":",
"match",
"=",
"lambda",
"cls1",
",",
"cls2",
":",
"issubclass",
"(",
"cls1",
",",
"cls2",
")",
"else",
":",
"match",
"=",
"lambda",
"cls1",
",",
"cls2",
":",
"cls1",
"==",
"cls2",
"if",
"isinstance",
"(",
"cls",
",",
"int",
")",
":",
"nb",
"=",
"cls",
"+",
"1",
"cls",
"=",
"None",
"if",
"isinstance",
"(",
"cls",
",",
"str",
")",
"and",
"\".\"",
"in",
"cls",
":",
"ccls",
",",
"fld",
"=",
"cls",
".",
"split",
"(",
"\".\"",
",",
"1",
")",
"else",
":",
"ccls",
",",
"fld",
"=",
"cls",
",",
"None",
"if",
"cls",
"is",
"None",
"or",
"match",
"(",
"self",
".",
"__class__",
",",
"cls",
")",
"or",
"ccls",
"in",
"[",
"self",
".",
"__class__",
".",
"__name__",
",",
"self",
".",
"_name",
"]",
":",
"if",
"all",
"(",
"self",
".",
"getfieldval",
"(",
"fldname",
")",
"==",
"fldvalue",
"for",
"fldname",
",",
"fldvalue",
"in",
"six",
".",
"iteritems",
"(",
"flt",
")",
")",
":",
"if",
"nb",
"==",
"1",
":",
"if",
"fld",
"is",
"None",
":",
"return",
"self",
"else",
":",
"return",
"self",
".",
"getfieldval",
"(",
"fld",
")",
"else",
":",
"nb",
"-=",
"1",
"for",
"f",
"in",
"self",
".",
"packetfields",
":",
"fvalue_gen",
"=",
"self",
".",
"getfieldval",
"(",
"f",
".",
"name",
")",
"if",
"fvalue_gen",
"is",
"None",
":",
"continue",
"if",
"not",
"f",
".",
"islist",
":",
"fvalue_gen",
"=",
"SetGen",
"(",
"fvalue_gen",
",",
"_iterpacket",
"=",
"0",
")",
"for",
"fvalue",
"in",
"fvalue_gen",
":",
"if",
"isinstance",
"(",
"fvalue",
",",
"Packet",
")",
":",
"track",
"=",
"[",
"]",
"ret",
"=",
"fvalue",
".",
"getlayer",
"(",
"cls",
",",
"nb",
"=",
"nb",
",",
"_track",
"=",
"track",
",",
"_subclass",
"=",
"_subclass",
",",
"*",
"*",
"flt",
")",
"if",
"ret",
"is",
"not",
"None",
":",
"return",
"ret",
"nb",
"=",
"track",
"[",
"0",
"]",
"return",
"self",
".",
"payload",
".",
"getlayer",
"(",
"cls",
",",
"nb",
"=",
"nb",
",",
"_track",
"=",
"_track",
",",
"_subclass",
"=",
"_subclass",
",",
"*",
"*",
"flt",
")"
] | 40.136364 | 0.002211 | [
"def getlayer(self, cls, nb=1, _track=None, _subclass=None, **flt):\n",
" \"\"\"Return the nb^th layer that is an instance of cls, matching flt\n",
"values.\n",
" \"\"\"\n",
" if _subclass is None:\n",
" _subclass = self.match_subclass or None\n",
" if _subclass:\n",
" match = lambda cls1, cls2: issubclass(cls1, cls2)\n",
" else:\n",
" match = lambda cls1, cls2: cls1 == cls2\n",
" if isinstance(cls, int):\n",
" nb = cls + 1\n",
" cls = None\n",
" if isinstance(cls, str) and \".\" in cls:\n",
" ccls, fld = cls.split(\".\", 1)\n",
" else:\n",
" ccls, fld = cls, None\n",
" if cls is None or match(self.__class__, cls) \\\n",
" or ccls in [self.__class__.__name__, self._name]:\n",
" if all(self.getfieldval(fldname) == fldvalue\n",
" for fldname, fldvalue in six.iteritems(flt)):\n",
" if nb == 1:\n",
" if fld is None:\n",
" return self\n",
" else:\n",
" return self.getfieldval(fld)\n",
" else:\n",
" nb -= 1\n",
" for f in self.packetfields:\n",
" fvalue_gen = self.getfieldval(f.name)\n",
" if fvalue_gen is None:\n",
" continue\n",
" if not f.islist:\n",
" fvalue_gen = SetGen(fvalue_gen, _iterpacket=0)\n",
" for fvalue in fvalue_gen:\n",
" if isinstance(fvalue, Packet):\n",
" track = []\n",
" ret = fvalue.getlayer(cls, nb=nb, _track=track,\n",
" _subclass=_subclass, **flt)\n",
" if ret is not None:\n",
" return ret\n",
" nb = track[0]\n",
" return self.payload.getlayer(cls, nb=nb, _track=_track,\n",
" _subclass=_subclass, **flt)"
] | [
0,
0.013333333333333334,
0,
0,
0,
0,
0,
0.016129032258064516,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625
] | 44 | 0.001462 |
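A minimal sketch of how this Packet method is typically called in scapy; both layer classes and field filters are accepted:

from scapy.all import Ether, IP, TCP  # assumes scapy is installed

pkt = Ether() / IP(dst='10.0.0.1') / TCP(dport=80)
ip = pkt.getlayer(IP)              # first layer that is an instance of IP
tcp = pkt.getlayer(TCP, dport=80)  # additionally match on a field value
print(ip.dst, tcp.dport)           # 10.0.0.1 80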
def execNetstatCmd(self, *args):
"""Execute ps command with positional params args and return result as
list of lines.
@param *args: Positional params for netstat command.
@return: List of output lines
"""
out = util.exec_command([netstatCmd,] + list(args))
return out.splitlines() | [
"def",
"execNetstatCmd",
"(",
"self",
",",
"*",
"args",
")",
":",
"out",
"=",
"util",
".",
"exec_command",
"(",
"[",
"netstatCmd",
",",
"]",
"+",
"list",
"(",
"args",
")",
")",
"return",
"out",
".",
"splitlines",
"(",
")"
] | 35.2 | 0.01662 | [
"def execNetstatCmd(self, *args):\n",
" \"\"\"Execute ps command with positional params args and return result as \n",
" list of lines.\n",
" \n",
" @param *args: Positional params for netstat command.\n",
" @return: List of output lines\n",
" \n",
" \"\"\"\n",
" out = util.exec_command([netstatCmd,] + list(args))\n",
" return out.splitlines()"
] | [
0,
0.025,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0.016666666666666666,
0.03225806451612903
] | 10 | 0.029615 |
def remove_row(self, row_number: int=-1):
"""
Removes a specified row of data
:param row_number: the row to remove (defaults to the last row)
:return: None
"""
if len(self._rows) == 0:
return
row = self._rows.pop(row_number)
for widget in row:
widget.destroy() | [
"def",
"remove_row",
"(",
"self",
",",
"row_number",
":",
"int",
"=",
"-",
"1",
")",
":",
"if",
"len",
"(",
"self",
".",
"_rows",
")",
"==",
"0",
":",
"return",
"row",
"=",
"self",
".",
"_rows",
".",
"pop",
"(",
"row_number",
")",
"for",
"widget",
"in",
"row",
":",
"widget",
".",
"destroy",
"(",
")"
] | 26 | 0.011429 | [
"def remove_row(self, row_number: int=-1):\n",
" \"\"\"\n",
" Removes a specified row of data\n",
"\n",
" :param row_number: the row to remove (defaults to the last row)\n",
" :return: None\n",
" \"\"\"\n",
" if len(self._rows) == 0:\n",
" return\n",
"\n",
" row = self._rows.pop(row_number)\n",
" for widget in row:\n",
" widget.destroy()"
] | [
0.047619047619047616,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 13 | 0.012821 |
def register_class(cls):
"""Regsiter this class in the `LinkFactory` """
if cls.appname in LinkFactory._class_dict:
return
LinkFactory.register(cls.appname, cls) | [
"def",
"register_class",
"(",
"cls",
")",
":",
"if",
"cls",
".",
"appname",
"in",
"LinkFactory",
".",
"_class_dict",
":",
"return",
"LinkFactory",
".",
"register",
"(",
"cls",
".",
"appname",
",",
"cls",
")"
] | 38.6 | 0.010152 | [
"def register_class(cls):\n",
" \"\"\"Regsiter this class in the `LinkFactory` \"\"\"\n",
" if cls.appname in LinkFactory._class_dict:\n",
" return\n",
" LinkFactory.register(cls.appname, cls)"
] | [
0,
0.017857142857142856,
0,
0,
0.021739130434782608
] | 5 | 0.007919 |
def completion_hints(config, prompt, session, context, current, arguments):
# type: (CompletionInfo, str, ShellSession, BundleContext, str, List[str]) -> List[str]
"""
Returns the possible completions of the current argument
:param config: Configuration of the current completion
:param prompt: The shell prompt string
:param session: Current shell session
:param context: Context of the shell UI bundle
:param current: Current argument (to be completed)
:param arguments: List of all arguments in their current state
:return: A list of possible completions
"""
if not current:
# No word yet, so the current position is after the existing ones
arg_idx = len(arguments)
else:
# Find the current word position
arg_idx = arguments.index(current)
# Find the ID of the next completer
completers = config.completers
if arg_idx > len(completers) - 1:
# Argument is too far to be positional, try
if config.multiple:
# Multiple calls allowed for the last completer
completer_id = completers[-1]
else:
# Nothing to return
return []
else:
completer_id = completers[arg_idx]
if completer_id == DUMMY:
# Dummy completer: do nothing
return []
# Find the matching service
svc_ref = context.get_service_reference(
SVC_COMPLETER, "({}={})".format(PROP_COMPLETER_ID, completer_id)
)
if svc_ref is None:
# Handler not found
_logger.debug("Unknown shell completer ID: %s", completer_id)
return []
# Call the completer
try:
with use_service(context, svc_ref) as completer:
matches = completer.complete(
config, prompt, session, context, arguments, current
)
if not matches:
return []
return matches
except Exception as ex:
_logger.exception("Error calling completer %s: %s", completer_id, ex)
return [] | [
"def",
"completion_hints",
"(",
"config",
",",
"prompt",
",",
"session",
",",
"context",
",",
"current",
",",
"arguments",
")",
":",
"# type: (CompletionInfo, str, ShellSession, BundleContext, str, List[str]) -> List[str]",
"if",
"not",
"current",
":",
"# No word yet, so the current position is after the existing ones",
"arg_idx",
"=",
"len",
"(",
"arguments",
")",
"else",
":",
"# Find the current word position",
"arg_idx",
"=",
"arguments",
".",
"index",
"(",
"current",
")",
"# Find the ID of the next completer",
"completers",
"=",
"config",
".",
"completers",
"if",
"arg_idx",
">",
"len",
"(",
"completers",
")",
"-",
"1",
":",
"# Argument is too far to be positional, try",
"if",
"config",
".",
"multiple",
":",
"# Multiple calls allowed for the last completer",
"completer_id",
"=",
"completers",
"[",
"-",
"1",
"]",
"else",
":",
"# Nothing to return",
"return",
"[",
"]",
"else",
":",
"completer_id",
"=",
"completers",
"[",
"arg_idx",
"]",
"if",
"completer_id",
"==",
"DUMMY",
":",
"# Dummy completer: do nothing",
"return",
"[",
"]",
"# Find the matching service",
"svc_ref",
"=",
"context",
".",
"get_service_reference",
"(",
"SVC_COMPLETER",
",",
"\"({}={})\"",
".",
"format",
"(",
"PROP_COMPLETER_ID",
",",
"completer_id",
")",
")",
"if",
"svc_ref",
"is",
"None",
":",
"# Handler not found",
"_logger",
".",
"debug",
"(",
"\"Unknown shell completer ID: %s\"",
",",
"completer_id",
")",
"return",
"[",
"]",
"# Call the completer",
"try",
":",
"with",
"use_service",
"(",
"context",
",",
"svc_ref",
")",
"as",
"completer",
":",
"matches",
"=",
"completer",
".",
"complete",
"(",
"config",
",",
"prompt",
",",
"session",
",",
"context",
",",
"arguments",
",",
"current",
")",
"if",
"not",
"matches",
":",
"return",
"[",
"]",
"return",
"matches",
"except",
"Exception",
"as",
"ex",
":",
"_logger",
".",
"exception",
"(",
"\"Error calling completer %s: %s\"",
",",
"completer_id",
",",
"ex",
")",
"return",
"[",
"]"
] | 33.779661 | 0.000975 | [
"def completion_hints(config, prompt, session, context, current, arguments):\n",
" # type: (CompletionInfo, str, ShellSession, BundleContext, str, List[str]) -> List[str]\n",
" \"\"\"\n",
" Returns the possible completions of the current argument\n",
"\n",
" :param config: Configuration of the current completion\n",
" :param prompt: The shell prompt string\n",
" :param session: Current shell session\n",
" :param context: Context of the shell UI bundle\n",
" :param current: Current argument (to be completed)\n",
" :param arguments: List of all arguments in their current state\n",
" :return: A list of possible completions\n",
" \"\"\"\n",
" if not current:\n",
" # No word yet, so the current position is after the existing ones\n",
" arg_idx = len(arguments)\n",
" else:\n",
" # Find the current word position\n",
" arg_idx = arguments.index(current)\n",
"\n",
" # Find the ID of the next completer\n",
" completers = config.completers\n",
" if arg_idx > len(completers) - 1:\n",
" # Argument is too far to be positional, try\n",
" if config.multiple:\n",
" # Multiple calls allowed for the last completer\n",
" completer_id = completers[-1]\n",
" else:\n",
" # Nothing to return\n",
" return []\n",
" else:\n",
" completer_id = completers[arg_idx]\n",
"\n",
" if completer_id == DUMMY:\n",
" # Dummy completer: do nothing\n",
" return []\n",
"\n",
" # Find the matching service\n",
" svc_ref = context.get_service_reference(\n",
" SVC_COMPLETER, \"({}={})\".format(PROP_COMPLETER_ID, completer_id)\n",
" )\n",
" if svc_ref is None:\n",
" # Handler not found\n",
" _logger.debug(\"Unknown shell completer ID: %s\", completer_id)\n",
" return []\n",
"\n",
" # Call the completer\n",
" try:\n",
" with use_service(context, svc_ref) as completer:\n",
" matches = completer.complete(\n",
" config, prompt, session, context, arguments, current\n",
" )\n",
" if not matches:\n",
" return []\n",
"\n",
" return matches\n",
" except Exception as ex:\n",
" _logger.exception(\"Error calling completer %s: %s\", completer_id, ex)\n",
" return []"
] | [
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 59 | 0.001181 |
def from_string(cls, string, relpath=None, encoding=None, is_sass=None):
"""Read Sass source from the contents of a string.
The origin is always None. `relpath` defaults to "string:...".
"""
if isinstance(string, six.text_type):
# Already decoded; we don't know what encoding to use for output,
# though, so still check for a @charset.
# TODO what if the given encoding conflicts with the one in the
# file? do we care?
if encoding is None:
encoding = determine_encoding(string)
byte_contents = string.encode(encoding)
text_contents = string
elif isinstance(string, six.binary_type):
encoding = determine_encoding(string)
byte_contents = string
text_contents = string.decode(encoding)
else:
raise TypeError("Expected text or bytes, got {0!r}".format(string))
origin = None
if relpath is None:
m = hashlib.sha256()
m.update(byte_contents)
relpath = repr("string:{0}:{1}".format(
m.hexdigest()[:16], text_contents[:100]))
return cls(
origin, relpath, text_contents, encoding=encoding,
is_sass=is_sass,
) | [
"def",
"from_string",
"(",
"cls",
",",
"string",
",",
"relpath",
"=",
"None",
",",
"encoding",
"=",
"None",
",",
"is_sass",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"string",
",",
"six",
".",
"text_type",
")",
":",
"# Already decoded; we don't know what encoding to use for output,",
"# though, so still check for a @charset.",
"# TODO what if the given encoding conflicts with the one in the",
"# file? do we care?",
"if",
"encoding",
"is",
"None",
":",
"encoding",
"=",
"determine_encoding",
"(",
"string",
")",
"byte_contents",
"=",
"string",
".",
"encode",
"(",
"encoding",
")",
"text_contents",
"=",
"string",
"elif",
"isinstance",
"(",
"string",
",",
"six",
".",
"binary_type",
")",
":",
"encoding",
"=",
"determine_encoding",
"(",
"string",
")",
"byte_contents",
"=",
"string",
"text_contents",
"=",
"string",
".",
"decode",
"(",
"encoding",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected text or bytes, got {0!r}\"",
".",
"format",
"(",
"string",
")",
")",
"origin",
"=",
"None",
"if",
"relpath",
"is",
"None",
":",
"m",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"m",
".",
"update",
"(",
"byte_contents",
")",
"relpath",
"=",
"repr",
"(",
"\"string:{0}:{1}\"",
".",
"format",
"(",
"m",
".",
"hexdigest",
"(",
")",
"[",
":",
"16",
"]",
",",
"text_contents",
"[",
":",
"100",
"]",
")",
")",
"return",
"cls",
"(",
"origin",
",",
"relpath",
",",
"text_contents",
",",
"encoding",
"=",
"encoding",
",",
"is_sass",
"=",
"is_sass",
",",
")"
] | 38.757576 | 0.001526 | [
"def from_string(cls, string, relpath=None, encoding=None, is_sass=None):\n",
" \"\"\"Read Sass source from the contents of a string.\n",
"\n",
" The origin is always None. `relpath` defaults to \"string:...\".\n",
" \"\"\"\n",
" if isinstance(string, six.text_type):\n",
" # Already decoded; we don't know what encoding to use for output,\n",
" # though, so still check for a @charset.\n",
" # TODO what if the given encoding conflicts with the one in the\n",
" # file? do we care?\n",
" if encoding is None:\n",
" encoding = determine_encoding(string)\n",
"\n",
" byte_contents = string.encode(encoding)\n",
" text_contents = string\n",
" elif isinstance(string, six.binary_type):\n",
" encoding = determine_encoding(string)\n",
" byte_contents = string\n",
" text_contents = string.decode(encoding)\n",
" else:\n",
" raise TypeError(\"Expected text or bytes, got {0!r}\".format(string))\n",
"\n",
" origin = None\n",
" if relpath is None:\n",
" m = hashlib.sha256()\n",
" m.update(byte_contents)\n",
" relpath = repr(\"string:{0}:{1}\".format(\n",
" m.hexdigest()[:16], text_contents[:100]))\n",
"\n",
" return cls(\n",
" origin, relpath, text_contents, encoding=encoding,\n",
" is_sass=is_sass,\n",
" )"
] | [
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111
] | 33 | 0.003881 |
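A short sketch, assuming the enclosing class is a pyScss-style SourceFile; when no encoding is given it is inferred from the contents:

source = SourceFile.from_string(u'a { color: red; }')  # class name assumed
print(source.encoding)  # attribute assumed to be stored by the constructor; typically 'utf-8'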
def naturaldate(value):
"""Like naturalday, but will append a year for dates that are a year
ago or more."""
try:
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't date-ish
return value
except (OverflowError, ValueError):
# Date arguments out of range
return value
delta = abs_timedelta(value - date.today())
if delta.days >= 365:
return naturalday(value, '%b %d %Y')
return naturalday(value) | [
"def",
"naturaldate",
"(",
"value",
")",
":",
"try",
":",
"value",
"=",
"date",
"(",
"value",
".",
"year",
",",
"value",
".",
"month",
",",
"value",
".",
"day",
")",
"except",
"AttributeError",
":",
"# Passed value wasn't date-ish",
"return",
"value",
"except",
"(",
"OverflowError",
",",
"ValueError",
")",
":",
"# Date arguments out of range",
"return",
"value",
"delta",
"=",
"abs_timedelta",
"(",
"value",
"-",
"date",
".",
"today",
"(",
")",
")",
"if",
"delta",
".",
"days",
">=",
"365",
":",
"return",
"naturalday",
"(",
"value",
",",
"'%b %d %Y'",
")",
"return",
"naturalday",
"(",
"value",
")"
] | 33.466667 | 0.001938 | [
"def naturaldate(value):\n",
" \"\"\"Like naturalday, but will append a year for dates that are a year\n",
" ago or more.\"\"\"\n",
" try:\n",
" value = date(value.year, value.month, value.day)\n",
" except AttributeError:\n",
" # Passed value wasn't date-ish\n",
" return value\n",
" except (OverflowError, ValueError):\n",
" # Date arguments out of range\n",
" return value\n",
" delta = abs_timedelta(value - date.today())\n",
" if delta.days >= 365:\n",
" return naturalday(value, '%b %d %Y')\n",
" return naturalday(value)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 15 | 0.002381 |
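A quick sketch of naturaldate, assuming naturalday and abs_timedelta from the same humanize-style module:

from datetime import date, timedelta

print(naturaldate(date.today()))                        # 'today'
print(naturaldate(date.today() - timedelta(days=400)))  # e.g. 'Apr 03 2024'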
def i2m(self, pkt, x):
"""Convert internal value to machine value"""
if x is None:
# Try to return zero if undefined
x = self.h2i(pkt, 0)
return x | [
"def",
"i2m",
"(",
"self",
",",
"pkt",
",",
"x",
")",
":",
"if",
"x",
"is",
"None",
":",
"# Try to return zero if undefined",
"x",
"=",
"self",
".",
"h2i",
"(",
"pkt",
",",
"0",
")",
"return",
"x"
] | 31.5 | 0.010309 | [
"def i2m(self, pkt, x):\n",
" \"\"\"Convert internal value to machine value\"\"\"\n",
" if x is None:\n",
" # Try to return zero if undefined\n",
" x = self.h2i(pkt, 0)\n",
" return x"
] | [
0,
0.018518518518518517,
0,
0,
0,
0.0625
] | 6 | 0.013503 |
def update(self):
"""Update the HVAC state."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_climate_params(self._id)
if data:
if time.time() - self.__manual_update_time > 60:
self.__is_auto_conditioning_on = (data
['is_auto_conditioning_on'])
self.__is_climate_on = data['is_climate_on']
self.__driver_temp_setting = (data['driver_temp_setting']
if data['driver_temp_setting']
else self.__driver_temp_setting)
self.__passenger_temp_setting = (data['passenger_temp_setting']
if
data['passenger_temp_setting']
else
self.__passenger_temp_setting)
self.__inside_temp = (data['inside_temp'] if data['inside_temp']
else self.__inside_temp)
self.__outside_temp = (data['outside_temp'] if data['outside_temp']
else self.__outside_temp)
self.__fan_status = data['fan_status'] | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"_controller",
".",
"update",
"(",
"self",
".",
"_id",
",",
"wake_if_asleep",
"=",
"False",
")",
"data",
"=",
"self",
".",
"_controller",
".",
"get_climate_params",
"(",
"self",
".",
"_id",
")",
"if",
"data",
":",
"if",
"time",
".",
"time",
"(",
")",
"-",
"self",
".",
"__manual_update_time",
">",
"60",
":",
"self",
".",
"__is_auto_conditioning_on",
"=",
"(",
"data",
"[",
"'is_auto_conditioning_on'",
"]",
")",
"self",
".",
"__is_climate_on",
"=",
"data",
"[",
"'is_climate_on'",
"]",
"self",
".",
"__driver_temp_setting",
"=",
"(",
"data",
"[",
"'driver_temp_setting'",
"]",
"if",
"data",
"[",
"'driver_temp_setting'",
"]",
"else",
"self",
".",
"__driver_temp_setting",
")",
"self",
".",
"__passenger_temp_setting",
"=",
"(",
"data",
"[",
"'passenger_temp_setting'",
"]",
"if",
"data",
"[",
"'passenger_temp_setting'",
"]",
"else",
"self",
".",
"__passenger_temp_setting",
")",
"self",
".",
"__inside_temp",
"=",
"(",
"data",
"[",
"'inside_temp'",
"]",
"if",
"data",
"[",
"'inside_temp'",
"]",
"else",
"self",
".",
"__inside_temp",
")",
"self",
".",
"__outside_temp",
"=",
"(",
"data",
"[",
"'outside_temp'",
"]",
"if",
"data",
"[",
"'outside_temp'",
"]",
"else",
"self",
".",
"__outside_temp",
")",
"self",
".",
"__fan_status",
"=",
"data",
"[",
"'fan_status'",
"]"
] | 58.043478 | 0.001474 | [
"def update(self):\n",
" \"\"\"Update the HVAC state.\"\"\"\n",
" self._controller.update(self._id, wake_if_asleep=False)\n",
"\n",
" data = self._controller.get_climate_params(self._id)\n",
" if data:\n",
" if time.time() - self.__manual_update_time > 60:\n",
" self.__is_auto_conditioning_on = (data\n",
" ['is_auto_conditioning_on'])\n",
" self.__is_climate_on = data['is_climate_on']\n",
" self.__driver_temp_setting = (data['driver_temp_setting']\n",
" if data['driver_temp_setting']\n",
" else self.__driver_temp_setting)\n",
" self.__passenger_temp_setting = (data['passenger_temp_setting']\n",
" if\n",
" data['passenger_temp_setting']\n",
" else\n",
" self.__passenger_temp_setting)\n",
" self.__inside_temp = (data['inside_temp'] if data['inside_temp']\n",
" else self.__inside_temp)\n",
" self.__outside_temp = (data['outside_temp'] if data['outside_temp']\n",
" else self.__outside_temp)\n",
" self.__fan_status = data['fan_status']"
] | [
0,
0.02702702702702703,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02
] | 23 | 0.002045 |
def cuts(self, data,
xtitle=None, ytitle=None, title=None, rtitle=None,
color=None):
"""data: pixel values along a line.
"""
y = data
x = np.arange(len(data))
self.plot(x, y, color=color, drawstyle='steps-mid',
xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,
alpha=1.0, linewidth=1.0, linestyle='-') | [
"def",
"cuts",
"(",
"self",
",",
"data",
",",
"xtitle",
"=",
"None",
",",
"ytitle",
"=",
"None",
",",
"title",
"=",
"None",
",",
"rtitle",
"=",
"None",
",",
"color",
"=",
"None",
")",
":",
"y",
"=",
"data",
"x",
"=",
"np",
".",
"arange",
"(",
"len",
"(",
"data",
")",
")",
"self",
".",
"plot",
"(",
"x",
",",
"y",
",",
"color",
"=",
"color",
",",
"drawstyle",
"=",
"'steps-mid'",
",",
"xtitle",
"=",
"xtitle",
",",
"ytitle",
"=",
"ytitle",
",",
"title",
"=",
"title",
",",
"rtitle",
"=",
"rtitle",
",",
"alpha",
"=",
"1.0",
",",
"linewidth",
"=",
"1.0",
",",
"linestyle",
"=",
"'-'",
")"
] | 36.545455 | 0.009709 | [
"def cuts(self, data,\n",
" xtitle=None, ytitle=None, title=None, rtitle=None,\n",
" color=None):\n",
" \"\"\"data: pixel values along a line.\n",
" \"\"\"\n",
" y = data\n",
" x = np.arange(len(data))\n",
"\n",
" self.plot(x, y, color=color, drawstyle='steps-mid',\n",
" xtitle=xtitle, ytitle=ytitle, title=title, rtitle=rtitle,\n",
" alpha=1.0, linewidth=1.0, linestyle='-')"
] | [
0,
0.015625,
0.038461538461538464,
0.022727272727272728,
0,
0,
0,
0,
0,
0,
0.017241379310344827
] | 11 | 0.00855 |
def getActiveJobCountForClientKey(self, clientKey):
""" Return the number of jobs for the given clientKey and a status that is
not completed.
"""
with ConnectionFactory.get() as conn:
query = 'SELECT count(job_id) ' \
'FROM %s ' \
'WHERE client_key = %%s ' \
' AND status != %%s' % self.jobsTableName
conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])
activeJobCount = conn.cursor.fetchone()[0]
return activeJobCount | [
"def",
"getActiveJobCountForClientKey",
"(",
"self",
",",
"clientKey",
")",
":",
"with",
"ConnectionFactory",
".",
"get",
"(",
")",
"as",
"conn",
":",
"query",
"=",
"'SELECT count(job_id) '",
"'FROM %s '",
"'WHERE client_key = %%s '",
"' AND status != %%s'",
"%",
"self",
".",
"jobsTableName",
"conn",
".",
"cursor",
".",
"execute",
"(",
"query",
",",
"[",
"clientKey",
",",
"self",
".",
"STATUS_COMPLETED",
"]",
")",
"activeJobCount",
"=",
"conn",
".",
"cursor",
".",
"fetchone",
"(",
")",
"[",
"0",
"]",
"return",
"activeJobCount"
] | 38.307692 | 0.009804 | [
"def getActiveJobCountForClientKey(self, clientKey):\n",
" \"\"\" Return the number of jobs for the given clientKey and a status that is\n",
" not completed.\n",
" \"\"\"\n",
" with ConnectionFactory.get() as conn:\n",
" query = 'SELECT count(job_id) ' \\\n",
" 'FROM %s ' \\\n",
" 'WHERE client_key = %%s ' \\\n",
" ' AND status != %%s' % self.jobsTableName\n",
" conn.cursor.execute(query, [clientKey, self.STATUS_COMPLETED])\n",
" activeJobCount = conn.cursor.fetchone()[0]\n",
"\n",
" return activeJobCount"
] | [
0,
0,
0,
0,
0,
0.025,
0,
0,
0.017543859649122806,
0.014492753623188406,
0.02040816326530612,
0,
0.04
] | 13 | 0.009034 |
def map(function, iterable, *args, **kwargs):
"""This function is equivalent to:
>>> [function(x, args[0], args[1],...) for x in iterable]
:param pm_parallel: Force parallelization on/off
:type pm_parallel: bool
:param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`
:type pm_chunksize: int
:param pm_pool: Pass an existing pool
:type pm_pool: multiprocessing.pool.Pool
:param pm_processes: Number of processes to use in the pool. See
:py:class:`multiprocessing.pool.Pool`
:type pm_processes: int
:param pm_pbar: Show progress bar
:type pm_pbar: bool
"""
return _map_or_starmap(function, iterable, args, kwargs, "map") | [
"def",
"map",
"(",
"function",
",",
"iterable",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"_map_or_starmap",
"(",
"function",
",",
"iterable",
",",
"args",
",",
"kwargs",
",",
"\"map\"",
")"
] | 41.823529 | 0.001376 | [
"def map(function, iterable, *args, **kwargs):\n",
" \"\"\"This function is equivalent to:\n",
" >>> [function(x, args[0], args[1],...) for x in iterable]\n",
"\n",
" :param pm_parallel: Force parallelization on/off\n",
" :type pm_parallel: bool\n",
" :param pm_chunksize: see :py:class:`multiprocessing.pool.Pool`\n",
" :type pm_chunksize: int\n",
" :param pm_pool: Pass an existing pool\n",
" :type pm_pool: multiprocessing.pool.Pool\n",
" :param pm_processes: Number of processes to use in the pool. See\n",
" :py:class:`multiprocessing.pool.Pool`\n",
" :type pm_processes: int\n",
" :param pm_pbar: Show progress bar\n",
" :type pm_pbar: bool\n",
" \"\"\"\n",
" return _map_or_starmap(function, iterable, args, kwargs, \"map\")"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358
] | 17 | 0.000878 |
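A sketch treating this as a parmap-style map: extra positional args are forwarded to the function, and pm_* kwargs control the pool:

def add(x, y):
    return x + y

result = map(add, range(5), 10, pm_processes=2)  # parallel [add(x, 10) for x in range(5)]
print(result)  # [10, 11, 12, 13, 14]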
def main():
"""
Connect to an SNI-enabled server and request a specific hostname, specified
by argv[1], of it.
"""
if len(argv) < 2:
print('Usage: %s <hostname>' % (argv[0],))
return 1
client = socket()
print('Connecting...', end="")
stdout.flush()
client.connect(('127.0.0.1', 8443))
print('connected', client.getpeername())
client_ssl = Connection(Context(TLSv1_METHOD), client)
client_ssl.set_connect_state()
client_ssl.set_tlsext_host_name(argv[1])
client_ssl.do_handshake()
print('Server subject is', client_ssl.get_peer_certificate().get_subject())
client_ssl.close() | [
"def",
"main",
"(",
")",
":",
"if",
"len",
"(",
"argv",
")",
"<",
"2",
":",
"print",
"(",
"'Usage: %s <hostname>'",
"%",
"(",
"argv",
"[",
"0",
"]",
",",
")",
")",
"return",
"1",
"client",
"=",
"socket",
"(",
")",
"print",
"(",
"'Connecting...'",
",",
"end",
"=",
"\"\"",
")",
"stdout",
".",
"flush",
"(",
")",
"client",
".",
"connect",
"(",
"(",
"'127.0.0.1'",
",",
"8443",
")",
")",
"print",
"(",
"'connected'",
",",
"client",
".",
"getpeername",
"(",
")",
")",
"client_ssl",
"=",
"Connection",
"(",
"Context",
"(",
"TLSv1_METHOD",
")",
",",
"client",
")",
"client_ssl",
".",
"set_connect_state",
"(",
")",
"client_ssl",
".",
"set_tlsext_host_name",
"(",
"argv",
"[",
"1",
"]",
")",
"client_ssl",
".",
"do_handshake",
"(",
")",
"print",
"(",
"'Server subject is'",
",",
"client_ssl",
".",
"get_peer_certificate",
"(",
")",
".",
"get_subject",
"(",
")",
")",
"client_ssl",
".",
"close",
"(",
")"
] | 28.863636 | 0.001524 | [
"def main():\n",
" \"\"\"\n",
" Connect to an SNI-enabled server and request a specific hostname, specified\n",
" by argv[1], of it.\n",
" \"\"\"\n",
" if len(argv) < 2:\n",
" print('Usage: %s <hostname>' % (argv[0],))\n",
" return 1\n",
"\n",
" client = socket()\n",
"\n",
" print('Connecting...', end=\"\")\n",
" stdout.flush()\n",
" client.connect(('127.0.0.1', 8443))\n",
" print('connected', client.getpeername())\n",
"\n",
" client_ssl = Connection(Context(TLSv1_METHOD), client)\n",
" client_ssl.set_connect_state()\n",
" client_ssl.set_tlsext_host_name(argv[1])\n",
" client_ssl.do_handshake()\n",
" print('Server subject is', client_ssl.get_peer_certificate().get_subject())\n",
" client_ssl.close()"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 22 | 0.002066 |
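A hedged invocation sketch for the SNI client above; the script name is a placeholder and a TLS server must already be listening on 127.0.0.1:8443:

#   python sni_client.py example.com   # example.com becomes the SNI hostname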
def bounce_cluster(name):
'''
Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server
shuts down and immediately restarts Traffic Server, node-by-node.
.. code-block:: yaml
bounce_ats_cluster:
trafficserver.bounce_cluster
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
if __opts__['test']:
ret['comment'] = 'Bouncing cluster'
return ret
__salt__['trafficserver.bounce_cluster']()
ret['result'] = True
ret['comment'] = 'Bounced cluster'
return ret | [
"def",
"bounce_cluster",
"(",
"name",
")",
":",
"ret",
"=",
"{",
"'name'",
":",
"name",
",",
"'changes'",
":",
"{",
"}",
",",
"'result'",
":",
"None",
",",
"'comment'",
":",
"''",
"}",
"if",
"__opts__",
"[",
"'test'",
"]",
":",
"ret",
"[",
"'comment'",
"]",
"=",
"'Bouncing cluster'",
"return",
"ret",
"__salt__",
"[",
"'trafficserver.bounce_cluster'",
"]",
"(",
")",
"ret",
"[",
"'result'",
"]",
"=",
"True",
"ret",
"[",
"'comment'",
"]",
"=",
"'Bounced cluster'",
"return",
"ret"
] | 24.125 | 0.001661 | [
"def bounce_cluster(name):\n",
" '''\n",
" Bounce all Traffic Server nodes in the cluster. Bouncing Traffic Server\n",
" shuts down and immediately restarts Traffic Server, node-by-node.\n",
"\n",
" .. code-block:: yaml\n",
"\n",
" bounce_ats_cluster:\n",
" trafficserver.bounce_cluster\n",
" '''\n",
" ret = {'name': name,\n",
" 'changes': {},\n",
" 'result': None,\n",
" 'comment': ''}\n",
"\n",
" if __opts__['test']:\n",
" ret['comment'] = 'Bouncing cluster'\n",
" return ret\n",
"\n",
" __salt__['trafficserver.bounce_cluster']()\n",
"\n",
" ret['result'] = True\n",
" ret['comment'] = 'Bounced cluster'\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 24 | 0.002976 |
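A hedged sketch of exercising the state ad hoc, in addition to the SLS form shown in the docstring (targets are placeholders):

#   salt '*' state.single trafficserver.bounce_cluster name=bounce_ats
#   salt '*' state.single trafficserver.bounce_cluster name=bounce_ats test=True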
def save_widget(cls, editor):
"""
Implements SplittableTabWidget.save_widget to actually save the
code editor widget.
If the editor.file.path is None or empty or the file does not exist,
a save as dialog is shown (save as).
:param editor: editor widget to save.
:return: False if there was a problem saving the editor (e.g. the save
as dialog has been canceled by the user, or a permission error,...)
"""
if editor.original:
editor = editor.original
if editor.file.path is None or not os.path.exists(editor.file.path):
# save as
path, filter = cls._ask_path(editor)
if not path:
return False
if not os.path.splitext(path)[1]:
if len(editor.mimetypes):
path += mimetypes.guess_extension(editor.mimetypes[0])
try:
_logger().debug('saving %r as %r', editor.file._old_path, path)
except AttributeError:
_logger().debug('saving %r as %r', editor.file.path, path)
editor.file._path = path
else:
path = editor.file.path
try:
editor.file.save(path)
except Exception as e:
QtWidgets.QMessageBox.warning(editor, "Failed to save file", 'Failed to save %r.\n\nError="%s"' %
(path, e))
else:
tw = editor.parent_tab_widget
text = tw.tabText(tw.indexOf(editor)).replace('*', '')
tw.setTabText(tw.indexOf(editor), text)
for clone in [editor] + editor.clones:
if clone != editor:
tw = clone.parent_tab_widget
tw.setTabText(tw.indexOf(clone), text)
return True | [
"def",
"save_widget",
"(",
"cls",
",",
"editor",
")",
":",
"if",
"editor",
".",
"original",
":",
"editor",
"=",
"editor",
".",
"original",
"if",
"editor",
".",
"file",
".",
"path",
"is",
"None",
"or",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"editor",
".",
"file",
".",
"path",
")",
":",
"# save as",
"path",
",",
"filter",
"=",
"cls",
".",
"_ask_path",
"(",
"editor",
")",
"if",
"not",
"path",
":",
"return",
"False",
"if",
"not",
"os",
".",
"path",
".",
"splitext",
"(",
"path",
")",
"[",
"1",
"]",
":",
"if",
"len",
"(",
"editor",
".",
"mimetypes",
")",
":",
"path",
"+=",
"mimetypes",
".",
"guess_extension",
"(",
"editor",
".",
"mimetypes",
"[",
"0",
"]",
")",
"try",
":",
"_logger",
"(",
")",
".",
"debug",
"(",
"'saving %r as %r'",
",",
"editor",
".",
"file",
".",
"_old_path",
",",
"path",
")",
"except",
"AttributeError",
":",
"_logger",
"(",
")",
".",
"debug",
"(",
"'saving %r as %r'",
",",
"editor",
".",
"file",
".",
"path",
",",
"path",
")",
"editor",
".",
"file",
".",
"_path",
"=",
"path",
"else",
":",
"path",
"=",
"editor",
".",
"file",
".",
"path",
"try",
":",
"editor",
".",
"file",
".",
"save",
"(",
"path",
")",
"except",
"Exception",
"as",
"e",
":",
"QtWidgets",
".",
"QMessageBox",
".",
"warning",
"(",
"editor",
",",
"\"Failed to save file\"",
",",
"'Failed to save %r.\\n\\nError=\"%s\"'",
"%",
"(",
"path",
",",
"e",
")",
")",
"else",
":",
"tw",
"=",
"editor",
".",
"parent_tab_widget",
"text",
"=",
"tw",
".",
"tabText",
"(",
"tw",
".",
"indexOf",
"(",
"editor",
")",
")",
".",
"replace",
"(",
"'*'",
",",
"''",
")",
"tw",
".",
"setTabText",
"(",
"tw",
".",
"indexOf",
"(",
"editor",
")",
",",
"text",
")",
"for",
"clone",
"in",
"[",
"editor",
"]",
"+",
"editor",
".",
"clones",
":",
"if",
"clone",
"!=",
"editor",
":",
"tw",
"=",
"clone",
".",
"parent_tab_widget",
"tw",
".",
"setTabText",
"(",
"tw",
".",
"indexOf",
"(",
"clone",
")",
",",
"text",
")",
"return",
"True"
] | 41.674419 | 0.001636 | [
"def save_widget(cls, editor):\n",
" \"\"\"\n",
" Implements SplittableTabWidget.save_widget to actually save the\n",
" code editor widget.\n",
"\n",
" If the editor.file.path is None or empty or the file does not exist,\n",
" a save as dialog is shown (save as).\n",
"\n",
" :param editor: editor widget to save.\n",
" :return: False if there was a problem saving the editor (e.g. the save\n",
" as dialog has been canceled by the user, or a permission error,...)\n",
" \"\"\"\n",
" if editor.original:\n",
" editor = editor.original\n",
" if editor.file.path is None or not os.path.exists(editor.file.path):\n",
" # save as\n",
" path, filter = cls._ask_path(editor)\n",
" if not path:\n",
" return False\n",
" if not os.path.splitext(path)[1]:\n",
" if len(editor.mimetypes):\n",
" path += mimetypes.guess_extension(editor.mimetypes[0])\n",
" try:\n",
" _logger().debug('saving %r as %r', editor.file._old_path, path)\n",
" except AttributeError:\n",
" _logger().debug('saving %r as %r', editor.file.path, path)\n",
" editor.file._path = path\n",
" else:\n",
" path = editor.file.path\n",
" try:\n",
" editor.file.save(path)\n",
" except Exception as e:\n",
" QtWidgets.QMessageBox.warning(editor, \"Failed to save file\", 'Failed to save %r.\\n\\nError=\"%s\"' %\n",
" (path, e))\n",
" else:\n",
" tw = editor.parent_tab_widget\n",
" text = tw.tabText(tw.indexOf(editor)).replace('*', '')\n",
" tw.setTabText(tw.indexOf(editor), text)\n",
" for clone in [editor] + editor.clones:\n",
" if clone != editor:\n",
" tw = clone.parent_tab_widget\n",
" tw.setTabText(tw.indexOf(clone), text)\n",
" return True"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00909090909090909,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 43 | 0.003373 |
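A hedged usage sketch: save_widget is a classmethod called with the tab's editor instance; the widget class name below is assumed from pyQode conventions:

# ok = SplittableCodeEditTabWidget.save_widget(current_editor)
# if not ok:
#     pass  # save-as dialog cancelled or the write failed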
def generate_strip_subparser(subparsers):
"""Adds a sub-command parser to `subparsers` to process prepared files
for use with the tacl ngrams command."""
parser = subparsers.add_parser(
'strip', description=constants.STRIP_DESCRIPTION,
epilog=constants.STRIP_EPILOG, formatter_class=ParagraphFormatter,
help=constants.STRIP_HELP)
parser.set_defaults(func=strip_files)
utils.add_common_arguments(parser)
parser.add_argument('input', help=constants.STRIP_INPUT_HELP,
metavar='INPUT')
parser.add_argument('output', help=constants.STRIP_OUTPUT_HELP,
metavar='OUTPUT') | [
"def",
"generate_strip_subparser",
"(",
"subparsers",
")",
":",
"parser",
"=",
"subparsers",
".",
"add_parser",
"(",
"'strip'",
",",
"description",
"=",
"constants",
".",
"STRIP_DESCRIPTION",
",",
"epilog",
"=",
"constants",
".",
"STRIP_EPILOG",
",",
"formatter_class",
"=",
"ParagraphFormatter",
",",
"help",
"=",
"constants",
".",
"STRIP_HELP",
")",
"parser",
".",
"set_defaults",
"(",
"func",
"=",
"strip_files",
")",
"utils",
".",
"add_common_arguments",
"(",
"parser",
")",
"parser",
".",
"add_argument",
"(",
"'input'",
",",
"help",
"=",
"constants",
".",
"STRIP_INPUT_HELP",
",",
"metavar",
"=",
"'INPUT'",
")",
"parser",
".",
"add_argument",
"(",
"'output'",
",",
"help",
"=",
"constants",
".",
"STRIP_OUTPUT_HELP",
",",
"metavar",
"=",
"'OUTPUT'",
")"
] | 50.076923 | 0.001508 | [
"def generate_strip_subparser(subparsers):\n",
" \"\"\"Adds a sub-command parser to `subparsers` to process prepared files\n",
" for use with the tacl ngrams command.\"\"\"\n",
" parser = subparsers.add_parser(\n",
" 'strip', description=constants.STRIP_DESCRIPTION,\n",
" epilog=constants.STRIP_EPILOG, formatter_class=ParagraphFormatter,\n",
" help=constants.STRIP_HELP)\n",
" parser.set_defaults(func=strip_files)\n",
" utils.add_common_arguments(parser)\n",
" parser.add_argument('input', help=constants.STRIP_INPUT_HELP,\n",
" metavar='INPUT')\n",
" parser.add_argument('output', help=constants.STRIP_OUTPUT_HELP,\n",
" metavar='OUTPUT')"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025
] | 13 | 0.001876 |
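A minimal wiring sketch using plain argparse; the tacl-specific constants and utils module are assumed to exist in the real package:

import argparse

parser = argparse.ArgumentParser(prog='tacl')
subparsers = parser.add_subparsers(title='subcommands')
# generate_strip_subparser(subparsers)   # registers the 'strip' command
# args = parser.parse_args(['strip', 'in_dir', 'out_dir'])
# args.func(args)                        # dispatches to strip_files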
def xception_internal(inputs, hparams):
"""Xception body."""
with tf.variable_scope("xception"):
cur = inputs
if cur.get_shape().as_list()[1] > 200:
# Large image, Xception entry flow
cur = xception_entry(cur, hparams.hidden_size)
else:
# Small image, conv
cur = common_layers.conv_block(
cur,
hparams.hidden_size, [((1, 1), (3, 3))],
first_relu=False,
padding="SAME",
force2d=True,
name="small_image_conv")
for i in range(hparams.num_hidden_layers):
with tf.variable_scope("layer_%d" % i):
cur = residual_block(cur, hparams)
return xception_exit(cur) | [
"def",
"xception_internal",
"(",
"inputs",
",",
"hparams",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"xception\"",
")",
":",
"cur",
"=",
"inputs",
"if",
"cur",
".",
"get_shape",
"(",
")",
".",
"as_list",
"(",
")",
"[",
"1",
"]",
">",
"200",
":",
"# Large image, Xception entry flow",
"cur",
"=",
"xception_entry",
"(",
"cur",
",",
"hparams",
".",
"hidden_size",
")",
"else",
":",
"# Small image, conv",
"cur",
"=",
"common_layers",
".",
"conv_block",
"(",
"cur",
",",
"hparams",
".",
"hidden_size",
",",
"[",
"(",
"(",
"1",
",",
"1",
")",
",",
"(",
"3",
",",
"3",
")",
")",
"]",
",",
"first_relu",
"=",
"False",
",",
"padding",
"=",
"\"SAME\"",
",",
"force2d",
"=",
"True",
",",
"name",
"=",
"\"small_image_conv\"",
")",
"for",
"i",
"in",
"range",
"(",
"hparams",
".",
"num_hidden_layers",
")",
":",
"with",
"tf",
".",
"variable_scope",
"(",
"\"layer_%d\"",
"%",
"i",
")",
":",
"cur",
"=",
"residual_block",
"(",
"cur",
",",
"hparams",
")",
"return",
"xception_exit",
"(",
"cur",
")"
] | 28.434783 | 0.011834 | [
"def xception_internal(inputs, hparams):\n",
" \"\"\"Xception body.\"\"\"\n",
" with tf.variable_scope(\"xception\"):\n",
" cur = inputs\n",
"\n",
" if cur.get_shape().as_list()[1] > 200:\n",
" # Large image, Xception entry flow\n",
" cur = xception_entry(cur, hparams.hidden_size)\n",
" else:\n",
" # Small image, conv\n",
" cur = common_layers.conv_block(\n",
" cur,\n",
" hparams.hidden_size, [((1, 1), (3, 3))],\n",
" first_relu=False,\n",
" padding=\"SAME\",\n",
" force2d=True,\n",
" name=\"small_image_conv\")\n",
"\n",
" for i in range(hparams.num_hidden_layers):\n",
" with tf.variable_scope(\"layer_%d\" % i):\n",
" cur = residual_block(cur, hparams)\n",
"\n",
" return xception_exit(cur)"
] | [
0,
0.043478260869565216,
0.02631578947368421,
0,
0,
0,
0.024390243902439025,
0.018867924528301886,
0,
0.038461538461538464,
0.02631578947368421,
0,
0,
0,
0,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
0.034482758620689655
] | 23 | 0.010176 |
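Note that the branch above depends only on the static input height: [batch, H, W, C] inputs with H > 200 take the Xception entry flow, smaller ones a single 3x3 conv block. A hedged call sketch (TF1-era tensor2tensor API assumed):

# out = xception_internal(inputs, hparams)
# hparams must provide hidden_size and num_hidden_layers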
def _read_syncmap_file(self, path, extension, text=False):
""" Read labels from a SyncMap file """
syncmap = SyncMap(logger=self.logger)
syncmap.read(extension, path, parameters=None)
if text:
return [(f.begin, f.end, u" ".join(f.text_fragment.lines)) for f in syncmap.fragments]
return [(f.begin, f.end, f.text_fragment.identifier) for f in syncmap.fragments] | [
"def",
"_read_syncmap_file",
"(",
"self",
",",
"path",
",",
"extension",
",",
"text",
"=",
"False",
")",
":",
"syncmap",
"=",
"SyncMap",
"(",
"logger",
"=",
"self",
".",
"logger",
")",
"syncmap",
".",
"read",
"(",
"extension",
",",
"path",
",",
"parameters",
"=",
"None",
")",
"if",
"text",
":",
"return",
"[",
"(",
"f",
".",
"begin",
",",
"f",
".",
"end",
",",
"u\" \"",
".",
"join",
"(",
"f",
".",
"text_fragment",
".",
"lines",
")",
")",
"for",
"f",
"in",
"syncmap",
".",
"fragments",
"]",
"return",
"[",
"(",
"f",
".",
"begin",
",",
"f",
".",
"end",
",",
"f",
".",
"text_fragment",
".",
"identifier",
")",
"for",
"f",
"in",
"syncmap",
".",
"fragments",
"]"
] | 58 | 0.009709 | [
"def _read_syncmap_file(self, path, extension, text=False):\n",
" \"\"\" Read labels from a SyncMap file \"\"\"\n",
" syncmap = SyncMap(logger=self.logger)\n",
" syncmap.read(extension, path, parameters=None)\n",
" if text:\n",
" return [(f.begin, f.end, u\" \".join(f.text_fragment.lines)) for f in syncmap.fragments]\n",
" return [(f.begin, f.end, f.text_fragment.identifier) for f in syncmap.fragments]"
] | [
0,
0.020833333333333332,
0,
0,
0,
0.010101010101010102,
0.022727272727272728
] | 7 | 0.007666 |
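A hedged usage sketch (aeneas SyncMap API assumed); with text=True each tuple carries the fragment's joined lines instead of its identifier:

# self._read_syncmap_file('map.json', 'json', text=True)
#   -> [(begin, end, 'first fragment text'), ...]
# self._read_syncmap_file('map.json', 'json')
#   -> [(begin, end, 'f000001'), ...]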
def _build_offset(offset, kwargs, default):
"""
Builds the offset argument for event rules.
"""
if offset is None:
if not kwargs:
return default # use the default.
else:
return _td_check(datetime.timedelta(**kwargs))
elif kwargs:
raise ValueError('Cannot pass kwargs and an offset')
elif isinstance(offset, datetime.timedelta):
return _td_check(offset)
else:
raise TypeError("Must pass 'hours' and/or 'minutes' as keywords") | [
"def",
"_build_offset",
"(",
"offset",
",",
"kwargs",
",",
"default",
")",
":",
"if",
"offset",
"is",
"None",
":",
"if",
"not",
"kwargs",
":",
"return",
"default",
"# use the default.",
"else",
":",
"return",
"_td_check",
"(",
"datetime",
".",
"timedelta",
"(",
"*",
"*",
"kwargs",
")",
")",
"elif",
"kwargs",
":",
"raise",
"ValueError",
"(",
"'Cannot pass kwargs and an offset'",
")",
"elif",
"isinstance",
"(",
"offset",
",",
"datetime",
".",
"timedelta",
")",
":",
"return",
"_td_check",
"(",
"offset",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Must pass 'hours' and/or 'minutes' as keywords\"",
")"
] | 33.533333 | 0.001934 | [
"def _build_offset(offset, kwargs, default):\n",
" \"\"\"\n",
" Builds the offset argument for event rules.\n",
" \"\"\"\n",
" if offset is None:\n",
" if not kwargs:\n",
" return default # use the default.\n",
" else:\n",
" return _td_check(datetime.timedelta(**kwargs))\n",
" elif kwargs:\n",
" raise ValueError('Cannot pass kwargs and an offset')\n",
" elif isinstance(offset, datetime.timedelta):\n",
" return _td_check(offset)\n",
" else:\n",
" raise TypeError(\"Must pass 'hours' and/or 'minutes' as keywords\")"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863
] | 15 | 0.000913 |
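Illustrative call shapes for the helper above; _td_check is internal range validation, assumed here to pass values through:

import datetime

default = datetime.timedelta(minutes=45)
# _build_offset(None, {}, default)                          -> default
# _build_offset(None, {'minutes': 30}, default)             -> 30 minutes
# _build_offset(datetime.timedelta(minutes=5), {}, default) -> 5 minutes
# _build_offset(datetime.timedelta(1), {'hours': 1}, default) raises ValueError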
def execute(self, context):
"""
Call the OpsgenieAlertHook to post message
"""
self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)
self.hook.execute(self._build_opsgenie_payload()) | [
"def",
"execute",
"(",
"self",
",",
"context",
")",
":",
"self",
".",
"hook",
"=",
"OpsgenieAlertHook",
"(",
"self",
".",
"opsgenie_conn_id",
")",
"self",
".",
"hook",
".",
"execute",
"(",
"self",
".",
"_build_opsgenie_payload",
"(",
")",
")"
] | 36 | 0.00905 | [
"def execute(self, context):\n",
" \"\"\"\n",
" Call the OpsgenieAlertHook to post message\n",
" \"\"\"\n",
" self.hook = OpsgenieAlertHook(self.opsgenie_conn_id)\n",
" self.hook.execute(self._build_opsgenie_payload())"
] | [
0,
0.08333333333333333,
0,
0,
0,
0.017543859649122806
] | 6 | 0.016813 |
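A hedged DAG sketch: this execute() belongs to an Airflow Opsgenie alert operator; the operator name and argument set are assumed from the hook:

# alert = OpsgenieAlertOperator(task_id='page_oncall',
#                               message='High error rate',
#                               opsgenie_conn_id='opsgenie_default')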
def deploy_windows(host,
port=445,
timeout=900,
username='Administrator',
password=None,
name=None,
sock_dir=None,
conf_file=None,
start_action=None,
parallel=False,
minion_pub=None,
minion_pem=None,
minion_conf=None,
keep_tmp=False,
script_args=None,
script_env=None,
port_timeout=15,
preseed_minion_keys=None,
win_installer=None,
master=None,
tmp_dir='C:\\salttmp',
opts=None,
master_sign_pub_file=None,
use_winrm=False,
winrm_port=5986,
winrm_use_ssl=True,
winrm_verify_ssl=True,
**kwargs):
'''
Copy the install files to a remote Windows box, and execute them
'''
if not isinstance(opts, dict):
opts = {}
if use_winrm and not HAS_WINRM:
log.error('WinRM requested but module winrm could not be imported')
return False
if not use_winrm and has_winexe() and not HAS_PSEXEC:
salt.utils.versions.warn_until(
'Sodium',
'Support for winexe has been deprecated and will be removed in '
'Sodium, please install pypsexec instead.'
)
starttime = time.mktime(time.localtime())
log.debug('Deploying %s at %s (Windows)', host, starttime)
log.trace('HAS_WINRM: %s, use_winrm: %s', HAS_WINRM, use_winrm)
port_available = wait_for_port(host=host, port=port, timeout=port_timeout * 60)
if not port_available:
return False
service_available = False
winrm_session = None
if HAS_WINRM and use_winrm:
winrm_session = wait_for_winrm(host=host, port=winrm_port,
username=username, password=password,
timeout=port_timeout * 60, use_ssl=winrm_use_ssl,
verify=winrm_verify_ssl)
if winrm_session is not None:
service_available = True
else:
service_available = wait_for_psexecsvc(host=host, port=port,
username=username, password=password,
timeout=port_timeout * 60)
if port_available and service_available:
log.debug('SMB port %s on %s is available', port, host)
log.debug('Logging into %s:%s as %s', host, port, username)
newtimeout = timeout - (time.mktime(time.localtime()) - starttime)
smb_conn = salt.utils.smb.get_conn(host, username, password)
if smb_conn is False:
log.error('Please install smbprotocol to enable SMB functionality')
return False
salt.utils.smb.mkdirs('salttemp', conn=smb_conn)
salt.utils.smb.mkdirs('salt/conf/pki/minion', conn=smb_conn)
if minion_pub:
salt.utils.smb.put_str(minion_pub, 'salt\\conf\\pki\\minion\\minion.pub', conn=smb_conn)
if minion_pem:
salt.utils.smb.put_str(minion_pem, 'salt\\conf\\pki\\minion\\minion.pem', conn=smb_conn)
if master_sign_pub_file:
# Read master-sign.pub file
log.debug("Copying master_sign.pub file from %s to minion", master_sign_pub_file)
try:
salt.utils.smb.put_file(
master_sign_pub_file,
'salt\\conf\\pki\\minion\\master_sign.pub',
'C$',
conn=smb_conn,
)
except Exception as e:
log.debug("Exception copying master_sign.pub file %s to minion", master_sign_pub_file)
# Copy over win_installer
# win_installer refers to a file such as:
# /root/Salt-Minion-0.17.0-win32-Setup.exe
# ..which exists on the same machine as salt-cloud
comps = win_installer.split('/')
local_path = '/'.join(comps[:-1])
installer = comps[-1]
salt.utils.smb.put_file(
win_installer,
'salttemp\\{0}'.format(installer),
'C$',
conn=smb_conn,
)
if use_winrm:
winrm_cmd(winrm_session, 'c:\\salttemp\\{0}'.format(installer), ['/S', '/master={0}'.format(master),
'/minion-name={0}'.format(name)]
)
else:
cmd = 'c:\\salttemp\\{0}'.format(installer)
args = "/S /master={0} /minion-name={1}".format(master, name)
stdout, stderr, ret_code = run_psexec_command(
cmd, args, host, username, password
)
if ret_code != 0:
raise Exception('Fail installer {0}'.format(ret_code))
# Copy over minion_conf
if minion_conf:
if not isinstance(minion_conf, dict):
                # Let's not just fail regarding this change, especially
# since we can handle it
raise DeprecationWarning(
'`salt.utils.cloud.deploy_windows` now only accepts '
'dictionaries for its `minion_conf` parameter. '
'Loading YAML...'
)
minion_grains = minion_conf.pop('grains', {})
if minion_grains:
salt.utils.smb.put_str(
salt_config_to_yaml(minion_grains, line_break='\r\n'),
'salt\\conf\\grains',
conn=smb_conn
)
# Add special windows minion configuration
# that must be in the minion config file
windows_minion_conf = {
'ipc_mode': 'tcp',
'root_dir': 'c:\\salt',
'pki_dir': '/conf/pki/minion',
'multiprocessing': False,
}
minion_conf = dict(minion_conf, **windows_minion_conf)
salt.utils.smb.put_str(
salt_config_to_yaml(minion_conf, line_break='\r\n'),
'salt\\conf\\minion',
conn=smb_conn
)
# Delete C:\salttmp\ and installer file
# Unless keep_tmp is True
if not keep_tmp:
if use_winrm:
winrm_cmd(winrm_session, 'rmdir', ['/Q', '/S', 'C:\\salttemp\\'])
else:
salt.utils.smb.delete_file('salttemp\\{0}'.format(installer), 'C$', conn=smb_conn)
salt.utils.smb.delete_directory('salttemp', 'C$', conn=smb_conn)
# Shell out to psexec to ensure salt-minion service started
if use_winrm:
winrm_cmd(winrm_session, 'sc', ['stop', 'salt-minion'])
time.sleep(5)
winrm_cmd(winrm_session, 'sc', ['start', 'salt-minion'])
else:
stdout, stderr, ret_code = run_psexec_command(
'cmd.exe', '/c sc stop salt-minion', host, username, password
)
if ret_code != 0:
return False
time.sleep(5)
log.debug('Run psexec: sc start salt-minion')
stdout, stderr, ret_code = run_psexec_command(
'cmd.exe', '/c sc start salt-minion', host, username, password
)
if ret_code != 0:
return False
# Fire deploy action
fire_event(
'event',
'{0} has been deployed at {1}'.format(name, host),
'salt/cloud/{0}/deploy_windows'.format(name),
args={'name': name},
sock_dir=opts.get(
'sock_dir',
os.path.join(__opts__['sock_dir'], 'master')),
transport=opts.get('transport', 'zeromq')
)
return True
return False | [
"def",
"deploy_windows",
"(",
"host",
",",
"port",
"=",
"445",
",",
"timeout",
"=",
"900",
",",
"username",
"=",
"'Administrator'",
",",
"password",
"=",
"None",
",",
"name",
"=",
"None",
",",
"sock_dir",
"=",
"None",
",",
"conf_file",
"=",
"None",
",",
"start_action",
"=",
"None",
",",
"parallel",
"=",
"False",
",",
"minion_pub",
"=",
"None",
",",
"minion_pem",
"=",
"None",
",",
"minion_conf",
"=",
"None",
",",
"keep_tmp",
"=",
"False",
",",
"script_args",
"=",
"None",
",",
"script_env",
"=",
"None",
",",
"port_timeout",
"=",
"15",
",",
"preseed_minion_keys",
"=",
"None",
",",
"win_installer",
"=",
"None",
",",
"master",
"=",
"None",
",",
"tmp_dir",
"=",
"'C:\\\\salttmp'",
",",
"opts",
"=",
"None",
",",
"master_sign_pub_file",
"=",
"None",
",",
"use_winrm",
"=",
"False",
",",
"winrm_port",
"=",
"5986",
",",
"winrm_use_ssl",
"=",
"True",
",",
"winrm_verify_ssl",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"isinstance",
"(",
"opts",
",",
"dict",
")",
":",
"opts",
"=",
"{",
"}",
"if",
"use_winrm",
"and",
"not",
"HAS_WINRM",
":",
"log",
".",
"error",
"(",
"'WinRM requested but module winrm could not be imported'",
")",
"return",
"False",
"if",
"not",
"use_winrm",
"and",
"has_winexe",
"(",
")",
"and",
"not",
"HAS_PSEXEC",
":",
"salt",
".",
"utils",
".",
"versions",
".",
"warn_until",
"(",
"'Sodium'",
",",
"'Support for winexe has been deprecated and will be removed in '",
"'Sodium, please install pypsexec instead.'",
")",
"starttime",
"=",
"time",
".",
"mktime",
"(",
"time",
".",
"localtime",
"(",
")",
")",
"log",
".",
"debug",
"(",
"'Deploying %s at %s (Windows)'",
",",
"host",
",",
"starttime",
")",
"log",
".",
"trace",
"(",
"'HAS_WINRM: %s, use_winrm: %s'",
",",
"HAS_WINRM",
",",
"use_winrm",
")",
"port_available",
"=",
"wait_for_port",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"timeout",
"=",
"port_timeout",
"*",
"60",
")",
"if",
"not",
"port_available",
":",
"return",
"False",
"service_available",
"=",
"False",
"winrm_session",
"=",
"None",
"if",
"HAS_WINRM",
"and",
"use_winrm",
":",
"winrm_session",
"=",
"wait_for_winrm",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"winrm_port",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"timeout",
"=",
"port_timeout",
"*",
"60",
",",
"use_ssl",
"=",
"winrm_use_ssl",
",",
"verify",
"=",
"winrm_verify_ssl",
")",
"if",
"winrm_session",
"is",
"not",
"None",
":",
"service_available",
"=",
"True",
"else",
":",
"service_available",
"=",
"wait_for_psexecsvc",
"(",
"host",
"=",
"host",
",",
"port",
"=",
"port",
",",
"username",
"=",
"username",
",",
"password",
"=",
"password",
",",
"timeout",
"=",
"port_timeout",
"*",
"60",
")",
"if",
"port_available",
"and",
"service_available",
":",
"log",
".",
"debug",
"(",
"'SMB port %s on %s is available'",
",",
"port",
",",
"host",
")",
"log",
".",
"debug",
"(",
"'Logging into %s:%s as %s'",
",",
"host",
",",
"port",
",",
"username",
")",
"newtimeout",
"=",
"timeout",
"-",
"(",
"time",
".",
"mktime",
"(",
"time",
".",
"localtime",
"(",
")",
")",
"-",
"starttime",
")",
"smb_conn",
"=",
"salt",
".",
"utils",
".",
"smb",
".",
"get_conn",
"(",
"host",
",",
"username",
",",
"password",
")",
"if",
"smb_conn",
"is",
"False",
":",
"log",
".",
"error",
"(",
"'Please install smbprotocol to enable SMB functionality'",
")",
"return",
"False",
"salt",
".",
"utils",
".",
"smb",
".",
"mkdirs",
"(",
"'salttemp'",
",",
"conn",
"=",
"smb_conn",
")",
"salt",
".",
"utils",
".",
"smb",
".",
"mkdirs",
"(",
"'salt/conf/pki/minion'",
",",
"conn",
"=",
"smb_conn",
")",
"if",
"minion_pub",
":",
"salt",
".",
"utils",
".",
"smb",
".",
"put_str",
"(",
"minion_pub",
",",
"'salt\\\\conf\\\\pki\\\\minion\\\\minion.pub'",
",",
"conn",
"=",
"smb_conn",
")",
"if",
"minion_pem",
":",
"salt",
".",
"utils",
".",
"smb",
".",
"put_str",
"(",
"minion_pem",
",",
"'salt\\\\conf\\\\pki\\\\minion\\\\minion.pem'",
",",
"conn",
"=",
"smb_conn",
")",
"if",
"master_sign_pub_file",
":",
"# Read master-sign.pub file",
"log",
".",
"debug",
"(",
"\"Copying master_sign.pub file from %s to minion\"",
",",
"master_sign_pub_file",
")",
"try",
":",
"salt",
".",
"utils",
".",
"smb",
".",
"put_file",
"(",
"master_sign_pub_file",
",",
"'salt\\\\conf\\\\pki\\\\minion\\\\master_sign.pub'",
",",
"'C$'",
",",
"conn",
"=",
"smb_conn",
",",
")",
"except",
"Exception",
"as",
"e",
":",
"log",
".",
"debug",
"(",
"\"Exception copying master_sign.pub file %s to minion\"",
",",
"master_sign_pub_file",
")",
"# Copy over win_installer",
"# win_installer refers to a file such as:",
"# /root/Salt-Minion-0.17.0-win32-Setup.exe",
"# ..which exists on the same machine as salt-cloud",
"comps",
"=",
"win_installer",
".",
"split",
"(",
"'/'",
")",
"local_path",
"=",
"'/'",
".",
"join",
"(",
"comps",
"[",
":",
"-",
"1",
"]",
")",
"installer",
"=",
"comps",
"[",
"-",
"1",
"]",
"salt",
".",
"utils",
".",
"smb",
".",
"put_file",
"(",
"win_installer",
",",
"'salttemp\\\\{0}'",
".",
"format",
"(",
"installer",
")",
",",
"'C$'",
",",
"conn",
"=",
"smb_conn",
",",
")",
"if",
"use_winrm",
":",
"winrm_cmd",
"(",
"winrm_session",
",",
"'c:\\\\salttemp\\\\{0}'",
".",
"format",
"(",
"installer",
")",
",",
"[",
"'/S'",
",",
"'/master={0}'",
".",
"format",
"(",
"master",
")",
",",
"'/minion-name={0}'",
".",
"format",
"(",
"name",
")",
"]",
")",
"else",
":",
"cmd",
"=",
"'c:\\\\salttemp\\\\{0}'",
".",
"format",
"(",
"installer",
")",
"args",
"=",
"\"/S /master={0} /minion-name={1}\"",
".",
"format",
"(",
"master",
",",
"name",
")",
"stdout",
",",
"stderr",
",",
"ret_code",
"=",
"run_psexec_command",
"(",
"cmd",
",",
"args",
",",
"host",
",",
"username",
",",
"password",
")",
"if",
"ret_code",
"!=",
"0",
":",
"raise",
"Exception",
"(",
"'Fail installer {0}'",
".",
"format",
"(",
"ret_code",
")",
")",
"# Copy over minion_conf",
"if",
"minion_conf",
":",
"if",
"not",
"isinstance",
"(",
"minion_conf",
",",
"dict",
")",
":",
"# Let's not just fail regarding this change, specially",
"# since we can handle it",
"raise",
"DeprecationWarning",
"(",
"'`salt.utils.cloud.deploy_windows` now only accepts '",
"'dictionaries for its `minion_conf` parameter. '",
"'Loading YAML...'",
")",
"minion_grains",
"=",
"minion_conf",
".",
"pop",
"(",
"'grains'",
",",
"{",
"}",
")",
"if",
"minion_grains",
":",
"salt",
".",
"utils",
".",
"smb",
".",
"put_str",
"(",
"salt_config_to_yaml",
"(",
"minion_grains",
",",
"line_break",
"=",
"'\\r\\n'",
")",
",",
"'salt\\\\conf\\\\grains'",
",",
"conn",
"=",
"smb_conn",
")",
"# Add special windows minion configuration",
"# that must be in the minion config file",
"windows_minion_conf",
"=",
"{",
"'ipc_mode'",
":",
"'tcp'",
",",
"'root_dir'",
":",
"'c:\\\\salt'",
",",
"'pki_dir'",
":",
"'/conf/pki/minion'",
",",
"'multiprocessing'",
":",
"False",
",",
"}",
"minion_conf",
"=",
"dict",
"(",
"minion_conf",
",",
"*",
"*",
"windows_minion_conf",
")",
"salt",
".",
"utils",
".",
"smb",
".",
"put_str",
"(",
"salt_config_to_yaml",
"(",
"minion_conf",
",",
"line_break",
"=",
"'\\r\\n'",
")",
",",
"'salt\\\\conf\\\\minion'",
",",
"conn",
"=",
"smb_conn",
")",
"# Delete C:\\salttmp\\ and installer file",
"# Unless keep_tmp is True",
"if",
"not",
"keep_tmp",
":",
"if",
"use_winrm",
":",
"winrm_cmd",
"(",
"winrm_session",
",",
"'rmdir'",
",",
"[",
"'/Q'",
",",
"'/S'",
",",
"'C:\\\\salttemp\\\\'",
"]",
")",
"else",
":",
"salt",
".",
"utils",
".",
"smb",
".",
"delete_file",
"(",
"'salttemp\\\\{0}'",
".",
"format",
"(",
"installer",
")",
",",
"'C$'",
",",
"conn",
"=",
"smb_conn",
")",
"salt",
".",
"utils",
".",
"smb",
".",
"delete_directory",
"(",
"'salttemp'",
",",
"'C$'",
",",
"conn",
"=",
"smb_conn",
")",
"# Shell out to psexec to ensure salt-minion service started",
"if",
"use_winrm",
":",
"winrm_cmd",
"(",
"winrm_session",
",",
"'sc'",
",",
"[",
"'stop'",
",",
"'salt-minion'",
"]",
")",
"time",
".",
"sleep",
"(",
"5",
")",
"winrm_cmd",
"(",
"winrm_session",
",",
"'sc'",
",",
"[",
"'start'",
",",
"'salt-minion'",
"]",
")",
"else",
":",
"stdout",
",",
"stderr",
",",
"ret_code",
"=",
"run_psexec_command",
"(",
"'cmd.exe'",
",",
"'/c sc stop salt-minion'",
",",
"host",
",",
"username",
",",
"password",
")",
"if",
"ret_code",
"!=",
"0",
":",
"return",
"False",
"time",
".",
"sleep",
"(",
"5",
")",
"log",
".",
"debug",
"(",
"'Run psexec: sc start salt-minion'",
")",
"stdout",
",",
"stderr",
",",
"ret_code",
"=",
"run_psexec_command",
"(",
"'cmd.exe'",
",",
"'/c sc start salt-minion'",
",",
"host",
",",
"username",
",",
"password",
")",
"if",
"ret_code",
"!=",
"0",
":",
"return",
"False",
"# Fire deploy action",
"fire_event",
"(",
"'event'",
",",
"'{0} has been deployed at {1}'",
".",
"format",
"(",
"name",
",",
"host",
")",
",",
"'salt/cloud/{0}/deploy_windows'",
".",
"format",
"(",
"name",
")",
",",
"args",
"=",
"{",
"'name'",
":",
"name",
"}",
",",
"sock_dir",
"=",
"opts",
".",
"get",
"(",
"'sock_dir'",
",",
"os",
".",
"path",
".",
"join",
"(",
"__opts__",
"[",
"'sock_dir'",
"]",
",",
"'master'",
")",
")",
",",
"transport",
"=",
"opts",
".",
"get",
"(",
"'transport'",
",",
"'zeromq'",
")",
")",
"return",
"True",
"return",
"False"
] | 38.633663 | 0.001749 | [
"def deploy_windows(host,\n",
" port=445,\n",
" timeout=900,\n",
" username='Administrator',\n",
" password=None,\n",
" name=None,\n",
" sock_dir=None,\n",
" conf_file=None,\n",
" start_action=None,\n",
" parallel=False,\n",
" minion_pub=None,\n",
" minion_pem=None,\n",
" minion_conf=None,\n",
" keep_tmp=False,\n",
" script_args=None,\n",
" script_env=None,\n",
" port_timeout=15,\n",
" preseed_minion_keys=None,\n",
" win_installer=None,\n",
" master=None,\n",
" tmp_dir='C:\\\\salttmp',\n",
" opts=None,\n",
" master_sign_pub_file=None,\n",
" use_winrm=False,\n",
" winrm_port=5986,\n",
" winrm_use_ssl=True,\n",
" winrm_verify_ssl=True,\n",
" **kwargs):\n",
" '''\n",
" Copy the install files to a remote Windows box, and execute them\n",
" '''\n",
" if not isinstance(opts, dict):\n",
" opts = {}\n",
"\n",
" if use_winrm and not HAS_WINRM:\n",
" log.error('WinRM requested but module winrm could not be imported')\n",
" return False\n",
"\n",
" if not use_winrm and has_winexe() and not HAS_PSEXEC:\n",
" salt.utils.versions.warn_until(\n",
" 'Sodium',\n",
" 'Support for winexe has been deprecated and will be removed in '\n",
" 'Sodium, please install pypsexec instead.'\n",
" )\n",
"\n",
" starttime = time.mktime(time.localtime())\n",
" log.debug('Deploying %s at %s (Windows)', host, starttime)\n",
" log.trace('HAS_WINRM: %s, use_winrm: %s', HAS_WINRM, use_winrm)\n",
"\n",
" port_available = wait_for_port(host=host, port=port, timeout=port_timeout * 60)\n",
"\n",
" if not port_available:\n",
" return False\n",
"\n",
" service_available = False\n",
" winrm_session = None\n",
"\n",
" if HAS_WINRM and use_winrm:\n",
" winrm_session = wait_for_winrm(host=host, port=winrm_port,\n",
" username=username, password=password,\n",
" timeout=port_timeout * 60, use_ssl=winrm_use_ssl,\n",
" verify=winrm_verify_ssl)\n",
" if winrm_session is not None:\n",
" service_available = True\n",
" else:\n",
" service_available = wait_for_psexecsvc(host=host, port=port,\n",
" username=username, password=password,\n",
" timeout=port_timeout * 60)\n",
"\n",
" if port_available and service_available:\n",
" log.debug('SMB port %s on %s is available', port, host)\n",
" log.debug('Logging into %s:%s as %s', host, port, username)\n",
" newtimeout = timeout - (time.mktime(time.localtime()) - starttime)\n",
" smb_conn = salt.utils.smb.get_conn(host, username, password)\n",
" if smb_conn is False:\n",
" log.error('Please install smbprotocol to enable SMB functionality')\n",
" return False\n",
"\n",
" salt.utils.smb.mkdirs('salttemp', conn=smb_conn)\n",
" salt.utils.smb.mkdirs('salt/conf/pki/minion', conn=smb_conn)\n",
"\n",
" if minion_pub:\n",
" salt.utils.smb.put_str(minion_pub, 'salt\\\\conf\\\\pki\\\\minion\\\\minion.pub', conn=smb_conn)\n",
"\n",
" if minion_pem:\n",
" salt.utils.smb.put_str(minion_pem, 'salt\\\\conf\\\\pki\\\\minion\\\\minion.pem', conn=smb_conn)\n",
"\n",
" if master_sign_pub_file:\n",
" # Read master-sign.pub file\n",
" log.debug(\"Copying master_sign.pub file from %s to minion\", master_sign_pub_file)\n",
" try:\n",
" salt.utils.smb.put_file(\n",
" master_sign_pub_file,\n",
" 'salt\\\\conf\\\\pki\\\\minion\\\\master_sign.pub',\n",
" 'C$',\n",
" conn=smb_conn,\n",
" )\n",
" except Exception as e:\n",
" log.debug(\"Exception copying master_sign.pub file %s to minion\", master_sign_pub_file)\n",
"\n",
" # Copy over win_installer\n",
" # win_installer refers to a file such as:\n",
" # /root/Salt-Minion-0.17.0-win32-Setup.exe\n",
" # ..which exists on the same machine as salt-cloud\n",
" comps = win_installer.split('/')\n",
" local_path = '/'.join(comps[:-1])\n",
" installer = comps[-1]\n",
" salt.utils.smb.put_file(\n",
" win_installer,\n",
" 'salttemp\\\\{0}'.format(installer),\n",
" 'C$',\n",
" conn=smb_conn,\n",
" )\n",
"\n",
" if use_winrm:\n",
" winrm_cmd(winrm_session, 'c:\\\\salttemp\\\\{0}'.format(installer), ['/S', '/master={0}'.format(master),\n",
" '/minion-name={0}'.format(name)]\n",
" )\n",
" else:\n",
" cmd = 'c:\\\\salttemp\\\\{0}'.format(installer)\n",
" args = \"/S /master={0} /minion-name={1}\".format(master, name)\n",
" stdout, stderr, ret_code = run_psexec_command(\n",
" cmd, args, host, username, password\n",
" )\n",
"\n",
" if ret_code != 0:\n",
" raise Exception('Fail installer {0}'.format(ret_code))\n",
"\n",
" # Copy over minion_conf\n",
" if minion_conf:\n",
" if not isinstance(minion_conf, dict):\n",
" # Let's not just fail regarding this change, specially\n",
" # since we can handle it\n",
" raise DeprecationWarning(\n",
" '`salt.utils.cloud.deploy_windows` now only accepts '\n",
" 'dictionaries for its `minion_conf` parameter. '\n",
" 'Loading YAML...'\n",
" )\n",
" minion_grains = minion_conf.pop('grains', {})\n",
" if minion_grains:\n",
" salt.utils.smb.put_str(\n",
" salt_config_to_yaml(minion_grains, line_break='\\r\\n'),\n",
" 'salt\\\\conf\\\\grains',\n",
" conn=smb_conn\n",
" )\n",
" # Add special windows minion configuration\n",
" # that must be in the minion config file\n",
" windows_minion_conf = {\n",
" 'ipc_mode': 'tcp',\n",
" 'root_dir': 'c:\\\\salt',\n",
" 'pki_dir': '/conf/pki/minion',\n",
" 'multiprocessing': False,\n",
" }\n",
" minion_conf = dict(minion_conf, **windows_minion_conf)\n",
" salt.utils.smb.put_str(\n",
" salt_config_to_yaml(minion_conf, line_break='\\r\\n'),\n",
" 'salt\\\\conf\\\\minion',\n",
" conn=smb_conn\n",
" )\n",
" # Delete C:\\salttmp\\ and installer file\n",
" # Unless keep_tmp is True\n",
" if not keep_tmp:\n",
" if use_winrm:\n",
" winrm_cmd(winrm_session, 'rmdir', ['/Q', '/S', 'C:\\\\salttemp\\\\'])\n",
" else:\n",
" salt.utils.smb.delete_file('salttemp\\\\{0}'.format(installer), 'C$', conn=smb_conn)\n",
" salt.utils.smb.delete_directory('salttemp', 'C$', conn=smb_conn)\n",
" # Shell out to psexec to ensure salt-minion service started\n",
" if use_winrm:\n",
" winrm_cmd(winrm_session, 'sc', ['stop', 'salt-minion'])\n",
" time.sleep(5)\n",
" winrm_cmd(winrm_session, 'sc', ['start', 'salt-minion'])\n",
" else:\n",
" stdout, stderr, ret_code = run_psexec_command(\n",
" 'cmd.exe', '/c sc stop salt-minion', host, username, password\n",
" )\n",
" if ret_code != 0:\n",
" return False\n",
"\n",
" time.sleep(5)\n",
"\n",
" log.debug('Run psexec: sc start salt-minion')\n",
" stdout, stderr, ret_code = run_psexec_command(\n",
" 'cmd.exe', '/c sc start salt-minion', host, username, password\n",
" )\n",
" if ret_code != 0:\n",
" return False\n",
"\n",
" # Fire deploy action\n",
" fire_event(\n",
" 'event',\n",
" '{0} has been deployed at {1}'.format(name, host),\n",
" 'salt/cloud/{0}/deploy_windows'.format(name),\n",
" args={'name': name},\n",
" sock_dir=opts.get(\n",
" 'sock_dir',\n",
" os.path.join(__opts__['sock_dir'], 'master')),\n",
" transport=opts.get('transport', 'zeromq')\n",
" )\n",
"\n",
" return True\n",
" return False"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009900990099009901,
0,
0,
0.009900990099009901,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.008849557522123894,
0.00909090909090909,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0.010101010101010102,
0.012345679012345678,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 202 | 0.001295 |
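A hedged invocation sketch for the deploy above; only a representative subset of the keyword arguments is shown and every concrete value is a placeholder (the installer path reuses the example from the function's own comments):

# ok = deploy_windows(host='10.0.0.5',
#                     username='Administrator', password='...',
#                     name='winminion', master='salt.example.com',
#                     win_installer='/root/Salt-Minion-0.17.0-win32-Setup.exe',
#                     opts=__opts__)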
def set_default_decoder_parameters():
"""Wraps openjp2 library function opj_set_default_decoder_parameters.
Sets decoding parameters to default values.
Returns
-------
dparam : DecompressionParametersType
Decompression parameters.
"""
ARGTYPES = [ctypes.POINTER(DecompressionParametersType)]
OPENJP2.opj_set_default_decoder_parameters.argtypes = ARGTYPES
OPENJP2.opj_set_default_decoder_parameters.restype = ctypes.c_void_p
dparams = DecompressionParametersType()
OPENJP2.opj_set_default_decoder_parameters(ctypes.byref(dparams))
return dparams | [
"def",
"set_default_decoder_parameters",
"(",
")",
":",
"ARGTYPES",
"=",
"[",
"ctypes",
".",
"POINTER",
"(",
"DecompressionParametersType",
")",
"]",
"OPENJP2",
".",
"opj_set_default_decoder_parameters",
".",
"argtypes",
"=",
"ARGTYPES",
"OPENJP2",
".",
"opj_set_default_decoder_parameters",
".",
"restype",
"=",
"ctypes",
".",
"c_void_p",
"dparams",
"=",
"DecompressionParametersType",
"(",
")",
"OPENJP2",
".",
"opj_set_default_decoder_parameters",
"(",
"ctypes",
".",
"byref",
"(",
"dparams",
")",
")",
"return",
"dparams"
] | 34.529412 | 0.001658 | [
"def set_default_decoder_parameters():\n",
" \"\"\"Wraps openjp2 library function opj_set_default_decoder_parameters.\n",
"\n",
" Sets decoding parameters to default values.\n",
"\n",
" Returns\n",
" -------\n",
" dparam : DecompressionParametersType\n",
" Decompression parameters.\n",
" \"\"\"\n",
" ARGTYPES = [ctypes.POINTER(DecompressionParametersType)]\n",
" OPENJP2.opj_set_default_decoder_parameters.argtypes = ARGTYPES\n",
" OPENJP2.opj_set_default_decoder_parameters.restype = ctypes.c_void_p\n",
"\n",
" dparams = DecompressionParametersType()\n",
" OPENJP2.opj_set_default_decoder_parameters(ctypes.byref(dparams))\n",
" return dparams"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 17 | 0.003268 |
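A hedged sketch of the usual flow: the returned ctypes struct is tweaked and then handed to decoder setup (field and function names assumed from the openjp2 bindings):

# dparams = set_default_decoder_parameters()
# dparams.cp_reduce = 1          # e.g. decode at a reduced resolution
# setup_decoder(codec, dparams)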
def _pulse_method_call(pulse_op, func=None, index_arg=True):
'''Creates following synchronous wrapper for async pa_operation callable:
wrapper(index, ...) -> pulse_op(index, [*]args_func(...))
index_arg=False: wrapper(...) -> pulse_op([*]args_func(...))'''
def _wrapper(self, *args, **kws):
if index_arg:
if 'index' in kws: index = kws.pop('index')
else: index, args = args[0], args[1:]
pulse_args = func(*args, **kws) if func else list()
if not is_list(pulse_args): pulse_args = [pulse_args]
if index_arg: pulse_args = [index] + list(pulse_args)
with self._pulse_op_cb() as cb:
try: pulse_op(self._ctx, *(list(pulse_args) + [cb, None]))
except c.ArgumentError as err: raise TypeError(err.args)
except c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])
func_args = list(inspect.getargspec(func or (lambda: None)))
func_args[0] = list(func_args[0])
if index_arg: func_args[0] = ['index'] + func_args[0]
_wrapper.__name__ = '...'
_wrapper.__doc__ = 'Signature: func' + inspect.formatargspec(*func_args)
if func.__doc__: _wrapper.__doc__ += '\n\n' + func.__doc__
return _wrapper | [
"def",
"_pulse_method_call",
"(",
"pulse_op",
",",
"func",
"=",
"None",
",",
"index_arg",
"=",
"True",
")",
":",
"def",
"_wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kws",
")",
":",
"if",
"index_arg",
":",
"if",
"'index'",
"in",
"kws",
":",
"index",
"=",
"kws",
".",
"pop",
"(",
"'index'",
")",
"else",
":",
"index",
",",
"args",
"=",
"args",
"[",
"0",
"]",
",",
"args",
"[",
"1",
":",
"]",
"pulse_args",
"=",
"func",
"(",
"*",
"args",
",",
"*",
"*",
"kws",
")",
"if",
"func",
"else",
"list",
"(",
")",
"if",
"not",
"is_list",
"(",
"pulse_args",
")",
":",
"pulse_args",
"=",
"[",
"pulse_args",
"]",
"if",
"index_arg",
":",
"pulse_args",
"=",
"[",
"index",
"]",
"+",
"list",
"(",
"pulse_args",
")",
"with",
"self",
".",
"_pulse_op_cb",
"(",
")",
"as",
"cb",
":",
"try",
":",
"pulse_op",
"(",
"self",
".",
"_ctx",
",",
"*",
"(",
"list",
"(",
"pulse_args",
")",
"+",
"[",
"cb",
",",
"None",
"]",
")",
")",
"except",
"c",
".",
"ArgumentError",
"as",
"err",
":",
"raise",
"TypeError",
"(",
"err",
".",
"args",
")",
"except",
"c",
".",
"pa",
".",
"CallError",
"as",
"err",
":",
"raise",
"PulseOperationInvalid",
"(",
"err",
".",
"args",
"[",
"-",
"1",
"]",
")",
"func_args",
"=",
"list",
"(",
"inspect",
".",
"getargspec",
"(",
"func",
"or",
"(",
"lambda",
":",
"None",
")",
")",
")",
"func_args",
"[",
"0",
"]",
"=",
"list",
"(",
"func_args",
"[",
"0",
"]",
")",
"if",
"index_arg",
":",
"func_args",
"[",
"0",
"]",
"=",
"[",
"'index'",
"]",
"+",
"func_args",
"[",
"0",
"]",
"_wrapper",
".",
"__name__",
"=",
"'...'",
"_wrapper",
".",
"__doc__",
"=",
"'Signature: func'",
"+",
"inspect",
".",
"formatargspec",
"(",
"*",
"func_args",
")",
"if",
"func",
".",
"__doc__",
":",
"_wrapper",
".",
"__doc__",
"+=",
"'\\n\\n'",
"+",
"func",
".",
"__doc__",
"return",
"_wrapper"
] | 51.227273 | 0.027875 | [
"def _pulse_method_call(pulse_op, func=None, index_arg=True):\n",
"\t\t'''Creates following synchronous wrapper for async pa_operation callable:\n",
"\t\t\twrapper(index, ...) -> pulse_op(index, [*]args_func(...))\n",
"\t\t\tindex_arg=False: wrapper(...) -> pulse_op([*]args_func(...))'''\n",
"\t\tdef _wrapper(self, *args, **kws):\n",
"\t\t\tif index_arg:\n",
"\t\t\t\tif 'index' in kws: index = kws.pop('index')\n",
"\t\t\t\telse: index, args = args[0], args[1:]\n",
"\t\t\tpulse_args = func(*args, **kws) if func else list()\n",
"\t\t\tif not is_list(pulse_args): pulse_args = [pulse_args]\n",
"\t\t\tif index_arg: pulse_args = [index] + list(pulse_args)\n",
"\t\t\twith self._pulse_op_cb() as cb:\n",
"\t\t\t\ttry: pulse_op(self._ctx, *(list(pulse_args) + [cb, None]))\n",
"\t\t\t\texcept c.ArgumentError as err: raise TypeError(err.args)\n",
"\t\t\t\texcept c.pa.CallError as err: raise PulseOperationInvalid(err.args[-1])\n",
"\t\tfunc_args = list(inspect.getargspec(func or (lambda: None)))\n",
"\t\tfunc_args[0] = list(func_args[0])\n",
"\t\tif index_arg: func_args[0] = ['index'] + func_args[0]\n",
"\t\t_wrapper.__name__ = '...'\n",
"\t\t_wrapper.__doc__ = 'Signature: func' + inspect.formatargspec(*func_args)\n",
"\t\tif func.__doc__: _wrapper.__doc__ += '\\n\\n' + func.__doc__\n",
"\t\treturn _wrapper"
] | [
0,
0.02631578947368421,
0.01639344262295082,
0.014925373134328358,
0.027777777777777776,
0.058823529411764705,
0.041666666666666664,
0.047619047619047616,
0.01818181818181818,
0.03508771929824561,
0.03508771929824561,
0.02857142857142857,
0.031746031746031744,
0.03278688524590164,
0.02631578947368421,
0.015873015873015872,
0.027777777777777776,
0.03571428571428571,
0.03571428571428571,
0.013333333333333334,
0.03278688524590164,
0.11764705882352941
] | 22 | 0.032734 |
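In pulsectl this factory builds the public wrapper methods, roughly as below (names assumed from the library's conventions):

# sink_mute = _pulse_method_call(
#     c.pa.context_set_sink_mute_by_index, lambda mute=True: mute)
# # yields pulse.sink_mute(index, mute=True) on the wrapper object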
def weld_replace(array, weld_type, this, to):
"""Replaces 'this' values to 'to' value.
Parameters
----------
array : numpy.ndarray or WeldObject
Input array.
weld_type : WeldType
Type of the data in the array.
this : {int, float, str, bool, bytes}
Scalar to replace.
to : {int, float, str, bool, bytes}
Scalar to replace with.
Returns
-------
WeldObject
Representation of this computation.
"""
if not isinstance(this, str):
this = to_weld_literal(this, weld_type)
to = to_weld_literal(to, weld_type)
obj_id, weld_obj = create_weld_object(array)
weld_template = """map({array},
|e: {type}|
if(e == {this},
{to},
e
)
)"""
weld_obj.weld_code = weld_template.format(array=obj_id,
type=weld_type,
this=this,
to=to)
return weld_obj | [
"def",
"weld_replace",
"(",
"array",
",",
"weld_type",
",",
"this",
",",
"to",
")",
":",
"if",
"not",
"isinstance",
"(",
"this",
",",
"str",
")",
":",
"this",
"=",
"to_weld_literal",
"(",
"this",
",",
"weld_type",
")",
"to",
"=",
"to_weld_literal",
"(",
"to",
",",
"weld_type",
")",
"obj_id",
",",
"weld_obj",
"=",
"create_weld_object",
"(",
"array",
")",
"weld_template",
"=",
"\"\"\"map({array},\n |e: {type}|\n if(e == {this},\n {to},\n e\n ) \n)\"\"\"",
"weld_obj",
".",
"weld_code",
"=",
"weld_template",
".",
"format",
"(",
"array",
"=",
"obj_id",
",",
"type",
"=",
"weld_type",
",",
"this",
"=",
"this",
",",
"to",
"=",
"to",
")",
"return",
"weld_obj"
] | 24.875 | 0.001934 | [
"def weld_replace(array, weld_type, this, to):\n",
" \"\"\"Replaces 'this' values to 'to' value.\n",
"\n",
" Parameters\n",
" ----------\n",
" array : numpy.ndarray or WeldObject\n",
" Input array.\n",
" weld_type : WeldType\n",
" Type of the data in the array.\n",
" this : {int, float, str, bool, bytes}\n",
" Scalar to replace.\n",
" to : {int, float, str, bool, bytes}\n",
" Scalar to replace with.\n",
"\n",
" Returns\n",
" -------\n",
" WeldObject\n",
" Representation of this computation.\n",
"\n",
" \"\"\"\n",
" if not isinstance(this, str):\n",
" this = to_weld_literal(this, weld_type)\n",
" to = to_weld_literal(to, weld_type)\n",
"\n",
" obj_id, weld_obj = create_weld_object(array)\n",
"\n",
" weld_template = \"\"\"map({array},\n",
" |e: {type}|\n",
" if(e == {this},\n",
" {to},\n",
" e\n",
" ) \n",
")\"\"\"\n",
"\n",
" weld_obj.weld_code = weld_template.format(array=obj_id,\n",
" type=weld_type,\n",
" this=this,\n",
" to=to)\n",
"\n",
" return weld_obj"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 40 | 0.003102 |
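A hedged usage sketch (the weld binding type names are assumptions):

# import numpy as np
# obj = weld_replace(np.array([1, 2, -999], dtype=np.int64),
#                    WeldLong(), -999, 0)
# obj.evaluate(WeldVec(WeldLong()))   # -> [1, 2, 0]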
def load_with_classes(filename, classes):
"""Attempts to load file by trial-and-error using a given list of classes.
Arguments:
filename -- full path to file
classes -- list of classes having a load() method
Returns: DataFile object if loaded successfully, or None if not.
Note: it will stop at the first successful load.
Attention: this is not good if there is a bug in any of the file readers,
because *all exceptions will be silenced!*
"""
ok = False
for class_ in classes:
obj = class_()
try:
obj.load(filename)
ok = True
# # cannot let IOError through because pyfits raises IOError!!
# except IOError:
# raise
# # also cannot let OSError through because astropy.io.fits raises OSError!!
# except OSError:
# raise
except FileNotFoundError:
raise
except Exception as e: # (ValueError, NotImplementedError):
# Note: for debugging, switch the below to True
if a99.logging_level == logging.DEBUG:
a99.get_python_logger().exception("Error trying with class \"{0!s}\"".format(
class_.__name__))
pass
if ok:
break
if ok:
return obj
return None | [
"def",
"load_with_classes",
"(",
"filename",
",",
"classes",
")",
":",
"ok",
"=",
"False",
"for",
"class_",
"in",
"classes",
":",
"obj",
"=",
"class_",
"(",
")",
"try",
":",
"obj",
".",
"load",
"(",
"filename",
")",
"ok",
"=",
"True",
"# # cannot let IOError through because pyfits raises IOError!!",
"# except IOError:",
"# raise",
"# # also cannot let OSError through because astropy.io.fits raises OSError!!",
"# except OSError:",
"# raise",
"except",
"FileNotFoundError",
":",
"raise",
"except",
"Exception",
"as",
"e",
":",
"# (ValueError, NotImplementedError):",
"# Note: for debugging, switch the below to True",
"if",
"a99",
".",
"logging_level",
"==",
"logging",
".",
"DEBUG",
":",
"a99",
".",
"get_python_logger",
"(",
")",
".",
"exception",
"(",
"\"Error trying with class \\\"{0!s}\\\"\"",
".",
"format",
"(",
"class_",
".",
"__name__",
")",
")",
"pass",
"if",
"ok",
":",
"break",
"if",
"ok",
":",
"return",
"obj",
"return",
"None"
] | 32.825 | 0.002219 | [
"def load_with_classes(filename, classes):\n",
" \"\"\"Attempts to load file by trial-and-error using a given list of classes.\n",
"\n",
" Arguments:\n",
" filename -- full path to file\n",
" classes -- list of classes having a load() method\n",
"\n",
" Returns: DataFile object if loaded successfully, or None if not.\n",
"\n",
" Note: it will stop at the first successful load.\n",
"\n",
" Attention: this is not good if there is a bug in any of the file readers,\n",
" because *all exceptions will be silenced!*\n",
" \"\"\"\n",
"\n",
" ok = False\n",
" for class_ in classes:\n",
" obj = class_()\n",
" try:\n",
" obj.load(filename)\n",
" ok = True\n",
" # # cannot let IOError through because pyfits raises IOError!!\n",
" # except IOError:\n",
" # raise\n",
" # # also cannot let OSError through because astropy.io.fits raises OSError!!\n",
" # except OSError:\n",
" # raise\n",
" except FileNotFoundError:\n",
" raise\n",
" except Exception as e: # (ValueError, NotImplementedError):\n",
" # Note: for debugging, switch the below to True\n",
" if a99.logging_level == logging.DEBUG:\n",
" a99.get_python_logger().exception(\"Error trying with class \\\"{0!s}\\\"\".format(\n",
" class_.__name__))\n",
" pass\n",
" if ok:\n",
" break\n",
" if ok:\n",
" return obj\n",
" return None"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0.06666666666666667
] | 40 | 0.002227 |
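A hedged usage sketch for the trial-and-error loader; the reader class names are placeholders:

# f = load_with_classes('flux.fits', [FileSpectrumFits, FileSpectrumXY])
# if f is None:
#     print('no registered reader recognized the file')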
def eq(self, other):
"""
Construct a Filter returning True for asset/date pairs where the output
of ``self`` matches ``other``.
"""
# We treat this as an error because missing_values have NaN semantics,
# which means this would return an array of all False, which is almost
# certainly not what the user wants.
if other == self.missing_value:
raise ValueError(
"Comparison against self.missing_value ({value!r}) in"
" {typename}.eq().\n"
"Missing values have NaN semantics, so the "
"requested comparison would always produce False.\n"
"Use the isnull() method to check for missing values.".format(
value=other,
typename=(type(self).__name__),
)
)
if isinstance(other, Number) != (self.dtype == int64_dtype):
raise InvalidClassifierComparison(self, other)
if isinstance(other, Number):
return NumExprFilter.create(
"x_0 == {other}".format(other=int(other)),
binds=(self,),
)
else:
return ArrayPredicate(
term=self,
op=operator.eq,
opargs=(other,),
) | [
"def",
"eq",
"(",
"self",
",",
"other",
")",
":",
"# We treat this as an error because missing_values have NaN semantics,",
"# which means this would return an array of all False, which is almost",
"# certainly not what the user wants.",
"if",
"other",
"==",
"self",
".",
"missing_value",
":",
"raise",
"ValueError",
"(",
"\"Comparison against self.missing_value ({value!r}) in\"",
"\" {typename}.eq().\\n\"",
"\"Missing values have NaN semantics, so the \"",
"\"requested comparison would always produce False.\\n\"",
"\"Use the isnull() method to check for missing values.\"",
".",
"format",
"(",
"value",
"=",
"other",
",",
"typename",
"=",
"(",
"type",
"(",
"self",
")",
".",
"__name__",
")",
",",
")",
")",
"if",
"isinstance",
"(",
"other",
",",
"Number",
")",
"!=",
"(",
"self",
".",
"dtype",
"==",
"int64_dtype",
")",
":",
"raise",
"InvalidClassifierComparison",
"(",
"self",
",",
"other",
")",
"if",
"isinstance",
"(",
"other",
",",
"Number",
")",
":",
"return",
"NumExprFilter",
".",
"create",
"(",
"\"x_0 == {other}\"",
".",
"format",
"(",
"other",
"=",
"int",
"(",
"other",
")",
")",
",",
"binds",
"=",
"(",
"self",
",",
")",
",",
")",
"else",
":",
"return",
"ArrayPredicate",
"(",
"term",
"=",
"self",
",",
"op",
"=",
"operator",
".",
"eq",
",",
"opargs",
"=",
"(",
"other",
",",
")",
",",
")"
] | 38.411765 | 0.001494 | [
"def eq(self, other):\n",
" \"\"\"\n",
" Construct a Filter returning True for asset/date pairs where the output\n",
" of ``self`` matches ``other``.\n",
" \"\"\"\n",
" # We treat this as an error because missing_values have NaN semantics,\n",
" # which means this would return an array of all False, which is almost\n",
" # certainly not what the user wants.\n",
" if other == self.missing_value:\n",
" raise ValueError(\n",
" \"Comparison against self.missing_value ({value!r}) in\"\n",
" \" {typename}.eq().\\n\"\n",
" \"Missing values have NaN semantics, so the \"\n",
" \"requested comparison would always produce False.\\n\"\n",
" \"Use the isnull() method to check for missing values.\".format(\n",
" value=other,\n",
" typename=(type(self).__name__),\n",
" )\n",
" )\n",
"\n",
" if isinstance(other, Number) != (self.dtype == int64_dtype):\n",
" raise InvalidClassifierComparison(self, other)\n",
"\n",
" if isinstance(other, Number):\n",
" return NumExprFilter.create(\n",
" \"x_0 == {other}\".format(other=int(other)),\n",
" binds=(self,),\n",
" )\n",
" else:\n",
" return ArrayPredicate(\n",
" term=self,\n",
" op=operator.eq,\n",
" opargs=(other,),\n",
" )"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693
] | 34 | 0.004713 |
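A hedged Pipeline sketch (zipline API assumed): eq() on an int64 Classifier yields a Filter that can serve as a screen:

# tech_only = sector_classifier.eq(311)   # True where sector code == 311
# pipe = Pipeline(screen=tech_only)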
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None,
samples=None, callers=None):
"""Create a plot from a summarized validation file.
Does new-style plotting of summarized metrics of
false negative rate and false discovery rate.
https://en.wikipedia.org/wiki/Sensitivity_and_specificity
"""
mpl.use('Agg', force=True)
df = pd.read_csv(val_file)
grouped = df.groupby(["sample", "caller", "vtype"])
df = grouped.apply(_calculate_fnr_fdr)
df = df.reset_index()
if len(df) == 0:
return []
else:
out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype)
_do_classifyplot(df, out_file, title, size, samples, callers)
return [out_file] | [
"def",
"classifyplot_from_valfile",
"(",
"val_file",
",",
"outtype",
"=",
"\"png\"",
",",
"title",
"=",
"None",
",",
"size",
"=",
"None",
",",
"samples",
"=",
"None",
",",
"callers",
"=",
"None",
")",
":",
"mpl",
".",
"use",
"(",
"'Agg'",
",",
"force",
"=",
"True",
")",
"df",
"=",
"pd",
".",
"read_csv",
"(",
"val_file",
")",
"grouped",
"=",
"df",
".",
"groupby",
"(",
"[",
"\"sample\"",
",",
"\"caller\"",
",",
"\"vtype\"",
"]",
")",
"df",
"=",
"grouped",
".",
"apply",
"(",
"_calculate_fnr_fdr",
")",
"df",
"=",
"df",
".",
"reset_index",
"(",
")",
"if",
"len",
"(",
"df",
")",
"==",
"0",
":",
"return",
"[",
"]",
"else",
":",
"out_file",
"=",
"\"%s.%s\"",
"%",
"(",
"os",
".",
"path",
".",
"splitext",
"(",
"val_file",
")",
"[",
"0",
"]",
",",
"outtype",
")",
"_do_classifyplot",
"(",
"df",
",",
"out_file",
",",
"title",
",",
"size",
",",
"samples",
",",
"callers",
")",
"return",
"[",
"out_file",
"]"
] | 39.473684 | 0.001302 | [
"def classifyplot_from_valfile(val_file, outtype=\"png\", title=None, size=None,\n",
" samples=None, callers=None):\n",
" \"\"\"Create a plot from a summarized validation file.\n",
"\n",
" Does new-style plotting of summarized metrics of\n",
" false negative rate and false discovery rate.\n",
" https://en.wikipedia.org/wiki/Sensitivity_and_specificity\n",
" \"\"\"\n",
" mpl.use('Agg', force=True)\n",
" df = pd.read_csv(val_file)\n",
" grouped = df.groupby([\"sample\", \"caller\", \"vtype\"])\n",
" df = grouped.apply(_calculate_fnr_fdr)\n",
" df = df.reset_index()\n",
" if len(df) == 0:\n",
" return []\n",
" else:\n",
" out_file = \"%s.%s\" % (os.path.splitext(val_file)[0], outtype)\n",
" _do_classifyplot(df, out_file, title, size, samples, callers)\n",
" return [out_file]"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.04
] | 19 | 0.002105 |
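A hedged usage sketch; 'giab-validate.csv' is a hypothetical summary CSV carrying the sample/caller/vtype columns the function groups on:

    plots = classifyplot_from_valfile("giab-validate.csv", outtype="png",
                                      title="Validation summary")
    print(plots)  # e.g. ['giab-validate.png'], or [] when the CSV has no rows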
def history(self, hash):
"""
Retrieve the ownership tree of all editions of a piece given the hash.
Args:
hash (str): Hash of the file to check. Can be created with the
:class:`File` class
Returns:
dict: Ownership tree of all editions of a piece.
.. note:: For now we only support searching the blockchain by
the piece hash.
"""
txs = self._t.get(hash, max_transactions=10000)['transactions']
tree = defaultdict(list)
number_editions = 0
for tx in txs:
_tx = self._t.get(tx['txid'])
txid = _tx['txid']
verb_str = BlockchainSpider.check_script(_tx['vouts'])
verb = Spoolverb.from_verb(verb_str)
from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)
timestamp_utc = _tx['time']
action = verb.action
edition_number = 0
if action != 'EDITIONS':
edition_number = verb.edition_number
else:
number_editions = verb.num_editions
tree[edition_number].append({'txid': txid,
'verb': verb_str,
'from_address': from_address,
'to_address': to_address,
'piece_address': piece_address,
'timestamp_utc': timestamp_utc,
'action': action,
'number_editions': number_editions,
'edition_number': edition_number})
# lets update the records with the number of editions of the piece since we do not know
# this information before the EDITIONS transaction
for edition, chain in tree.items():
[d.update({'number_editions': number_editions}) for d in chain]
return dict(tree) | [
"def",
"history",
"(",
"self",
",",
"hash",
")",
":",
"txs",
"=",
"self",
".",
"_t",
".",
"get",
"(",
"hash",
",",
"max_transactions",
"=",
"10000",
")",
"[",
"'transactions'",
"]",
"tree",
"=",
"defaultdict",
"(",
"list",
")",
"number_editions",
"=",
"0",
"for",
"tx",
"in",
"txs",
":",
"_tx",
"=",
"self",
".",
"_t",
".",
"get",
"(",
"tx",
"[",
"'txid'",
"]",
")",
"txid",
"=",
"_tx",
"[",
"'txid'",
"]",
"verb_str",
"=",
"BlockchainSpider",
".",
"check_script",
"(",
"_tx",
"[",
"'vouts'",
"]",
")",
"verb",
"=",
"Spoolverb",
".",
"from_verb",
"(",
"verb_str",
")",
"from_address",
",",
"to_address",
",",
"piece_address",
"=",
"BlockchainSpider",
".",
"_get_addresses",
"(",
"_tx",
")",
"timestamp_utc",
"=",
"_tx",
"[",
"'time'",
"]",
"action",
"=",
"verb",
".",
"action",
"edition_number",
"=",
"0",
"if",
"action",
"!=",
"'EDITIONS'",
":",
"edition_number",
"=",
"verb",
".",
"edition_number",
"else",
":",
"number_editions",
"=",
"verb",
".",
"num_editions",
"tree",
"[",
"edition_number",
"]",
".",
"append",
"(",
"{",
"'txid'",
":",
"txid",
",",
"'verb'",
":",
"verb_str",
",",
"'from_address'",
":",
"from_address",
",",
"'to_address'",
":",
"to_address",
",",
"'piece_address'",
":",
"piece_address",
",",
"'timestamp_utc'",
":",
"timestamp_utc",
",",
"'action'",
":",
"action",
",",
"'number_editions'",
":",
"number_editions",
",",
"'edition_number'",
":",
"edition_number",
"}",
")",
"# lets update the records with the number of editions of the piece since we do not know",
"# this information before the EDITIONS transaction",
"for",
"edition",
",",
"chain",
"in",
"tree",
".",
"items",
"(",
")",
":",
"[",
"d",
".",
"update",
"(",
"{",
"'number_editions'",
":",
"number_editions",
"}",
")",
"for",
"d",
"in",
"chain",
"]",
"return",
"dict",
"(",
"tree",
")"
] | 40.816327 | 0.001953 | [
"def history(self, hash):\n",
" \"\"\"\n",
" Retrieve the ownership tree of all editions of a piece given the hash.\n",
"\n",
" Args:\n",
" hash (str): Hash of the file to check. Can be created with the\n",
" :class:`File` class\n",
"\n",
" Returns:\n",
" dict: Ownsership tree of all editions of a piece.\n",
"\n",
" .. note:: For now we only support searching the blockchain by\n",
" the piece hash.\n",
"\n",
" \"\"\"\n",
" txs = self._t.get(hash, max_transactions=10000)['transactions']\n",
" tree = defaultdict(list)\n",
" number_editions = 0\n",
"\n",
" for tx in txs:\n",
" _tx = self._t.get(tx['txid'])\n",
" txid = _tx['txid']\n",
" verb_str = BlockchainSpider.check_script(_tx['vouts'])\n",
" verb = Spoolverb.from_verb(verb_str)\n",
" from_address, to_address, piece_address = BlockchainSpider._get_addresses(_tx)\n",
" timestamp_utc = _tx['time']\n",
" action = verb.action\n",
"\n",
" edition_number = 0\n",
" if action != 'EDITIONS':\n",
" edition_number = verb.edition_number\n",
" else:\n",
" number_editions = verb.num_editions\n",
"\n",
" tree[edition_number].append({'txid': txid,\n",
" 'verb': verb_str,\n",
" 'from_address': from_address,\n",
" 'to_address': to_address,\n",
" 'piece_address': piece_address,\n",
" 'timestamp_utc': timestamp_utc,\n",
" 'action': action,\n",
" 'number_editions': number_editions,\n",
" 'edition_number': edition_number})\n",
"\n",
" # lets update the records with the number of editions of the piece since we do not know\n",
" # this information before the EDITIONS transaction\n",
" for edition, chain in tree.items():\n",
" [d.update({'number_editions': number_editions}) for d in chain]\n",
" return dict(tree)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01098901098901099,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0,
0,
0,
0.04
] | 49 | 0.002954 |
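Assuming `spider` is an already-connected BlockchainSpider instance (a sketch, not verified against the real client), the returned tree maps edition numbers to transaction chains:

    tree = spider.history('5d2e...')  # placeholder piece hash
    for edition, chain in tree.items():
        print(edition, [entry['action'] for entry in chain])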
def kill(self):
"""Kill instantiated process
:raises: `AttributeError` if the instantiated process doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_
"""
BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)
BaseShellOperator._wait_process(self._process, self._batcmd.sh_cmd, self._success_exitcodes)
BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)
self._process = None | [
"def",
"kill",
"(",
"self",
")",
":",
"BaseShellOperator",
".",
"_close_process_input_stdin",
"(",
"self",
".",
"_batcmd",
".",
"batch_to_file_s",
")",
"BaseShellOperator",
".",
"_wait_process",
"(",
"self",
".",
"_process",
",",
"self",
".",
"_batcmd",
".",
"sh_cmd",
",",
"self",
".",
"_success_exitcodes",
")",
"BaseShellOperator",
".",
"_rm_process_input_tmpfiles",
"(",
"self",
".",
"_batcmd",
".",
"batch_to_file_s",
")",
"self",
".",
"_process",
"=",
"None"
] | 55.777778 | 0.011765 | [
"def kill(self):\n",
" \"\"\"Kill instantiated process\n",
"\n",
" :raises: `AttributeError` if instantiated process doesn't seem to satisfy `constraints <relshell.daemon_shelloperator.DaemonShellOperator>`_\n",
" \"\"\"\n",
" BaseShellOperator._close_process_input_stdin(self._batcmd.batch_to_file_s)\n",
" BaseShellOperator._wait_process(self._process, self._batcmd.sh_cmd, self._success_exitcodes)\n",
" BaseShellOperator._rm_process_input_tmpfiles(self._batcmd.batch_to_file_s)\n",
" self._process = None"
] | [
0,
0.02702702702702703,
0,
0.006711409395973154,
0,
0.012048192771084338,
0.009900990099009901,
0.012048192771084338,
0.03571428571428571
] | 9 | 0.011494 |
def get_all_settings(profile, store='local'):
'''
Gets all the properties for the specified profile in the specified store
Args:
profile (str):
The firewall profile to query. Valid options are:
- domain
- public
- private
store (str):
The store to use. This is either the local firewall policy or the
policy defined by local group policy. Valid options are:
- lgpo
- local
Default is ``local``
Returns:
dict: A dictionary containing the specified settings
'''
ret = dict()
ret.update(get_settings(profile=profile, section='state', store=store))
ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))
ret.update(get_settings(profile=profile, section='settings', store=store))
ret.update(get_settings(profile=profile, section='logging', store=store))
return ret | [
"def",
"get_all_settings",
"(",
"profile",
",",
"store",
"=",
"'local'",
")",
":",
"ret",
"=",
"dict",
"(",
")",
"ret",
".",
"update",
"(",
"get_settings",
"(",
"profile",
"=",
"profile",
",",
"section",
"=",
"'state'",
",",
"store",
"=",
"store",
")",
")",
"ret",
".",
"update",
"(",
"get_settings",
"(",
"profile",
"=",
"profile",
",",
"section",
"=",
"'firewallpolicy'",
",",
"store",
"=",
"store",
")",
")",
"ret",
".",
"update",
"(",
"get_settings",
"(",
"profile",
"=",
"profile",
",",
"section",
"=",
"'settings'",
",",
"store",
"=",
"store",
")",
")",
"ret",
".",
"update",
"(",
"get_settings",
"(",
"profile",
"=",
"profile",
",",
"section",
"=",
"'logging'",
",",
"store",
"=",
"store",
")",
")",
"return",
"ret"
] | 30.258065 | 0.002066 | [
"def get_all_settings(profile, store='local'):\n",
" '''\n",
" Gets all the properties for the specified profile in the specified store\n",
"\n",
" Args:\n",
"\n",
" profile (str):\n",
" The firewall profile to query. Valid options are:\n",
"\n",
" - domain\n",
" - public\n",
" - private\n",
"\n",
" store (str):\n",
" The store to use. This is either the local firewall policy or the\n",
" policy defined by local group policy. Valid options are:\n",
"\n",
" - lgpo\n",
" - local\n",
"\n",
" Default is ``local``\n",
"\n",
" Returns:\n",
" dict: A dictionary containing the specified settings\n",
" '''\n",
" ret = dict()\n",
" ret.update(get_settings(profile=profile, section='state', store=store))\n",
" ret.update(get_settings(profile=profile, section='firewallpolicy', store=store))\n",
" ret.update(get_settings(profile=profile, section='settings', store=store))\n",
" ret.update(get_settings(profile=profile, section='logging', store=store))\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0,
0.07142857142857142
] | 31 | 0.002684 |
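This reads like a Salt execution module; a direct Python call would presumably look as follows (the profile and store values are just the documented options):

    settings = get_all_settings(profile='domain', store='lgpo')
    print(settings)  # merged state/firewallpolicy/settings/logging dict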
def get_forecast(self):
'''
If configured to do so, make an API request to retrieve the forecast
data for the configured/queried weather station, and return the low and
high temperatures. Otherwise, return two empty strings.
'''
no_data = ('', '')
if self.forecast:
query_url = STATION_QUERY_URL % (self.api_key,
'forecast',
self.station_id)
try:
response = self.api_request(query_url)['forecast']
response = response['simpleforecast']['forecastday'][0]
except (KeyError, IndexError, TypeError):
self.logger.error(
'No forecast data found for %s', self.station_id)
self.data['update_error'] = self.update_error
return no_data
unit = 'celsius' if self.units == 'metric' else 'fahrenheit'
low_temp = response.get('low', {}).get(unit, '')
high_temp = response.get('high', {}).get(unit, '')
return low_temp, high_temp
else:
return no_data | [
"def",
"get_forecast",
"(",
"self",
")",
":",
"no_data",
"=",
"(",
"''",
",",
"''",
")",
"if",
"self",
".",
"forecast",
":",
"query_url",
"=",
"STATION_QUERY_URL",
"%",
"(",
"self",
".",
"api_key",
",",
"'forecast'",
",",
"self",
".",
"station_id",
")",
"try",
":",
"response",
"=",
"self",
".",
"api_request",
"(",
"query_url",
")",
"[",
"'forecast'",
"]",
"response",
"=",
"response",
"[",
"'simpleforecast'",
"]",
"[",
"'forecastday'",
"]",
"[",
"0",
"]",
"except",
"(",
"KeyError",
",",
"IndexError",
",",
"TypeError",
")",
":",
"self",
".",
"logger",
".",
"error",
"(",
"'No forecast data found for %s'",
",",
"self",
".",
"station_id",
")",
"self",
".",
"data",
"[",
"'update_error'",
"]",
"=",
"self",
".",
"update_error",
"return",
"no_data",
"unit",
"=",
"'celsius'",
"if",
"self",
".",
"units",
"==",
"'metric'",
"else",
"'fahrenheit'",
"low_temp",
"=",
"response",
".",
"get",
"(",
"'low'",
",",
"{",
"}",
")",
".",
"get",
"(",
"unit",
",",
"''",
")",
"high_temp",
"=",
"response",
".",
"get",
"(",
"'high'",
",",
"{",
"}",
")",
".",
"get",
"(",
"unit",
",",
"''",
")",
"return",
"low_temp",
",",
"high_temp",
"else",
":",
"return",
"no_data"
] | 44.615385 | 0.001688 | [
"def get_forecast(self):\n",
" '''\n",
" If configured to do so, make an API request to retrieve the forecast\n",
" data for the configured/queried weather station, and return the low and\n",
" high temperatures. Otherwise, return two empty strings.\n",
" '''\n",
" no_data = ('', '')\n",
" if self.forecast:\n",
" query_url = STATION_QUERY_URL % (self.api_key,\n",
" 'forecast',\n",
" self.station_id)\n",
" try:\n",
" response = self.api_request(query_url)['forecast']\n",
" response = response['simpleforecast']['forecastday'][0]\n",
" except (KeyError, IndexError, TypeError):\n",
" self.logger.error(\n",
" 'No forecast data found for %s', self.station_id)\n",
" self.data['update_error'] = self.update_error\n",
" return no_data\n",
"\n",
" unit = 'celsius' if self.units == 'metric' else 'fahrenheit'\n",
" low_temp = response.get('low', {}).get(unit, '')\n",
" high_temp = response.get('high', {}).get(unit, '')\n",
" return low_temp, high_temp\n",
" else:\n",
" return no_data"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464
] | 26 | 0.004684 |
def prepare_sentence(self, sentence):
"""Prepare the sentence for segment detection."""
# depending on how the morphological analysis was added, there may be
# phonetic markup. Remove it, if it exists.
for word in sentence:
for analysis in word[ANALYSIS]:
analysis[ROOT] = analysis[ROOT].replace('~', '')
analysis[ROOT] = re.sub('[?<\]]([aioueöäõü])', '\\1', analysis[ROOT])
return json.dumps({WORDS: sentence}) | [
"def",
"prepare_sentence",
"(",
"self",
",",
"sentence",
")",
":",
"# depending on how the morphological analysis was added, there may be",
"# phonetic markup. Remove it, if it exists.",
"for",
"word",
"in",
"sentence",
":",
"for",
"analysis",
"in",
"word",
"[",
"ANALYSIS",
"]",
":",
"analysis",
"[",
"ROOT",
"]",
"=",
"analysis",
"[",
"ROOT",
"]",
".",
"replace",
"(",
"'~'",
",",
"''",
")",
"analysis",
"[",
"ROOT",
"]",
"=",
"re",
".",
"sub",
"(",
"'[?<\\]]([aioueöäõü])', '\\",
"\\",
"', an",
"a",
"ysis[ROO",
"T",
"])",
"",
"",
"return",
"json",
".",
"dumps",
"(",
"{",
"WORDS",
":",
"sentence",
"}",
")"
] | 54.111111 | 0.008081 | [
"def prepare_sentence(self, sentence):\n",
" \"\"\"Prepare the sentence for segment detection.\"\"\"\n",
" # depending on how the morphological analysis was added, there may be\n",
" # phonetic markup. Remove it, if it exists.\n",
" for word in sentence:\n",
" for analysis in word[ANALYSIS]:\n",
" analysis[ROOT] = analysis[ROOT].replace('~', '')\n",
" analysis[ROOT] = re.sub('[?<\\]]([aioueöäõü])', '\\\\1', analysis[ROOT])\n",
" return json.dumps({WORDS: sentence})"
] | [
0,
0.017241379310344827,
0,
0,
0,
0,
0,
0.023255813953488372,
0.022727272727272728
] | 9 | 0.007025 |
def do_not_cache():
""" Return whether we should cache a page render """
from . import index # pylint: disable=cyclic-import
if index.in_progress():
# We are reindexing the site
return True
if request.if_none_match or request.if_modified_since:
# we might be returning a 304 NOT MODIFIED based on a client request,
# and we don't want to cache that as the result for *all* client
# requests to this URI
return True
return False | [
"def",
"do_not_cache",
"(",
")",
":",
"from",
".",
"import",
"index",
"# pylint: disable=cyclic-import",
"if",
"index",
".",
"in_progress",
"(",
")",
":",
"# We are reindexing the site",
"return",
"True",
"if",
"request",
".",
"if_none_match",
"or",
"request",
".",
"if_modified_since",
":",
"# we might be returning a 304 NOT MODIFIED based on a client request,",
"# and we don't want to cache that as the result for *all* client",
"# requests to this URI",
"return",
"True",
"return",
"False"
] | 30.3125 | 0.002 | [
"def do_not_cache():\n",
" \"\"\" Return whether we should cache a page render \"\"\"\n",
"\n",
" from . import index # pylint: disable=cyclic-import\n",
"\n",
" if index.in_progress():\n",
" # We are reindexing the site\n",
" return True\n",
"\n",
" if request.if_none_match or request.if_modified_since:\n",
" # we might be returning a 304 NOT MODIFIED based on a client request,\n",
" # and we don't want to cache that as the result for *all* client\n",
" # requests to this URI\n",
" return True\n",
"\n",
" return False"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 16 | 0.003906 |
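A sketch of a plausible call site, with `render_page` and `cache` as hypothetical stand-ins for the app's renderer and cache backend:

    def rendered(url):
        page = render_page(url)       # hypothetical renderer
        if not do_not_cache():
            cache.set(url, page)      # hypothetical cache backend
        return page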
def is_value_from_deprecated_setting(self, setting_name, deprecated_setting_name):
"""
Helps developers to determine where the settings helper got its value
from when dealing with settings that replace deprecated settings.
Returns ``True`` when the new setting (with the name ``setting_name``)
is a replacement for a deprecated setting (with the name
``deprecated_setting_name``) and the user is using the deprecated
setting in their Django settings to override behaviour.
"""
if not self.in_defaults(setting_name):
self._raise_invalid_setting_name_error(setting_name)
if not self.in_defaults(deprecated_setting_name):
self._raise_invalid_setting_name_error(deprecated_setting_name)
if deprecated_setting_name not in self._deprecated_settings:
raise ValueError(
"The '%s' setting is not deprecated. When using "
"settings.is_value_from_deprecated_setting(), the deprecated "
"setting name should be supplied as the second argument." %
deprecated_setting_name
)
if(
not self.is_overridden(setting_name) and
setting_name in self._replacement_settings
):
deprecations = self._replacement_settings[setting_name]
for item in deprecations:
if(
item.setting_name == deprecated_setting_name and
self.is_overridden(item.setting_name)
):
return True
return False | [
"def",
"is_value_from_deprecated_setting",
"(",
"self",
",",
"setting_name",
",",
"deprecated_setting_name",
")",
":",
"if",
"not",
"self",
".",
"in_defaults",
"(",
"setting_name",
")",
":",
"self",
".",
"_raise_invalid_setting_name_error",
"(",
"setting_name",
")",
"if",
"not",
"self",
".",
"in_defaults",
"(",
"deprecated_setting_name",
")",
":",
"self",
".",
"_raise_invalid_setting_name_error",
"(",
"deprecated_setting_name",
")",
"if",
"deprecated_setting_name",
"not",
"in",
"self",
".",
"_deprecated_settings",
":",
"raise",
"ValueError",
"(",
"\"The '%s' setting is not deprecated. When using \"",
"\"settings.is_value_from_deprecated_setting(), the deprecated \"",
"\"setting name should be supplied as the second argument.\"",
"%",
"deprecated_setting_name",
")",
"if",
"(",
"not",
"self",
".",
"is_overridden",
"(",
"setting_name",
")",
"and",
"setting_name",
"in",
"self",
".",
"_replacement_settings",
")",
":",
"deprecations",
"=",
"self",
".",
"_replacement_settings",
"[",
"setting_name",
"]",
"for",
"item",
"in",
"deprecations",
":",
"if",
"(",
"item",
".",
"setting_name",
"==",
"deprecated_setting_name",
"and",
"self",
".",
"is_overridden",
"(",
"item",
".",
"setting_name",
")",
")",
":",
"return",
"True",
"return",
"False"
] | 48.060606 | 0.001854 | [
"def is_value_from_deprecated_setting(self, setting_name, deprecated_setting_name):\n",
" \"\"\"\n",
" Helps developers to determine where the settings helper got it's value\n",
" from when dealing with settings that replace deprecated settings.\n",
"\n",
" Returns ``True`` when the new setting (with the name ``setting_name``)\n",
" is a replacement for a deprecated setting (with the name\n",
" ``deprecated_setting_name``) and the user is using the deprecated\n",
" setting in their Django settings to override behaviour.\n",
" \"\"\"\n",
" if not self.in_defaults(setting_name):\n",
" self._raise_invalid_setting_name_error(setting_name)\n",
" if not self.in_defaults(deprecated_setting_name):\n",
" self._raise_invalid_setting_name_error(deprecated_setting_name)\n",
" if deprecated_setting_name not in self._deprecated_settings:\n",
" raise ValueError(\n",
" \"The '%s' setting is not deprecated. When using \"\n",
" \"settings.is_value_from_deprecated_setting(), the deprecated \"\n",
" \"setting name should be supplied as the second argument.\" %\n",
" deprecated_setting_name\n",
" )\n",
" if(\n",
" not self.is_overridden(setting_name) and\n",
" setting_name in self._replacement_settings\n",
" ):\n",
" deprecations = self._replacement_settings[setting_name]\n",
" for item in deprecations:\n",
" if(\n",
" item.setting_name == deprecated_setting_name and\n",
" self.is_overridden(item.setting_name)\n",
" ):\n",
" return True\n",
" return False"
] | [
0.012048192771084338,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05
] | 33 | 0.004406 |
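A plausible call, assuming hypothetical setting names NEW_SETTING/OLD_SETTING and `settings` as the helper instance:

    import warnings

    if settings.is_value_from_deprecated_setting('NEW_SETTING', 'OLD_SETTING'):
        warnings.warn('OLD_SETTING is deprecated; use NEW_SETTING instead',
                      DeprecationWarning)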
def write(self, filepath, magicc_version):
"""
Write an input file to disk.
Parameters
----------
filepath : str
Filepath of the file to write.
magicc_version : int
The MAGICC version for which we want to write files. MAGICC7 and MAGICC6
namelists are incompatible hence we need to know which one we're writing
for.
"""
writer = determine_tool(filepath, "writer")(magicc_version=magicc_version)
writer.write(self, filepath) | [
"def",
"write",
"(",
"self",
",",
"filepath",
",",
"magicc_version",
")",
":",
"writer",
"=",
"determine_tool",
"(",
"filepath",
",",
"\"writer\"",
")",
"(",
"magicc_version",
"=",
"magicc_version",
")",
"writer",
".",
"write",
"(",
"self",
",",
"filepath",
")"
] | 33.125 | 0.009174 | [
"def write(self, filepath, magicc_version):\n",
" \"\"\"\n",
" Write an input file to disk.\n",
"\n",
" Parameters\n",
" ----------\n",
" filepath : str\n",
" Filepath of the file to write.\n",
"\n",
" magicc_version : int\n",
" The MAGICC version for which we want to write files. MAGICC7 and MAGICC6\n",
" namelists are incompatible hence we need to know which one we're writing\n",
" for.\n",
" \"\"\"\n",
" writer = determine_tool(filepath, \"writer\")(magicc_version=magicc_version)\n",
" writer.write(self, filepath)"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0.011764705882352941,
0,
0,
0.012048192771084338,
0.027777777777777776
] | 16 | 0.009168 |
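Usage presumably reduces to one call; the instance, filename and version below are assumptions:

    # `scen` is assumed to be a populated instance of the class above.
    scen.write('RCP26.SCEN', magicc_version=6)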
def updateMigrationRequestStatus(self, migration_status, migration_request_id):
"""
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are the only allowed changes for working through a migration.
3 -> 1 is allowed for retrying; the retry count is incremented by 1.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
upst = dict(migration_status=migration_status,
migration_request_id=migration_request_id,
last_modification_date=dbsUtils().getTime())
self.mgrRqUp.execute(conn, upst)
except:
if tran:tran.rollback()
raise
else:
if tran:tran.commit()
finally:
#open transaction is committed when conn closed.
if conn:conn.close() | [
"def",
"updateMigrationRequestStatus",
"(",
"self",
",",
"migration_status",
",",
"migration_request_id",
")",
":",
"conn",
"=",
"self",
".",
"dbi",
".",
"connection",
"(",
")",
"tran",
"=",
"conn",
".",
"begin",
"(",
")",
"try",
":",
"upst",
"=",
"dict",
"(",
"migration_status",
"=",
"migration_status",
",",
"migration_request_id",
"=",
"migration_request_id",
",",
"last_modification_date",
"=",
"dbsUtils",
"(",
")",
".",
"getTime",
"(",
")",
")",
"self",
".",
"mgrRqUp",
".",
"execute",
"(",
"conn",
",",
"upst",
")",
"except",
":",
"if",
"tran",
":",
"tran",
".",
"rollback",
"(",
")",
"raise",
"else",
":",
"if",
"tran",
":",
"tran",
".",
"commit",
"(",
")",
"finally",
":",
"#open transaction is committed when conn closed.",
"if",
"conn",
":",
"conn",
".",
"close",
"(",
")"
] | 29.575758 | 0.010913 | [
"def updateMigrationRequestStatus(self, migration_status, migration_request_id):\n",
" \"\"\"\n",
" migration_status:\n",
" 0=PENDING\n",
" 1=IN PROGRESS\n",
" 2=COMPLETED\n",
" 3=FAILED (will be retried)\n",
" 9=Terminally FAILED \n",
" status change:\n",
" 0 -> 1\n",
" 1 -> 2\n",
" 1 -> 3\n",
" 1 -> 9\n",
" are only allowed changes for working through migration.\n",
" 3 -> 1 is allowed for retrying and retry count +1.\n",
"\n",
" \"\"\"\n",
"\n",
" conn = self.dbi.connection()\n",
" tran = conn.begin()\n",
" try:\n",
" upst = dict(migration_status=migration_status,\n",
" migration_request_id=migration_request_id,\n",
" last_modification_date=dbsUtils().getTime())\n",
" self.mgrRqUp.execute(conn, upst)\n",
" except:\n",
" if tran:tran.rollback()\n",
" raise\n",
" else:\n",
" if tran:tran.commit()\n",
" finally:\n",
" #open transaction is committed when conn closed.\n",
" if conn:conn.close()"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0.034482758620689655,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0.05555555555555555,
0,
0,
0.058823529411764705,
0,
0.01639344262295082,
0.09375
] | 33 | 0.012268 |
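The transition rules in the docstring amount to a small state machine; here is a standalone sketch of the same table (not code from the DAO above):

    ALLOWED = {(0, 1), (1, 2), (1, 3), (1, 9), (3, 1)}  # (old, new) pairs

    def check_transition(old, new):
        if (old, new) not in ALLOWED:
            raise ValueError('illegal status change: %s -> %s' % (old, new))

    check_transition(0, 1)  # PENDING -> IN PROGRESS: allowed
    check_transition(3, 1)  # FAILED -> IN PROGRESS: allowed (retry, count +1)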
def read_data(self, chan=[], ref_chan=[], grp_name=None, concat_chan=False,
max_s_freq=30000, parent=None):
"""Read data for analysis. Adds data as 'data' in each dict.
Parameters
----------
chan : list of str
active channel names as they appear in record, without ref or group
If given an empty list, the channel specified in seg['chan'] will
be read for each segment
ref_chan : list of str
reference channel names as they appear in record, without group
grp_name : str
name of the channel group, required in GUI
concat_chan : bool
if True, data from all channels will be concatenated
max_s_freq : int
maximum sampling frequency
parent : QWidget
for GUI only. Identifies parent widget for display of progress
dialog.
"""
output = []
# Set up Progress Bar
if parent:
n_subseg = sum([len(x['times']) for x in self.segments])
progress = QProgressDialog('Fetching signal', 'Abort', 0, n_subseg,
parent)
progress.setWindowModality(Qt.ApplicationModal)
counter = 0
# Begin bundle loop; will yield one segment per loop
for i, seg in enumerate(self.segments):
one_segment = ChanTime()
one_segment.axis['chan'] = empty(1, dtype='O')
one_segment.axis['time'] = empty(1, dtype='O')
one_segment.data = empty(1, dtype='O')
subseg = []
# Subsegment loop; subsegments will be concatenated
for t0, t1 in seg['times']:
if parent:
progress.setValue(counter)
counter += 1
# if channel not specified, use segment channel
if chan:
active_chan = chan
elif seg['chan']:
active_chan = [seg['chan'].split(' (')[0]]
else:
raise ValueError('No channel was specified and the '
'segment at {}-{} has no channel.'.format(
t0, t1))
active_chan = chan if chan else [seg['chan'].split(' (')[0]]
chan_to_read = active_chan + ref_chan
data = self.dataset.read_data(chan=chan_to_read, begtime=t0,
endtime=t1)
# Downsample if necessary
if data.s_freq > max_s_freq:
q = int(data.s_freq / max_s_freq)
lg.debug('Decimate (no low-pass filter) at ' + str(q))
data.data[0] = data.data[0][:, slice(None, None, q)]
data.axis['time'][0] = data.axis['time'][0][slice(
None, None, q)]
data.s_freq = int(data.s_freq / q)
# read data from disk
subseg.append(_create_data(
data, active_chan, ref_chan=ref_chan, grp_name=grp_name))
one_segment.s_freq = s_freq = data.s_freq
one_segment.axis['chan'][0] = chs = subseg[0].axis['chan'][0]
one_segment.axis['time'][0] = timeline = hstack(
[x.axis['time'][0] for x in subseg])
one_segment.data[0] = empty((len(active_chan), len(timeline)),
dtype='f')
n_stitch = sum(asarray(diff(timeline) > 2/s_freq, dtype=bool))
for i, ch in enumerate(subseg[0].axis['chan'][0]):
one_segment.data[0][i, :] = hstack(
[x(chan=ch)[0] for x in subseg])
# For channel concatenation
if concat_chan and len(chs) > 1:
one_segment.data[0] = ravel(one_segment.data[0])
one_segment.axis['chan'][0] = asarray([(', ').join(chs)],
dtype='U')
# axis['time'] should not be used in this case
output.append({'data': one_segment,
'chan': active_chan,
'stage': seg['stage'],
'cycle': seg['cycle'],
'name': seg['name'],
'n_stitch': n_stitch
})
if parent:
if progress.wasCanceled():
parent.parent.statusBar().showMessage('Process canceled by'
' user.')
return
if parent:
progress.setValue(counter)
self.segments = output
return 1 | [
"def",
"read_data",
"(",
"self",
",",
"chan",
"=",
"[",
"]",
",",
"ref_chan",
"=",
"[",
"]",
",",
"grp_name",
"=",
"None",
",",
"concat_chan",
"=",
"False",
",",
"max_s_freq",
"=",
"30000",
",",
"parent",
"=",
"None",
")",
":",
"output",
"=",
"[",
"]",
"# Set up Progress Bar",
"if",
"parent",
":",
"n_subseg",
"=",
"sum",
"(",
"[",
"len",
"(",
"x",
"[",
"'times'",
"]",
")",
"for",
"x",
"in",
"self",
".",
"segments",
"]",
")",
"progress",
"=",
"QProgressDialog",
"(",
"'Fetching signal'",
",",
"'Abort'",
",",
"0",
",",
"n_subseg",
",",
"parent",
")",
"progress",
".",
"setWindowModality",
"(",
"Qt",
".",
"ApplicationModal",
")",
"counter",
"=",
"0",
"# Begin bundle loop; will yield one segment per loop",
"for",
"i",
",",
"seg",
"in",
"enumerate",
"(",
"self",
".",
"segments",
")",
":",
"one_segment",
"=",
"ChanTime",
"(",
")",
"one_segment",
".",
"axis",
"[",
"'chan'",
"]",
"=",
"empty",
"(",
"1",
",",
"dtype",
"=",
"'O'",
")",
"one_segment",
".",
"axis",
"[",
"'time'",
"]",
"=",
"empty",
"(",
"1",
",",
"dtype",
"=",
"'O'",
")",
"one_segment",
".",
"data",
"=",
"empty",
"(",
"1",
",",
"dtype",
"=",
"'O'",
")",
"subseg",
"=",
"[",
"]",
"# Subsegment loop; subsegments will be concatenated",
"for",
"t0",
",",
"t1",
"in",
"seg",
"[",
"'times'",
"]",
":",
"if",
"parent",
":",
"progress",
".",
"setValue",
"(",
"counter",
")",
"counter",
"+=",
"1",
"# if channel not specified, use segment channel",
"if",
"chan",
":",
"active_chan",
"=",
"chan",
"elif",
"seg",
"[",
"'chan'",
"]",
":",
"active_chan",
"=",
"[",
"seg",
"[",
"'chan'",
"]",
".",
"split",
"(",
"' ('",
")",
"[",
"0",
"]",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"'No channel was specified and the '",
"'segment at {}-{} has no channel.'",
".",
"format",
"(",
"t0",
",",
"t1",
")",
")",
"active_chan",
"=",
"chan",
"if",
"chan",
"else",
"[",
"seg",
"[",
"'chan'",
"]",
".",
"split",
"(",
"' ('",
")",
"[",
"0",
"]",
"]",
"chan_to_read",
"=",
"active_chan",
"+",
"ref_chan",
"data",
"=",
"self",
".",
"dataset",
".",
"read_data",
"(",
"chan",
"=",
"chan_to_read",
",",
"begtime",
"=",
"t0",
",",
"endtime",
"=",
"t1",
")",
"# Downsample if necessary",
"if",
"data",
".",
"s_freq",
">",
"max_s_freq",
":",
"q",
"=",
"int",
"(",
"data",
".",
"s_freq",
"/",
"max_s_freq",
")",
"lg",
".",
"debug",
"(",
"'Decimate (no low-pass filter) at '",
"+",
"str",
"(",
"q",
")",
")",
"data",
".",
"data",
"[",
"0",
"]",
"=",
"data",
".",
"data",
"[",
"0",
"]",
"[",
":",
",",
"slice",
"(",
"None",
",",
"None",
",",
"q",
")",
"]",
"data",
".",
"axis",
"[",
"'time'",
"]",
"[",
"0",
"]",
"=",
"data",
".",
"axis",
"[",
"'time'",
"]",
"[",
"0",
"]",
"[",
"slice",
"(",
"None",
",",
"None",
",",
"q",
")",
"]",
"data",
".",
"s_freq",
"=",
"int",
"(",
"data",
".",
"s_freq",
"/",
"q",
")",
"# read data from disk",
"subseg",
".",
"append",
"(",
"_create_data",
"(",
"data",
",",
"active_chan",
",",
"ref_chan",
"=",
"ref_chan",
",",
"grp_name",
"=",
"grp_name",
")",
")",
"one_segment",
".",
"s_freq",
"=",
"s_freq",
"=",
"data",
".",
"s_freq",
"one_segment",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"0",
"]",
"=",
"chs",
"=",
"subseg",
"[",
"0",
"]",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"0",
"]",
"one_segment",
".",
"axis",
"[",
"'time'",
"]",
"[",
"0",
"]",
"=",
"timeline",
"=",
"hstack",
"(",
"[",
"x",
".",
"axis",
"[",
"'time'",
"]",
"[",
"0",
"]",
"for",
"x",
"in",
"subseg",
"]",
")",
"one_segment",
".",
"data",
"[",
"0",
"]",
"=",
"empty",
"(",
"(",
"len",
"(",
"active_chan",
")",
",",
"len",
"(",
"timeline",
")",
")",
",",
"dtype",
"=",
"'f'",
")",
"n_stitch",
"=",
"sum",
"(",
"asarray",
"(",
"diff",
"(",
"timeline",
")",
">",
"2",
"/",
"s_freq",
",",
"dtype",
"=",
"bool",
")",
")",
"for",
"i",
",",
"ch",
"in",
"enumerate",
"(",
"subseg",
"[",
"0",
"]",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"0",
"]",
")",
":",
"one_segment",
".",
"data",
"[",
"0",
"]",
"[",
"i",
",",
":",
"]",
"=",
"hstack",
"(",
"[",
"x",
"(",
"chan",
"=",
"ch",
")",
"[",
"0",
"]",
"for",
"x",
"in",
"subseg",
"]",
")",
"# For channel concatenation",
"if",
"concat_chan",
"and",
"len",
"(",
"chs",
")",
">",
"1",
":",
"one_segment",
".",
"data",
"[",
"0",
"]",
"=",
"ravel",
"(",
"one_segment",
".",
"data",
"[",
"0",
"]",
")",
"one_segment",
".",
"axis",
"[",
"'chan'",
"]",
"[",
"0",
"]",
"=",
"asarray",
"(",
"[",
"(",
"', '",
")",
".",
"join",
"(",
"chs",
")",
"]",
",",
"dtype",
"=",
"'U'",
")",
"# axis['time'] should not be used in this case",
"output",
".",
"append",
"(",
"{",
"'data'",
":",
"one_segment",
",",
"'chan'",
":",
"active_chan",
",",
"'stage'",
":",
"seg",
"[",
"'stage'",
"]",
",",
"'cycle'",
":",
"seg",
"[",
"'cycle'",
"]",
",",
"'name'",
":",
"seg",
"[",
"'name'",
"]",
",",
"'n_stitch'",
":",
"n_stitch",
"}",
")",
"if",
"parent",
":",
"if",
"progress",
".",
"wasCanceled",
"(",
")",
":",
"parent",
".",
"parent",
".",
"statusBar",
"(",
")",
".",
"showMessage",
"(",
"'Process canceled by'",
"' user.'",
")",
"return",
"if",
"parent",
":",
"progress",
".",
"setValue",
"(",
"counter",
")",
"self",
".",
"segments",
"=",
"output",
"return",
"1"
] | 41.026316 | 0.00167 | [
"def read_data(self, chan=[], ref_chan=[], grp_name=None, concat_chan=False, \n",
" max_s_freq=30000, parent=None):\n",
" \"\"\"Read data for analysis. Adds data as 'data' in each dict.\n",
"\n",
" Parameters\n",
" ----------\n",
" chan : list of str\n",
" active channel names as they appear in record, without ref or group\n",
" If given an empty list, the channel specified in seg['chan'] will\n",
" be read for each segment\n",
" ref_chan : list of str\n",
" reference channel names as they appear in record, without group\n",
" grp_name : str\n",
" name of the channel group, required in GUI\n",
" concat_chan : bool\n",
" if True, data from all channels will be concatenated\n",
" max_s_freq: : int\n",
" maximum sampling frequency\n",
" parent : QWidget\n",
" for GUI only. Identifies parent widget for display of progress\n",
" dialog.\n",
" \"\"\"\n",
" output = []\n",
"\n",
" # Set up Progress Bar\n",
" if parent:\n",
" n_subseg = sum([len(x['times']) for x in self.segments])\n",
" progress = QProgressDialog('Fetching signal', 'Abort', 0, n_subseg,\n",
" parent)\n",
" progress.setWindowModality(Qt.ApplicationModal)\n",
" counter = 0\n",
"\n",
" # Begin bundle loop; will yield one segment per loop\n",
" for i, seg in enumerate(self.segments):\n",
" one_segment = ChanTime()\n",
" one_segment.axis['chan'] = empty(1, dtype='O')\n",
" one_segment.axis['time'] = empty(1, dtype='O')\n",
" one_segment.data = empty(1, dtype='O')\n",
" subseg = []\n",
"\n",
" # Subsegment loop; subsegments will be concatenated\n",
" for t0, t1 in seg['times']:\n",
" if parent:\n",
" progress.setValue(counter)\n",
" counter += 1\n",
"\n",
" # if channel not specified, use segment channel\n",
" if chan:\n",
" active_chan = chan\n",
" elif seg['chan']:\n",
" active_chan = [seg['chan'].split(' (')[0]]\n",
" else:\n",
" raise ValueError('No channel was specified and the '\n",
" 'segment at {}-{} has no channel.'.format(\n",
" t0, t1))\n",
" active_chan = chan if chan else [seg['chan'].split(' (')[0]]\n",
" chan_to_read = active_chan + ref_chan\n",
"\n",
" data = self.dataset.read_data(chan=chan_to_read, begtime=t0,\n",
" endtime=t1)\n",
"\n",
" # Downsample if necessary\n",
" if data.s_freq > max_s_freq:\n",
" q = int(data.s_freq / max_s_freq)\n",
" lg.debug('Decimate (no low-pass filter) at ' + str(q))\n",
"\n",
" data.data[0] = data.data[0][:, slice(None, None, q)]\n",
" data.axis['time'][0] = data.axis['time'][0][slice(\n",
" None, None, q)]\n",
" data.s_freq = int(data.s_freq / q)\n",
"\n",
" # read data from disk\n",
" subseg.append(_create_data(\n",
" data, active_chan, ref_chan=ref_chan, grp_name=grp_name))\n",
"\n",
" one_segment.s_freq = s_freq = data.s_freq\n",
" one_segment.axis['chan'][0] = chs = subseg[0].axis['chan'][0]\n",
" one_segment.axis['time'][0] = timeline = hstack(\n",
" [x.axis['time'][0] for x in subseg])\n",
" one_segment.data[0] = empty((len(active_chan), len(timeline)),\n",
" dtype='f')\n",
" n_stitch = sum(asarray(diff(timeline) > 2/s_freq, dtype=bool))\n",
"\n",
" for i, ch in enumerate(subseg[0].axis['chan'][0]):\n",
" one_segment.data[0][i, :] = hstack(\n",
" [x(chan=ch)[0] for x in subseg])\n",
"\n",
" # For channel concatenation\n",
" if concat_chan and len(chs) > 1:\n",
" one_segment.data[0] = ravel(one_segment.data[0])\n",
" one_segment.axis['chan'][0] = asarray([(', ').join(chs)],\n",
" dtype='U')\n",
" # axis['time'] should not be used in this case\n",
"\n",
" output.append({'data': one_segment,\n",
" 'chan': active_chan,\n",
" 'stage': seg['stage'],\n",
" 'cycle': seg['cycle'],\n",
" 'name': seg['name'],\n",
" 'n_stitch': n_stitch\n",
" })\n",
"\n",
" if parent:\n",
" if progress.wasCanceled():\n",
" parent.parent.statusBar().showMessage('Process canceled by'\n",
" ' user.')\n",
" return\n",
"\n",
" if parent:\n",
" progress.setValue(counter)\n",
"\n",
" self.segments = output\n",
"\n",
" return 1"
] | [
0.012987012987012988,
0.02,
0.014492753623188406,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017857142857142856,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 114 | 0.001656 |
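A hedged example of driving read_data() outside the GUI; the channel names and the populated `segs` instance are assumptions:

    segs.read_data(chan=['Cz'], ref_chan=['A1', 'A2'], grp_name='eeg',
                   concat_chan=False, max_s_freq=512)
    first = segs.segments[0]
    print(first['name'], first['n_stitch'], first['data'].s_freq)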
def fftconv(a, b, axes=(0, 1)):
"""
Compute a multi-dimensional convolution via the Discrete Fourier
Transform. Note that the output has a phase shift relative to the
output of :func:`scipy.ndimage.convolve` with the default ``origin``
parameter.
Parameters
----------
a : array_like
Input array
b : array_like
Input array
axes : sequence of ints, optional (default (0, 1))
Axes on which to perform convolution
Returns
-------
ab : ndarray
Convolution of input arrays, a and b, along specified axes
"""
if np.isrealobj(a) and np.isrealobj(b):
fft = rfftn
ifft = irfftn
else:
fft = fftn
ifft = ifftn
dims = np.maximum([a.shape[i] for i in axes], [b.shape[i] for i in axes])
af = fft(a, dims, axes)
bf = fft(b, dims, axes)
return ifft(af * bf, dims, axes) | [
"def",
"fftconv",
"(",
"a",
",",
"b",
",",
"axes",
"=",
"(",
"0",
",",
"1",
")",
")",
":",
"if",
"np",
".",
"isrealobj",
"(",
"a",
")",
"and",
"np",
".",
"isrealobj",
"(",
"b",
")",
":",
"fft",
"=",
"rfftn",
"ifft",
"=",
"irfftn",
"else",
":",
"fft",
"=",
"fftn",
"ifft",
"=",
"ifftn",
"dims",
"=",
"np",
".",
"maximum",
"(",
"[",
"a",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"axes",
"]",
",",
"[",
"b",
".",
"shape",
"[",
"i",
"]",
"for",
"i",
"in",
"axes",
"]",
")",
"af",
"=",
"fft",
"(",
"a",
",",
"dims",
",",
"axes",
")",
"bf",
"=",
"fft",
"(",
"b",
",",
"dims",
",",
"axes",
")",
"return",
"ifft",
"(",
"af",
"*",
"bf",
",",
"dims",
",",
"axes",
")"
] | 26.90625 | 0.001121 | [
"def fftconv(a, b, axes=(0, 1)):\n",
" \"\"\"\n",
" Compute a multi-dimensional convolution via the Discrete Fourier\n",
" Transform. Note that the output has a phase shift relative to the\n",
" output of :func:`scipy.ndimage.convolve` with the default ``origin``\n",
" parameter.\n",
"\n",
" Parameters\n",
" ----------\n",
" a : array_like\n",
" Input array\n",
" b : array_like\n",
" Input array\n",
" axes : sequence of ints, optional (default (0, 1))\n",
" Axes on which to perform convolution\n",
"\n",
" Returns\n",
" -------\n",
" ab : ndarray\n",
" Convolution of input arrays, a and b, along specified axes\n",
" \"\"\"\n",
"\n",
" if np.isrealobj(a) and np.isrealobj(b):\n",
" fft = rfftn\n",
" ifft = irfftn\n",
" else:\n",
" fft = fftn\n",
" ifft = ifftn\n",
" dims = np.maximum([a.shape[i] for i in axes], [b.shape[i] for i in axes])\n",
" af = fft(a, dims, axes)\n",
" bf = fft(b, dims, axes)\n",
" return ifft(af * bf, dims, axes)"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.027777777777777776
] | 32 | 0.000868 |
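The function relies on the convolution theorem: multiplying DFTs yields circular convolution. A standalone numpy check of that identity (an independent restatement, not the library code):

    import numpy as np
    from numpy.fft import fftn, ifftn

    rng = np.random.default_rng(0)
    a = rng.standard_normal((8, 8))
    b = rng.standard_normal((8, 8))

    via_fft = np.real(ifftn(fftn(a) * fftn(b)))

    direct = np.zeros_like(a)  # brute-force circular convolution
    for i in range(8):
        for j in range(8):
            for k in range(8):
                for m in range(8):
                    direct[i, j] += a[k, m] * b[(i - k) % 8, (j - m) % 8]

    assert np.allclose(via_fft, direct)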
def _record_values_for_fit_summary_and_statsmodels(self):
"""
Store the various estimation results that are used to describe how well
the estimated model fits the given dataset, and record the values that
are needed for the statsmodels estimation results table. All values are
stored on the model instance.
Returns
-------
None.
"""
# Make sure we have all attributes needed to create the results summary
needed_attributes = ["fitted_probs",
"params",
"log_likelihood",
"standard_errors"]
try:
assert all([hasattr(self, attr) for attr in needed_attributes])
assert all([getattr(self, attr) is not None
for attr in needed_attributes])
except AssertionError:
msg = "Call this function only after setting/calculating all other"
msg_2 = " estimation results attributes"
raise NotImplementedError(msg + msg_2)
# Record the number of observations
self.nobs = self.fitted_probs.shape[0]
# This is the number of estimated parameters
self.df_model = self.params.shape[0]
# The number of observations minus the number of estimated parameters
self.df_resid = self.nobs - self.df_model
# This is just the log-likelihood. The opaque name is used for
# conformance with statsmodels
self.llf = self.log_likelihood
# This is just a repeat of the standard errors
self.bse = self.standard_errors
# These are the penalized measures of fit used for model comparison
self.aic = compute_aic(self)
self.bic = compute_bic(self)
return None | [
"def",
"_record_values_for_fit_summary_and_statsmodels",
"(",
"self",
")",
":",
"# Make sure we have all attributes needed to create the results summary",
"needed_attributes",
"=",
"[",
"\"fitted_probs\"",
",",
"\"params\"",
",",
"\"log_likelihood\"",
",",
"\"standard_errors\"",
"]",
"try",
":",
"assert",
"all",
"(",
"[",
"hasattr",
"(",
"self",
",",
"attr",
")",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"assert",
"all",
"(",
"[",
"getattr",
"(",
"self",
",",
"attr",
")",
"is",
"not",
"None",
"for",
"attr",
"in",
"needed_attributes",
"]",
")",
"except",
"AssertionError",
":",
"msg",
"=",
"\"Call this function only after setting/calculating all other\"",
"msg_2",
"=",
"\" estimation results attributes\"",
"raise",
"NotImplementedError",
"(",
"msg",
"+",
"msg_2",
")",
"# Record the number of observations",
"self",
".",
"nobs",
"=",
"self",
".",
"fitted_probs",
".",
"shape",
"[",
"0",
"]",
"# This is the number of estimated parameters",
"self",
".",
"df_model",
"=",
"self",
".",
"params",
".",
"shape",
"[",
"0",
"]",
"# The number of observations minus the number of estimated parameters",
"self",
".",
"df_resid",
"=",
"self",
".",
"nobs",
"-",
"self",
".",
"df_model",
"# This is just the log-likelihood. The opaque name is used for",
"# conformance with statsmodels",
"self",
".",
"llf",
"=",
"self",
".",
"log_likelihood",
"# This is just a repeat of the standard errors",
"self",
".",
"bse",
"=",
"self",
".",
"standard_errors",
"# These are the penalized measures of fit used for model comparison",
"self",
".",
"aic",
"=",
"compute_aic",
"(",
"self",
")",
"self",
".",
"bic",
"=",
"compute_bic",
"(",
"self",
")",
"return",
"None"
] | 43.243902 | 0.001103 | [
"def _record_values_for_fit_summary_and_statsmodels(self):\n",
" \"\"\"\n",
" Store the various estimation results that are used to describe how well\n",
" the estimated model fits the given dataset, and record the values that\n",
" are needed for the statsmodels estimation results table. All values are\n",
" stored on the model instance.\n",
"\n",
" Returns\n",
" -------\n",
" None.\n",
" \"\"\"\n",
" # Make sure we have all attributes needed to create the results summary\n",
" needed_attributes = [\"fitted_probs\",\n",
" \"params\",\n",
" \"log_likelihood\",\n",
" \"standard_errors\"]\n",
" try:\n",
" assert all([hasattr(self, attr) for attr in needed_attributes])\n",
" assert all([getattr(self, attr) is not None\n",
" for attr in needed_attributes])\n",
" except AssertionError:\n",
" msg = \"Call this function only after setting/calculating all other\"\n",
" msg_2 = \" estimation results attributes\"\n",
" raise NotImplementedError(msg + msg_2)\n",
"\n",
" # Record the number of observations\n",
" self.nobs = self.fitted_probs.shape[0]\n",
" # This is the number of estimated parameters\n",
" self.df_model = self.params.shape[0]\n",
" # The number of observations minus the number of estimated parameters\n",
" self.df_resid = self.nobs - self.df_model\n",
" # This is just the log-likelihood. The opaque name is used for\n",
" # conformance with statsmodels\n",
" self.llf = self.log_likelihood\n",
" # This is just a repeat of the standard errors\n",
" self.bse = self.standard_errors\n",
" # These are the penalized measures of fit used for model comparison\n",
" self.aic = compute_aic(self)\n",
" self.bic = compute_bic(self)\n",
"\n",
" return None"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 41 | 0.003316 |
def _function_lookup(name, module):
"""Searches the function between the registered ones.
If not found, it imports the module forcing its registration.
"""
try:
return _registered_functions[name]
except KeyError: # force function registering
__import__(module)
mod = sys.modules[module]
getattr(mod, name)
return _registered_functions[name] | [
"def",
"_function_lookup",
"(",
"name",
",",
"module",
")",
":",
"try",
":",
"return",
"_registered_functions",
"[",
"name",
"]",
"except",
"KeyError",
":",
"# force function registering",
"__import__",
"(",
"module",
")",
"mod",
"=",
"sys",
".",
"modules",
"[",
"module",
"]",
"getattr",
"(",
"mod",
",",
"name",
")",
"return",
"_registered_functions",
"[",
"name",
"]"
] | 30.076923 | 0.002481 | [
"def _function_lookup(name, module):\n",
" \"\"\"Searches the function between the registered ones.\n",
" If not found, it imports the module forcing its registration.\n",
"\n",
" \"\"\"\n",
" try:\n",
" return _registered_functions[name]\n",
" except KeyError: # force function registering\n",
" __import__(module)\n",
" mod = sys.modules[module]\n",
" getattr(mod, name)\n",
"\n",
" return _registered_functions[name]"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808
] | 13 | 0.001832 |
def remove_patch(self, patch):
""" Remove a patch from the patches list """
self._check_patch(patch)
patchline = self.patch2line[patch]
del self.patch2line[patch]
self.patchlines.remove(patchline) | [
"def",
"remove_patch",
"(",
"self",
",",
"patch",
")",
":",
"self",
".",
"_check_patch",
"(",
"patch",
")",
"patchline",
"=",
"self",
".",
"patch2line",
"[",
"patch",
"]",
"del",
"self",
".",
"patch2line",
"[",
"patch",
"]",
"self",
".",
"patchlines",
".",
"remove",
"(",
"patchline",
")"
] | 38.5 | 0.008475 | [
"def remove_patch(self, patch):\n",
" \"\"\" Remove a patch from the patches list \"\"\"\n",
" self._check_patch(patch)\n",
" patchline = self.patch2line[patch]\n",
" del self.patch2line[patch]\n",
" self.patchlines.remove(patchline)"
] | [
0,
0.018867924528301886,
0,
0,
0,
0.024390243902439025
] | 6 | 0.00721 |
def fill_traversals(traversals, edges, edges_hash=None):
"""
Convert a traversal of a list of edges into a sequence of
traversals where every pair of consecutive node indexes
is an edge in a passed edge list
Parameters
-------------
traversals : sequence of (m,) int
Node indexes of traversals of a graph
edges : (n, 2) int
Pairs of connected node indexes
edges_hash : None, or (n,) int
Edges sorted along axis 1 then hashed
using grouping.hashable_rows
Returns
--------------
splits : sequence of (p,) int
Node indexes of connected traversals
"""
# make sure edges are correct type
edges = np.asanyarray(edges, dtype=np.int64)
# make sure edges are sorted
edges.sort(axis=1)
# if there are no traversals just return edges
if len(traversals) == 0:
return edges.copy()
# hash edges for contains checks
if edges_hash is None:
edges_hash = grouping.hashable_rows(edges)
splits = []
for nodes in traversals:
# split traversals to remove edges
# that don't actually exist
splits.extend(split_traversal(
traversal=nodes,
edges=edges,
edges_hash=edges_hash))
# turn the split traversals back into (n,2) edges
included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))
for i in splits])
if len(included) > 0:
# sort included edges in place
included.sort(axis=1)
# make sure any edges not included in split traversals
# are just added as a length 2 traversal
splits.extend(grouping.boolean_rows(
edges,
included,
operation=np.setdiff1d))
else:
# no edges were included, so our filled traversal
# is just the original edges copied over
splits = edges.copy()
return splits | [
"def",
"fill_traversals",
"(",
"traversals",
",",
"edges",
",",
"edges_hash",
"=",
"None",
")",
":",
"# make sure edges are correct type",
"edges",
"=",
"np",
".",
"asanyarray",
"(",
"edges",
",",
"dtype",
"=",
"np",
".",
"int64",
")",
"# make sure edges are sorted",
"edges",
".",
"sort",
"(",
"axis",
"=",
"1",
")",
"# if there are no traversals just return edges",
"if",
"len",
"(",
"traversals",
")",
"==",
"0",
":",
"return",
"edges",
".",
"copy",
"(",
")",
"# hash edges for contains checks",
"if",
"edges_hash",
"is",
"None",
":",
"edges_hash",
"=",
"grouping",
".",
"hashable_rows",
"(",
"edges",
")",
"splits",
"=",
"[",
"]",
"for",
"nodes",
"in",
"traversals",
":",
"# split traversals to remove edges",
"# that don't actually exist",
"splits",
".",
"extend",
"(",
"split_traversal",
"(",
"traversal",
"=",
"nodes",
",",
"edges",
"=",
"edges",
",",
"edges_hash",
"=",
"edges_hash",
")",
")",
"# turn the split traversals back into (n,2) edges",
"included",
"=",
"util",
".",
"vstack_empty",
"(",
"[",
"np",
".",
"column_stack",
"(",
"(",
"i",
"[",
":",
"-",
"1",
"]",
",",
"i",
"[",
"1",
":",
"]",
")",
")",
"for",
"i",
"in",
"splits",
"]",
")",
"if",
"len",
"(",
"included",
")",
">",
"0",
":",
"# sort included edges in place",
"included",
".",
"sort",
"(",
"axis",
"=",
"1",
")",
"# make sure any edges not included in split traversals",
"# are just added as a length 2 traversal",
"splits",
".",
"extend",
"(",
"grouping",
".",
"boolean_rows",
"(",
"edges",
",",
"included",
",",
"operation",
"=",
"np",
".",
"setdiff1d",
")",
")",
"else",
":",
"# no edges were included, so our filled traversal",
"# is just the original edges copied over",
"splits",
"=",
"edges",
".",
"copy",
"(",
")",
"return",
"splits"
] | 31.15 | 0.000519 | [
"def fill_traversals(traversals, edges, edges_hash=None):\n",
" \"\"\"\n",
" Convert a traversal of a list of edges into a sequence of\n",
" traversals where every pair of consecutive node indexes\n",
" is an edge in a passed edge list\n",
"\n",
" Parameters\n",
" -------------\n",
" traversals : sequence of (m,) int\n",
" Node indexes of traversals of a graph\n",
" edges : (n, 2) int\n",
" Pairs of connected node indexes\n",
" edges_hash : None, or (n,) int\n",
" Edges sorted along axis 1 then hashed\n",
" using grouping.hashable_rows\n",
"\n",
" Returns\n",
" --------------\n",
" splits : sequence of (p,) int\n",
" Node indexes of connected traversals\n",
" \"\"\"\n",
" # make sure edges are correct type\n",
" edges = np.asanyarray(edges, dtype=np.int64)\n",
" # make sure edges are sorted\n",
" edges.sort(axis=1)\n",
"\n",
" # if there are no traversals just return edges\n",
" if len(traversals) == 0:\n",
" return edges.copy()\n",
"\n",
" # hash edges for contains checks\n",
" if edges_hash is None:\n",
" edges_hash = grouping.hashable_rows(edges)\n",
"\n",
" splits = []\n",
" for nodes in traversals:\n",
" # split traversals to remove edges\n",
" # that don't actually exist\n",
" splits.extend(split_traversal(\n",
" traversal=nodes,\n",
" edges=edges,\n",
" edges_hash=edges_hash))\n",
" # turn the split traversals back into (n,2) edges\n",
" included = util.vstack_empty([np.column_stack((i[:-1], i[1:]))\n",
" for i in splits])\n",
" if len(included) > 0:\n",
" # sort included edges in place\n",
" included.sort(axis=1)\n",
" # make sure any edges not included in split traversals\n",
" # are just added as a length 2 traversal\n",
" splits.extend(grouping.boolean_rows(\n",
" edges,\n",
" included,\n",
" operation=np.setdiff1d))\n",
" else:\n",
" # no edges were included, so our filled traversal\n",
" # is just the original edges copied over\n",
" splits = edges.copy()\n",
"\n",
" return splits"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705
] | 60 | 0.00098 |
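The core idea, splitting a node walk wherever a consecutive pair is not a real edge, in a plain-Python toy independent of trimesh:

    edges = {(0, 1), (1, 2), (3, 4)}  # sorted node pairs
    walk = [0, 1, 2, 3, 4]            # (2, 3) is not an edge

    splits, current = [], [walk[0]]
    for u, v in zip(walk[:-1], walk[1:]):
        if tuple(sorted((u, v))) in edges:
            current.append(v)
        else:
            splits.append(current)
            current = [v]
    splits.append(current)
    print(splits)  # [[0, 1, 2], [3, 4]]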
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe with 'KEGGid' and 'ENSid' columns.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df | [
"def",
"ensembl_to_kegg",
"(",
"organism",
",",
"kegg_db",
")",
":",
"print",
"(",
"\"KEGG API: http://rest.genome.jp/link/\"",
"+",
"kegg_db",
"+",
"\"/\"",
"+",
"organism",
")",
"sys",
".",
"stdout",
".",
"flush",
"(",
")",
"kegg_ens",
"=",
"urlopen",
"(",
"\"http://rest.genome.jp/link/\"",
"+",
"kegg_db",
"+",
"\"/\"",
"+",
"organism",
")",
".",
"read",
"(",
")",
"kegg_ens",
"=",
"kegg_ens",
".",
"split",
"(",
"\"\\n\"",
")",
"final",
"=",
"[",
"]",
"for",
"i",
"in",
"kegg_ens",
":",
"final",
".",
"append",
"(",
"i",
".",
"split",
"(",
"\"\\t\"",
")",
")",
"df",
"=",
"pd",
".",
"DataFrame",
"(",
"final",
"[",
"0",
":",
"len",
"(",
"final",
")",
"-",
"1",
"]",
")",
"[",
"[",
"0",
",",
"1",
"]",
"]",
"ens_id",
"=",
"pd",
".",
"DataFrame",
"(",
"df",
"[",
"1",
"]",
".",
"str",
".",
"split",
"(",
"\":\"",
")",
".",
"tolist",
"(",
")",
")",
"[",
"1",
"]",
"df",
"=",
"pd",
".",
"concat",
"(",
"[",
"df",
",",
"ens_id",
"]",
",",
"axis",
"=",
"1",
")",
"df",
".",
"columns",
"=",
"[",
"'KEGGid'",
",",
"'ensDB'",
",",
"'ENSid'",
"]",
"df",
"=",
"df",
"[",
"[",
"'KEGGid'",
",",
"'ENSid'",
"]",
"]",
"return",
"df"
] | 34.565217 | 0.019584 | [
"def ensembl_to_kegg(organism,kegg_db):\n",
" \"\"\"\n",
" Looks up KEGG mappings of KEGG ids to ensembl ids\n",
"\n",
" :param organism: an organisms as listed in organismsKEGG()\n",
" :param kegg_db: a matching KEGG db as reported in databasesKEGG\n",
"\n",
" :returns: a Pandas dataframe of with 'KEGGid' and 'ENSid'.\n",
"\n",
" \"\"\"\n",
" print(\"KEGG API: http://rest.genome.jp/link/\"+kegg_db+\"/\"+organism)\n",
" sys.stdout.flush()\n",
" kegg_ens=urlopen(\"http://rest.genome.jp/link/\"+kegg_db+\"/\"+organism).read()\n",
" kegg_ens=kegg_ens.split(\"\\n\")\n",
" final=[]\n",
" for i in kegg_ens:\n",
" final.append(i.split(\"\\t\"))\n",
" df=pd.DataFrame(final[0:len(final)-1])[[0,1]]\n",
" ens_id=pd.DataFrame(df[1].str.split(\":\").tolist())[1]\n",
" df=pd.concat([df,ens_id],axis=1)\n",
" df.columns=['KEGGid','ensDB','ENSid']\n",
" df=df[['KEGGid','ENSid']]\n",
" return df"
] | [
0.02564102564102564,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0125,
0.029411764705882353,
0.07692307692307693,
0,
0,
0.04,
0.017241379310344827,
0.08108108108108109,
0.07142857142857142,
0.06666666666666667,
0.07692307692307693
] | 23 | 0.021644 |
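A hedged call; it needs network access to the KEGG REST service, and the organism/db codes below are placeholders to be taken from organismsKEGG() and databasesKEGG:

    df = ensembl_to_kegg('hsa', 'hsa_ensembl')  # hypothetical codes
    print(df.head())                            # columns: KEGGid, ENSid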
def add(self, func, priority=0):
""" Add a func to the cmd chain with given priority """
self.chain.append((priority, func))
self.chain.sort(key=lambda x: x[0]) | [
"def",
"add",
"(",
"self",
",",
"func",
",",
"priority",
"=",
"0",
")",
":",
"self",
".",
"chain",
".",
"append",
"(",
"(",
"priority",
",",
"func",
")",
")",
"self",
".",
"chain",
".",
"sort",
"(",
"key",
"=",
"lambda",
"x",
":",
"x",
"[",
"0",
"]",
")"
] | 45.25 | 0.01087 | [
"def add(self, func, priority=0):\n",
" \"\"\" Add a func to the cmd chain with given priority \"\"\"\n",
" self.chain.append((priority, func))\n",
" self.chain.sort(key=lambda x: x[0])"
] | [
0,
0.015625,
0,
0.023255813953488372
] | 4 | 0.00972 |
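A standalone miniature of the same priority ordering (lower priority values sort first):

    chain = []

    def add(func, priority=0):
        chain.append((priority, func))
        chain.sort(key=lambda x: x[0])

    add(lambda: 'late', priority=10)
    add(lambda: 'early', priority=-1)
    print([p for p, _ in chain])  # [-1, 10]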
def compute_stable_poses(mesh,
center_mass=None,
sigma=0.0,
n_samples=1,
threshold=0.0):
"""
Computes stable orientations of a mesh and their quasi-static probabilities.
This method samples the location of the center of mass from a multivariate
gaussian with the mean at the center of mass, and a covariance
equal to an identity matrix times sigma, over n_samples.
For each sample, it computes the stable resting poses of the mesh on
a planar workspace and evaluates the probabilities of landing in
each pose if the object is dropped onto the table randomly.
This method returns the 4x4 homogeneous transform matrices that place
the shape against the planar surface with the z-axis pointing upwards
and a list of the probabilities for each pose.
The transforms and probabilties that are returned are sorted, with the
most probable pose first.
Parameters
----------
mesh : trimesh.Trimesh
The target mesh
center_mass : (3,) float
The object center of mass. If None, this method
assumes uniform density and watertightness and
computes a center of mass explicitly
sigma : float
The covariance for the multivariate gaussian used
to sample center of mass locations
n_samples : int
The number of samples of the center of mass location
threshold : float
The probability value at which to threshold
returned stable poses
Returns
-------
transforms : (n, 4, 4) float
The homogeneous matrices that transform the
object to rest in a stable pose, with the
new z-axis pointing upwards from the table
and the object just touching the table.
probs : (n,) float
Probability in (0, 1) for each pose
"""
# save convex hull mesh to avoid a cache check
cvh = mesh.convex_hull
if center_mass is None:
center_mass = mesh.center_mass
# Sample center of mass, rejecting points outside of conv hull
sample_coms = []
while len(sample_coms) < n_samples:
remaining = n_samples - len(sample_coms)
coms = np.random.multivariate_normal(center_mass,
sigma * np.eye(3),
remaining)
for c in coms:
dots = np.einsum('ij,ij->i',
c - cvh.triangles_center,
cvh.face_normals)
if np.all(dots < 0):
sample_coms.append(c)
norms_to_probs = {} # Map from normal to probabilities
# For each sample, compute the stable poses
for sample_com in sample_coms:
# Create toppling digraph
dg = _create_topple_graph(cvh, sample_com)
# Propagate probabilities to sink nodes with a breadth-first traversal
nodes = [n for n in dg.nodes() if dg.in_degree(n) == 0]
n_iters = 0
while len(nodes) > 0 and n_iters <= len(mesh.faces):
new_nodes = []
for node in nodes:
if dg.out_degree(node) == 0:
continue
successor = next(iter(dg.successors(node)))
dg.node[successor]['prob'] += dg.node[node]['prob']
dg.node[node]['prob'] = 0.0
new_nodes.append(successor)
nodes = new_nodes
n_iters += 1
# Collect stable poses
for node in dg.nodes():
if dg.node[node]['prob'] > 0.0:
normal = cvh.face_normals[node]
prob = dg.node[node]['prob']
key = tuple(np.around(normal, decimals=3))
if key in norms_to_probs:
norms_to_probs[key]['prob'] += 1.0 / n_samples * prob
else:
norms_to_probs[key] = {
'prob': 1.0 / n_samples * prob,
'normal': normal
}
transforms = []
probs = []
# Filter stable poses
for key in norms_to_probs:
prob = norms_to_probs[key]['prob']
if prob > threshold:
tf = np.eye(4)
# Compute a rotation matrix for this stable pose
z = -1.0 * norms_to_probs[key]['normal']
x = np.array([-z[1], z[0], 0])
if np.linalg.norm(x) == 0.0:
x = np.array([1, 0, 0])
else:
x = x / np.linalg.norm(x)
y = np.cross(z, x)
y = y / np.linalg.norm(y)
tf[:3, :3] = np.array([x, y, z])
# Compute the necessary translation for this stable pose
m = cvh.copy()
m.apply_transform(tf)
z = -m.bounds[0][2]
tf[:3, 3] = np.array([0, 0, z])
transforms.append(tf)
probs.append(prob)
# Sort the results
transforms = np.array(transforms)
probs = np.array(probs)
inds = np.argsort(-probs)
return transforms[inds], probs[inds] | [
"def",
"compute_stable_poses",
"(",
"mesh",
",",
"center_mass",
"=",
"None",
",",
"sigma",
"=",
"0.0",
",",
"n_samples",
"=",
"1",
",",
"threshold",
"=",
"0.0",
")",
":",
"# save convex hull mesh to avoid a cache check",
"cvh",
"=",
"mesh",
".",
"convex_hull",
"if",
"center_mass",
"is",
"None",
":",
"center_mass",
"=",
"mesh",
".",
"center_mass",
"# Sample center of mass, rejecting points outside of conv hull",
"sample_coms",
"=",
"[",
"]",
"while",
"len",
"(",
"sample_coms",
")",
"<",
"n_samples",
":",
"remaining",
"=",
"n_samples",
"-",
"len",
"(",
"sample_coms",
")",
"coms",
"=",
"np",
".",
"random",
".",
"multivariate_normal",
"(",
"center_mass",
",",
"sigma",
"*",
"np",
".",
"eye",
"(",
"3",
")",
",",
"remaining",
")",
"for",
"c",
"in",
"coms",
":",
"dots",
"=",
"np",
".",
"einsum",
"(",
"'ij,ij->i'",
",",
"c",
"-",
"cvh",
".",
"triangles_center",
",",
"cvh",
".",
"face_normals",
")",
"if",
"np",
".",
"all",
"(",
"dots",
"<",
"0",
")",
":",
"sample_coms",
".",
"append",
"(",
"c",
")",
"norms_to_probs",
"=",
"{",
"}",
"# Map from normal to probabilities",
"# For each sample, compute the stable poses",
"for",
"sample_com",
"in",
"sample_coms",
":",
"# Create toppling digraph",
"dg",
"=",
"_create_topple_graph",
"(",
"cvh",
",",
"sample_com",
")",
"# Propagate probabilites to sink nodes with a breadth-first traversal",
"nodes",
"=",
"[",
"n",
"for",
"n",
"in",
"dg",
".",
"nodes",
"(",
")",
"if",
"dg",
".",
"in_degree",
"(",
"n",
")",
"==",
"0",
"]",
"n_iters",
"=",
"0",
"while",
"len",
"(",
"nodes",
")",
">",
"0",
"and",
"n_iters",
"<=",
"len",
"(",
"mesh",
".",
"faces",
")",
":",
"new_nodes",
"=",
"[",
"]",
"for",
"node",
"in",
"nodes",
":",
"if",
"dg",
".",
"out_degree",
"(",
"node",
")",
"==",
"0",
":",
"continue",
"successor",
"=",
"next",
"(",
"iter",
"(",
"dg",
".",
"successors",
"(",
"node",
")",
")",
")",
"dg",
".",
"node",
"[",
"successor",
"]",
"[",
"'prob'",
"]",
"+=",
"dg",
".",
"node",
"[",
"node",
"]",
"[",
"'prob'",
"]",
"dg",
".",
"node",
"[",
"node",
"]",
"[",
"'prob'",
"]",
"=",
"0.0",
"new_nodes",
".",
"append",
"(",
"successor",
")",
"nodes",
"=",
"new_nodes",
"n_iters",
"+=",
"1",
"# Collect stable poses",
"for",
"node",
"in",
"dg",
".",
"nodes",
"(",
")",
":",
"if",
"dg",
".",
"node",
"[",
"node",
"]",
"[",
"'prob'",
"]",
">",
"0.0",
":",
"normal",
"=",
"cvh",
".",
"face_normals",
"[",
"node",
"]",
"prob",
"=",
"dg",
".",
"node",
"[",
"node",
"]",
"[",
"'prob'",
"]",
"key",
"=",
"tuple",
"(",
"np",
".",
"around",
"(",
"normal",
",",
"decimals",
"=",
"3",
")",
")",
"if",
"key",
"in",
"norms_to_probs",
":",
"norms_to_probs",
"[",
"key",
"]",
"[",
"'prob'",
"]",
"+=",
"1.0",
"/",
"n_samples",
"*",
"prob",
"else",
":",
"norms_to_probs",
"[",
"key",
"]",
"=",
"{",
"'prob'",
":",
"1.0",
"/",
"n_samples",
"*",
"prob",
",",
"'normal'",
":",
"normal",
"}",
"transforms",
"=",
"[",
"]",
"probs",
"=",
"[",
"]",
"# Filter stable poses",
"for",
"key",
"in",
"norms_to_probs",
":",
"prob",
"=",
"norms_to_probs",
"[",
"key",
"]",
"[",
"'prob'",
"]",
"if",
"prob",
">",
"threshold",
":",
"tf",
"=",
"np",
".",
"eye",
"(",
"4",
")",
"# Compute a rotation matrix for this stable pose",
"z",
"=",
"-",
"1.0",
"*",
"norms_to_probs",
"[",
"key",
"]",
"[",
"'normal'",
"]",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"-",
"z",
"[",
"1",
"]",
",",
"z",
"[",
"0",
"]",
",",
"0",
"]",
")",
"if",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
"==",
"0.0",
":",
"x",
"=",
"np",
".",
"array",
"(",
"[",
"1",
",",
"0",
",",
"0",
"]",
")",
"else",
":",
"x",
"=",
"x",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"x",
")",
"y",
"=",
"np",
".",
"cross",
"(",
"z",
",",
"x",
")",
"y",
"=",
"y",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"y",
")",
"tf",
"[",
":",
"3",
",",
":",
"3",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"x",
",",
"y",
",",
"z",
"]",
")",
"# Compute the necessary translation for this stable pose",
"m",
"=",
"cvh",
".",
"copy",
"(",
")",
"m",
".",
"apply_transform",
"(",
"tf",
")",
"z",
"=",
"-",
"m",
".",
"bounds",
"[",
"0",
"]",
"[",
"2",
"]",
"tf",
"[",
":",
"3",
",",
"3",
"]",
"=",
"np",
".",
"array",
"(",
"[",
"0",
",",
"0",
",",
"z",
"]",
")",
"transforms",
".",
"append",
"(",
"tf",
")",
"probs",
".",
"append",
"(",
"prob",
")",
"# Sort the results",
"transforms",
"=",
"np",
".",
"array",
"(",
"transforms",
")",
"probs",
"=",
"np",
".",
"array",
"(",
"probs",
")",
"inds",
"=",
"np",
".",
"argsort",
"(",
"-",
"probs",
")",
"return",
"transforms",
"[",
"inds",
"]",
",",
"probs",
"[",
"inds",
"]"
] | 34.440559 | 0.000197 | [
"def compute_stable_poses(mesh,\n",
" center_mass=None,\n",
" sigma=0.0,\n",
" n_samples=1,\n",
" threshold=0.0):\n",
" \"\"\"\n",
" Computes stable orientations of a mesh and their quasi-static probabilites.\n",
"\n",
" This method samples the location of the center of mass from a multivariate\n",
" gaussian with the mean at the center of mass, and a covariance\n",
" equal to and identity matrix times sigma, over n_samples.\n",
"\n",
" For each sample, it computes the stable resting poses of the mesh on a\n",
" a planar workspace and evaulates the probabilities of landing in\n",
" each pose if the object is dropped onto the table randomly.\n",
"\n",
" This method returns the 4x4 homogenous transform matrices that place\n",
" the shape against the planar surface with the z-axis pointing upwards\n",
" and a list of the probabilities for each pose.\n",
"\n",
" The transforms and probabilties that are returned are sorted, with the\n",
" most probable pose first.\n",
"\n",
" Parameters\n",
" ----------\n",
" mesh : trimesh.Trimesh\n",
" The target mesh\n",
" com : (3,) float\n",
" Rhe object center of mass. If None, this method\n",
" assumes uniform density and watertightness and\n",
" computes a center of mass explicitly\n",
" sigma : float\n",
" Rhe covariance for the multivariate gaussian used\n",
" to sample center of mass locations\n",
" n_samples : int\n",
" The number of samples of the center of mass location\n",
" threshold : float\n",
" The probability value at which to threshold\n",
" returned stable poses\n",
"\n",
" Returns\n",
" -------\n",
" transforms : (n, 4, 4) float\n",
" The homogenous matrices that transform the\n",
" object to rest in a stable pose, with the\n",
" new z-axis pointing upwards from the table\n",
" and the object just touching the table.\n",
" probs : (n,) float\n",
" Probability in (0, 1) for each pose\n",
" \"\"\"\n",
"\n",
" # save convex hull mesh to avoid a cache check\n",
" cvh = mesh.convex_hull\n",
"\n",
" if center_mass is None:\n",
" center_mass = mesh.center_mass\n",
"\n",
" # Sample center of mass, rejecting points outside of conv hull\n",
" sample_coms = []\n",
" while len(sample_coms) < n_samples:\n",
" remaining = n_samples - len(sample_coms)\n",
" coms = np.random.multivariate_normal(center_mass,\n",
" sigma * np.eye(3),\n",
" remaining)\n",
" for c in coms:\n",
" dots = np.einsum('ij,ij->i',\n",
" c - cvh.triangles_center,\n",
" cvh.face_normals)\n",
" if np.all(dots < 0):\n",
" sample_coms.append(c)\n",
"\n",
" norms_to_probs = {} # Map from normal to probabilities\n",
"\n",
" # For each sample, compute the stable poses\n",
" for sample_com in sample_coms:\n",
"\n",
" # Create toppling digraph\n",
" dg = _create_topple_graph(cvh, sample_com)\n",
"\n",
" # Propagate probabilites to sink nodes with a breadth-first traversal\n",
" nodes = [n for n in dg.nodes() if dg.in_degree(n) == 0]\n",
" n_iters = 0\n",
" while len(nodes) > 0 and n_iters <= len(mesh.faces):\n",
" new_nodes = []\n",
" for node in nodes:\n",
" if dg.out_degree(node) == 0:\n",
" continue\n",
" successor = next(iter(dg.successors(node)))\n",
" dg.node[successor]['prob'] += dg.node[node]['prob']\n",
" dg.node[node]['prob'] = 0.0\n",
" new_nodes.append(successor)\n",
" nodes = new_nodes\n",
" n_iters += 1\n",
"\n",
" # Collect stable poses\n",
" for node in dg.nodes():\n",
" if dg.node[node]['prob'] > 0.0:\n",
" normal = cvh.face_normals[node]\n",
" prob = dg.node[node]['prob']\n",
" key = tuple(np.around(normal, decimals=3))\n",
" if key in norms_to_probs:\n",
" norms_to_probs[key]['prob'] += 1.0 / n_samples * prob\n",
" else:\n",
" norms_to_probs[key] = {\n",
" 'prob': 1.0 / n_samples * prob,\n",
" 'normal': normal\n",
" }\n",
"\n",
" transforms = []\n",
" probs = []\n",
"\n",
" # Filter stable poses\n",
" for key in norms_to_probs:\n",
" prob = norms_to_probs[key]['prob']\n",
" if prob > threshold:\n",
" tf = np.eye(4)\n",
"\n",
" # Compute a rotation matrix for this stable pose\n",
" z = -1.0 * norms_to_probs[key]['normal']\n",
" x = np.array([-z[1], z[0], 0])\n",
" if np.linalg.norm(x) == 0.0:\n",
" x = np.array([1, 0, 0])\n",
" else:\n",
" x = x / np.linalg.norm(x)\n",
" y = np.cross(z, x)\n",
" y = y / np.linalg.norm(y)\n",
" tf[:3, :3] = np.array([x, y, z])\n",
"\n",
" # Compute the necessary translation for this stable pose\n",
" m = cvh.copy()\n",
" m.apply_transform(tf)\n",
" z = -m.bounds[0][2]\n",
" tf[:3, 3] = np.array([0, 0, z])\n",
"\n",
" transforms.append(tf)\n",
" probs.append(prob)\n",
"\n",
" # Sort the results\n",
" transforms = np.array(transforms)\n",
" probs = np.array(probs)\n",
" inds = np.argsort(-probs)\n",
"\n",
" return transforms[inds], probs[inds]"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.025
] | 143 | 0.000175 |
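
The rotation built near the end of the record (z is the negated face normal, x is any unit vector orthogonal to z, y completes the frame via a cross product) can be checked in isolation. A sketch with an invented normal:

```python
import numpy as np

normal = np.array([0.3, -0.4, 0.866])
normal /= np.linalg.norm(normal)

z = -normal
x = np.array([-z[1], z[0], 0.0])
if np.linalg.norm(x) == 0.0:   # z was parallel to the world z-axis
    x = np.array([1.0, 0.0, 0.0])
else:
    x /= np.linalg.norm(x)
y = np.cross(z, x)
y /= np.linalg.norm(y)

R = np.array([x, y, z])
print(np.allclose(R @ R.T, np.eye(3)))  # True: rows form an orthonormal frame
```
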
def download_layers(self, repo_name, digest=None, destination=None):
''' download layers is a wrapper to do the following for a client loaded
with a manifest for an image:
1. use the manifests to retrieve list of digests (get_digests)
2. atomically download the list to destination (get_layers)
This function uses the MultiProcess client to download layers
at the same time.
'''
from sregistry.main.workers import Workers
from sregistry.main.workers.aws import download_task
# Obtain list of digests, and destination for download
self._get_manifest(repo_name, digest)
digests = self._get_digests(repo_name, digest)
destination = self._get_download_cache(destination)
# Create multiprocess download client
workers = Workers()
# Download each layer atomically
tasks = []
layers = []
# Start with a fresh token
self._update_token()
for digest in digests:
targz = "%s/%s.tar.gz" % (destination, digest['digest'])
url = '%s/%s/blobs/%s' % (self.base, repo_name, digest['digest'])
# Only download if not in cache already
if not os.path.exists(targz):
tasks.append((url, self.headers, targz))
layers.append(targz)
# Download layers with multiprocess workers
if len(tasks) > 0:
download_layers = workers.run(func=download_task,
tasks=tasks)
return layers, url | [
"def",
"download_layers",
"(",
"self",
",",
"repo_name",
",",
"digest",
"=",
"None",
",",
"destination",
"=",
"None",
")",
":",
"from",
"sregistry",
".",
"main",
".",
"workers",
"import",
"Workers",
"from",
"sregistry",
".",
"main",
".",
"workers",
".",
"aws",
"import",
"download_task",
"# Obtain list of digets, and destination for download",
"self",
".",
"_get_manifest",
"(",
"repo_name",
",",
"digest",
")",
"digests",
"=",
"self",
".",
"_get_digests",
"(",
"repo_name",
",",
"digest",
")",
"destination",
"=",
"self",
".",
"_get_download_cache",
"(",
"destination",
")",
"# Create multiprocess download client",
"workers",
"=",
"Workers",
"(",
")",
"# Download each layer atomically",
"tasks",
"=",
"[",
"]",
"layers",
"=",
"[",
"]",
"# Start with a fresh token",
"self",
".",
"_update_token",
"(",
")",
"for",
"digest",
"in",
"digests",
":",
"targz",
"=",
"\"%s/%s.tar.gz\"",
"%",
"(",
"destination",
",",
"digest",
"[",
"'digest'",
"]",
")",
"url",
"=",
"'%s/%s/blobs/%s'",
"%",
"(",
"self",
".",
"base",
",",
"repo_name",
",",
"digest",
"[",
"'digest'",
"]",
")",
"# Only download if not in cache already",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"targz",
")",
":",
"tasks",
".",
"append",
"(",
"(",
"url",
",",
"self",
".",
"headers",
",",
"targz",
")",
")",
"layers",
".",
"append",
"(",
"targz",
")",
"# Download layers with multiprocess workers",
"if",
"len",
"(",
"tasks",
")",
">",
"0",
":",
"download_layers",
"=",
"workers",
".",
"run",
"(",
"func",
"=",
"download_task",
",",
"tasks",
"=",
"tasks",
")",
"return",
"layers",
",",
"url"
] | 32.155556 | 0.002012 | [
"def download_layers(self, repo_name, digest=None, destination=None):\n",
" ''' download layers is a wrapper to do the following for a client loaded\n",
" with a manifest for an image:\n",
" \n",
" 1. use the manifests to retrieve list of digests (get_digests)\n",
" 2. atomically download the list to destination (get_layers)\n",
"\n",
" This function uses the MultiProcess client to download layers\n",
" at the same time.\n",
" '''\n",
" from sregistry.main.workers import Workers\n",
" from sregistry.main.workers.aws import download_task\n",
"\n",
" # Obtain list of digets, and destination for download\n",
" self._get_manifest(repo_name, digest)\n",
" digests = self._get_digests(repo_name, digest)\n",
" destination = self._get_download_cache(destination)\n",
"\n",
" # Create multiprocess download client\n",
" workers = Workers()\n",
"\n",
" # Download each layer atomically\n",
" tasks = []\n",
" layers = []\n",
"\n",
" # Start with a fresh token\n",
" self._update_token()\n",
"\n",
" for digest in digests:\n",
"\n",
" targz = \"%s/%s.tar.gz\" % (destination, digest['digest'])\n",
" url = '%s/%s/blobs/%s' % (self.base, repo_name, digest['digest'])\n",
" \n",
" # Only download if not in cache already\n",
" if not os.path.exists(targz):\n",
" tasks.append((url, self.headers, targz))\n",
" layers.append(targz)\n",
"\n",
" # Download layers with multiprocess workers\n",
" if len(tasks) > 0:\n",
"\n",
" download_layers = workers.run(func=download_task,\n",
" tasks=tasks)\n",
"\n",
" return layers, url"
] | [
0,
0,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 45 | 0.006407 |
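
The cache-then-parallel-download shape of the record above, reduced to stdlib pieces; `download_one` and the URL list are placeholders, not the client's real API.

```python
import os
from concurrent.futures import ThreadPoolExecutor

def download_one(url, dest):
    print("fetching", url, "->", dest)  # a real task would stream url to dest

wanted = [("http://example.com/layer1", "/tmp/layer1.tar.gz")]
tasks = [(u, d) for u, d in wanted if not os.path.exists(d)]  # skip cache hits
with ThreadPoolExecutor() as pool:
    list(pool.map(lambda t: download_one(*t), tasks))
```
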
def autologin(function, timeout=TIMEOUT):
"""Decorator that will try to login and redo an action before failing."""
@wraps(function)
async def wrapper(self, *args, **kwargs):
"""Wrap a function with timeout."""
try:
async with async_timeout.timeout(timeout):
return await function(self, *args, **kwargs)
except (asyncio.TimeoutError, ClientError, Error):
pass
_LOGGER.debug("autologin")
try:
async with async_timeout.timeout(timeout):
await self.login()
return await function(self, *args, **kwargs)
except (asyncio.TimeoutError, ClientError, Error):
raise Error(str(function))
return wrapper | [
"def",
"autologin",
"(",
"function",
",",
"timeout",
"=",
"TIMEOUT",
")",
":",
"@",
"wraps",
"(",
"function",
")",
"async",
"def",
"wrapper",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"\"\"\"Wrap a function with timeout.\"\"\"",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"timeout",
")",
":",
"return",
"await",
"function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"asyncio",
".",
"TimeoutError",
",",
"ClientError",
",",
"Error",
")",
":",
"pass",
"_LOGGER",
".",
"debug",
"(",
"\"autologin\"",
")",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"timeout",
")",
":",
"await",
"self",
".",
"login",
"(",
")",
"return",
"await",
"function",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"except",
"(",
"asyncio",
".",
"TimeoutError",
",",
"ClientError",
",",
"Error",
")",
":",
"raise",
"Error",
"(",
"str",
"(",
"function",
")",
")",
"return",
"wrapper"
] | 36.7 | 0.001328 | [
"def autologin(function, timeout=TIMEOUT):\n",
" \"\"\"Decorator that will try to login and redo an action before failing.\"\"\"\n",
" @wraps(function)\n",
" async def wrapper(self, *args, **kwargs):\n",
" \"\"\"Wrap a function with timeout.\"\"\"\n",
" try:\n",
" async with async_timeout.timeout(timeout):\n",
" return await function(self, *args, **kwargs)\n",
" except (asyncio.TimeoutError, ClientError, Error):\n",
" pass\n",
"\n",
" _LOGGER.debug(\"autologin\")\n",
" try:\n",
" async with async_timeout.timeout(timeout):\n",
" await self.login()\n",
" return await function(self, *args, **kwargs)\n",
" except (asyncio.TimeoutError, ClientError, Error):\n",
" raise Error(str(function))\n",
"\n",
" return wrapper"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05555555555555555
] | 20 | 0.002778 |
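
A synchronous analogue of the retry-after-login decorator above (no asyncio or timeouts); `Error`, `Client`, and the login flow are stand-ins.

```python
from functools import wraps

class Error(Exception):
    pass

def autologin(function):
    @wraps(function)
    def wrapper(self, *args, **kwargs):
        try:
            return function(self, *args, **kwargs)
        except Error:
            pass  # first attempt failed: re-authenticate and retry once
        self.login()
        return function(self, *args, **kwargs)
    return wrapper

class Client:
    logged_in = False
    def login(self):
        self.logged_in = True
    @autologin
    def fetch(self):
        if not self.logged_in:
            raise Error("not logged in")
        return "data"

print(Client().fetch())  # logs in transparently, prints 'data'
```
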
def browse_stations(self, station_category_id):
"""Get the stations for a category from Browse Stations.
Parameters:
station_category_id (str): A station category ID as
found with :meth:`browse_stations_categories`.
Returns:
list: Station dicts.
"""
response = self._call(
mc_calls.BrowseStations,
station_category_id
)
stations = response.body.get('stations', [])
return stations | [
"def",
"browse_stations",
"(",
"self",
",",
"station_category_id",
")",
":",
"response",
"=",
"self",
".",
"_call",
"(",
"mc_calls",
".",
"BrowseStations",
",",
"station_category_id",
")",
"stations",
"=",
"response",
".",
"body",
".",
"get",
"(",
"'stations'",
",",
"[",
"]",
")",
"return",
"stations"
] | 22.166667 | 0.036058 | [
"def browse_stations(self, station_category_id):\n",
"\t\t\"\"\"Get the stations for a category from Browse Stations.\n",
"\n",
"\t\tParameters:\n",
"\t\t\tstation_category_id (str): A station category ID as\n",
"\t\t\t\tfound with :meth:`browse_stations_categories`.\n",
"\n",
"\t\tReturns:\n",
"\t\t\tlist: Station dicts.\n",
"\t\t\"\"\"\n",
"\n",
"\t\tresponse = self._call(\n",
"\t\t\tmc_calls.BrowseStations,\n",
"\t\t\tstation_category_id\n",
"\t\t)\n",
"\t\tstations = response.body.get('stations', [])\n",
"\n",
"\t\treturn stations"
] | [
0,
0.03389830508474576,
0,
0.07142857142857142,
0.01818181818181818,
0.0196078431372549,
0,
0.09090909090909091,
0.041666666666666664,
0.16666666666666666,
0,
0.04,
0.03571428571428571,
0.043478260869565216,
0.25,
0.02127659574468085,
0,
0.11764705882352941
] | 18 | 0.052804 |
def msg(self, level, s, *args):
"""
Print a debug message with the given level
"""
if s and level <= self.debug:
print "%s%s %s" % (" " * self.indent, s, ' '.join(map(repr, args))) | [
"def",
"msg",
"(",
"self",
",",
"level",
",",
"s",
",",
"*",
"args",
")",
":",
"if",
"s",
"and",
"level",
"<=",
"self",
".",
"debug",
":",
"print",
"\"%s%s %s\"",
"%",
"(",
"\" \"",
"*",
"self",
".",
"indent",
",",
"s",
",",
"' '",
".",
"join",
"(",
"map",
"(",
"repr",
",",
"args",
")",
")",
")"
] | 36.666667 | 0.013333 | [
"def msg(self, level, s, *args):\n",
" \"\"\"\n",
" Print a debug message with the given level\n",
" \"\"\"\n",
" if s and level <= self.debug:\n",
" print \"%s%s %s\" % (\" \" * self.indent, s, ' '.join(map(repr, args)))"
] | [
0,
0.08333333333333333,
0,
0,
0,
0.025
] | 6 | 0.018056 |
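
The record above uses the Python 2 print statement; a Python 3 version of the same level-gated, indented tracer (the `Tracer` wrapper is invented for the sketch):

```python
class Tracer:
    def __init__(self, debug=1, indent=0):
        self.debug = debug
        self.indent = indent

    def msg(self, level, s, *args):
        # only emit when the message level is within the debug threshold
        if s and level <= self.debug:
            print("%s%s %s" % (" " * self.indent, s, " ".join(map(repr, args))))

Tracer(debug=2, indent=4).msg(1, "loading", "a.py", 42)
```
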
def check_password(self, raw_password):
"""Validates the given raw password against the intance's encrypted one.
:param raw_password: Raw password to be checked against.
:type raw_password: unicode
:returns: True if comparison was successful, False otherwise.
:rtype: bool
:raises: :exc:`ImportError` if `py-bcrypt` was not found.
"""
bcrypt = self.get_bcrypt()
return bcrypt.hashpw(raw_password, self.value)==self.value | [
"def",
"check_password",
"(",
"self",
",",
"raw_password",
")",
":",
"bcrypt",
"=",
"self",
".",
"get_bcrypt",
"(",
")",
"return",
"bcrypt",
".",
"hashpw",
"(",
"raw_password",
",",
"self",
".",
"value",
")",
"==",
"self",
".",
"value"
] | 43.909091 | 0.008114 | [
"def check_password(self, raw_password):\n",
" \"\"\"Validates the given raw password against the intance's encrypted one.\n",
"\n",
" :param raw_password: Raw password to be checked against.\n",
" :type raw_password: unicode\n",
" :returns: True if comparison was successful, False otherwise.\n",
" :rtype: bool\n",
" :raises: :exc:`ImportError` if `py-bcrypt` was not found.\n",
" \"\"\"\n",
" bcrypt = self.get_bcrypt()\n",
" return bcrypt.hashpw(raw_password, self.value)==self.value"
] | [
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304
] | 11 | 0.004999 |
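
The same check with the modern `bcrypt` package, which is an assumption: the record loads py-bcrypt and compares the `hashpw(password, hash)` round-trip with `==`, while `checkpw` performs that comparison in constant time.

```python
import bcrypt

hashed = bcrypt.hashpw(b"s3cret", bcrypt.gensalt())
print(bcrypt.checkpw(b"s3cret", hashed))  # True
print(bcrypt.checkpw(b"wrong", hashed))   # False
```
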
def toggle(self, key):
""" Toggles a boolean key """
val = self[key]
assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val)
self.pref_update(key, not val) | [
"def",
"toggle",
"(",
"self",
",",
"key",
")",
":",
"val",
"=",
"self",
"[",
"key",
"]",
"assert",
"isinstance",
"(",
"val",
",",
"bool",
")",
",",
"'key[%r] = %r is not a bool'",
"%",
"(",
"key",
",",
"val",
")",
"self",
".",
"pref_update",
"(",
"key",
",",
"not",
"val",
")"
] | 39.8 | 0.009852 | [
"def toggle(self, key):\n",
" \"\"\" Toggles a boolean key \"\"\"\n",
" val = self[key]\n",
" assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val)\n",
" self.pref_update(key, not val)"
] | [
0,
0.02631578947368421,
0,
0,
0.02631578947368421
] | 5 | 0.010526 |
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-06-01: :mod:`v2016_06_01.models<azure.mgmt.resource.subscriptions.v2016_06_01.models>`
"""
if api_version == '2016-06-01':
from .v2016_06_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version)) | [
"def",
"models",
"(",
"cls",
",",
"api_version",
"=",
"DEFAULT_API_VERSION",
")",
":",
"if",
"api_version",
"==",
"'2016-06-01'",
":",
"from",
".",
"v2016_06_01",
"import",
"models",
"return",
"models",
"raise",
"NotImplementedError",
"(",
"\"APIVersion {} is not available\"",
".",
"format",
"(",
"api_version",
")",
")"
] | 44.777778 | 0.009732 | [
"def models(cls, api_version=DEFAULT_API_VERSION):\n",
" \"\"\"Module depends on the API version:\n",
"\n",
" * 2016-06-01: :mod:`v2016_06_01.models<azure.mgmt.resource.subscriptions.v2016_06_01.models>`\n",
" \"\"\"\n",
" if api_version == '2016-06-01':\n",
" from .v2016_06_01 import models\n",
" return models\n",
" raise NotImplementedError(\"APIVersion {} is not available\".format(api_version))"
] | [
0,
0.021739130434782608,
0,
0.009523809523809525,
0,
0,
0,
0,
0.022988505747126436
] | 9 | 0.006028 |
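
A dict-based sketch of the version-to-module dispatch above; the module name is a placeholder, so calling this only succeeds where such a package exists.

```python
import importlib

_API_MODULES = {"2016-06-01": "v2016_06_01"}  # version -> package name (placeholder)

def models(api_version):
    try:
        name = _API_MODULES[api_version]
    except KeyError:
        raise NotImplementedError(
            "APIVersion {} is not available".format(api_version))
    return importlib.import_module(name + ".models")
```
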
def strip_pad(hdu):
"""Remove the padding lines that CFHT adds to headers"""
l = hdu.header.ascardlist()
d = []
for index in range(len(l)):
if l[index].key in __comment_keys and str(l[index])==__cfht_padding:
d.append(index)
d.reverse()
for index in d:
del l[index]
return(0) | [
"def",
"strip_pad",
"(",
"hdu",
")",
":",
"l",
"=",
"hdu",
".",
"header",
".",
"ascardlist",
"(",
")",
"d",
"=",
"[",
"]",
"for",
"index",
"in",
"range",
"(",
"len",
"(",
"l",
")",
")",
":",
"if",
"l",
"[",
"index",
"]",
".",
"key",
"in",
"__comment_keys",
"and",
"str",
"(",
"l",
"[",
"index",
"]",
")",
"==",
"__cfht_padding",
":",
"d",
".",
"append",
"(",
"index",
")",
"d",
".",
"reverse",
"(",
")",
"for",
"index",
"in",
"d",
":",
"del",
"l",
"[",
"index",
"]",
"return",
"(",
"0",
")"
] | 27.083333 | 0.011905 | [
"def strip_pad(hdu):\n",
" \"\"\"Remove the padding lines that CFHT adds to headers\"\"\"\n",
" \n",
" l = hdu.header.ascardlist()\n",
" d = []\n",
" for index in range(len(l)):\n",
" if l[index].key in __comment_keys and str(l[index])==__cfht_padding:\n",
" d.append(index)\n",
" d.reverse()\n",
" for index in d:\n",
" del l[index]\n",
" return(0)"
] | [
0,
0,
0.2,
0.03125,
0,
0,
0.012987012987012988,
0,
0,
0,
0,
0.07692307692307693
] | 12 | 0.026763 |
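
The deletion idiom in the record, isolated: collect the doomed indices first, then delete in reverse so earlier indices stay valid.

```python
items = ["keep", "PAD", "keep", "PAD", "PAD"]
doomed = [i for i, v in enumerate(items) if v == "PAD"]
for i in reversed(doomed):  # back to front: deletions don't shift pending indices
    del items[i]
print(items)  # ['keep', 'keep']
```
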
def updateAccountValue(self, key, val, currency, accountName):
"""updateAccountValue(EWrapper self, IBString const & key, IBString const & val, IBString const & currency, IBString const & accountName)"""
return _swigibpy.EWrapper_updateAccountValue(self, key, val, currency, accountName) | [
"def",
"updateAccountValue",
"(",
"self",
",",
"key",
",",
"val",
",",
"currency",
",",
"accountName",
")",
":",
"return",
"_swigibpy",
".",
"EWrapper_updateAccountValue",
"(",
"self",
",",
"key",
",",
"val",
",",
"currency",
",",
"accountName",
")"
] | 100.333333 | 0.013201 | [
"def updateAccountValue(self, key, val, currency, accountName):\n",
" \"\"\"updateAccountValue(EWrapper self, IBString const & key, IBString const & val, IBString const & currency, IBString const & accountName)\"\"\"\n",
" return _swigibpy.EWrapper_updateAccountValue(self, key, val, currency, accountName)"
] | [
0,
0.013422818791946308,
0.02197802197802198
] | 3 | 0.0118 |
def get_model(model, ctx, opt):
"""Model initialization."""
kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes}
if model.startswith('resnet'):
kwargs['thumbnail'] = opt.use_thumbnail
elif model.startswith('vgg'):
kwargs['batch_norm'] = opt.batch_norm
net = models.get_model(model, **kwargs)
if opt.resume:
net.load_parameters(opt.resume)
elif not opt.use_pretrained:
if model in ['alexnet']:
net.initialize(mx.init.Normal())
else:
net.initialize(mx.init.Xavier(magnitude=2))
net.cast(opt.dtype)
return net | [
"def",
"get_model",
"(",
"model",
",",
"ctx",
",",
"opt",
")",
":",
"kwargs",
"=",
"{",
"'ctx'",
":",
"ctx",
",",
"'pretrained'",
":",
"opt",
".",
"use_pretrained",
",",
"'classes'",
":",
"classes",
"}",
"if",
"model",
".",
"startswith",
"(",
"'resnet'",
")",
":",
"kwargs",
"[",
"'thumbnail'",
"]",
"=",
"opt",
".",
"use_thumbnail",
"elif",
"model",
".",
"startswith",
"(",
"'vgg'",
")",
":",
"kwargs",
"[",
"'batch_norm'",
"]",
"=",
"opt",
".",
"batch_norm",
"net",
"=",
"models",
".",
"get_model",
"(",
"model",
",",
"*",
"*",
"kwargs",
")",
"if",
"opt",
".",
"resume",
":",
"net",
".",
"load_parameters",
"(",
"opt",
".",
"resume",
")",
"elif",
"not",
"opt",
".",
"use_pretrained",
":",
"if",
"model",
"in",
"[",
"'alexnet'",
"]",
":",
"net",
".",
"initialize",
"(",
"mx",
".",
"init",
".",
"Normal",
"(",
")",
")",
"else",
":",
"net",
".",
"initialize",
"(",
"mx",
".",
"init",
".",
"Xavier",
"(",
"magnitude",
"=",
"2",
")",
")",
"net",
".",
"cast",
"(",
"opt",
".",
"dtype",
")",
"return",
"net"
] | 34.055556 | 0.001587 | [
"def get_model(model, ctx, opt):\n",
" \"\"\"Model initialization.\"\"\"\n",
" kwargs = {'ctx': ctx, 'pretrained': opt.use_pretrained, 'classes': classes}\n",
" if model.startswith('resnet'):\n",
" kwargs['thumbnail'] = opt.use_thumbnail\n",
" elif model.startswith('vgg'):\n",
" kwargs['batch_norm'] = opt.batch_norm\n",
"\n",
" net = models.get_model(model, **kwargs)\n",
" if opt.resume:\n",
" net.load_parameters(opt.resume)\n",
" elif not opt.use_pretrained:\n",
" if model in ['alexnet']:\n",
" net.initialize(mx.init.Normal())\n",
" else:\n",
" net.initialize(mx.init.Xavier(magnitude=2))\n",
" net.cast(opt.dtype)\n",
" return net"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 18 | 0.003968 |
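
The conditional-kwargs pattern above in isolation: accumulate keyword arguments per model family, then splat them into a single call. `make_model` stands in for `models.get_model`.

```python
def make_model(name, **kwargs):
    return {"name": name, **kwargs}  # stand-in for the real factory

model = "resnet18_v1"
kwargs = {"pretrained": False, "classes": 10}
if model.startswith("resnet"):
    kwargs["thumbnail"] = True   # flag only resnets accept
elif model.startswith("vgg"):
    kwargs["batch_norm"] = True  # flag only vggs accept
print(make_model(model, **kwargs))
```
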
def spawn(self):
"""Spawn the fake executable using subprocess.Popen."""
self._process = subprocess.Popen(
[self.path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
self.addCleanup(self._process_kill) | [
"def",
"spawn",
"(",
"self",
")",
":",
"self",
".",
"_process",
"=",
"subprocess",
".",
"Popen",
"(",
"[",
"self",
".",
"path",
"]",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"self",
".",
"addCleanup",
"(",
"self",
".",
"_process_kill",
")"
] | 47.4 | 0.008299 | [
"def spawn(self):\n",
" \"\"\"Spawn the fake executable using subprocess.Popen.\"\"\"\n",
" self._process = subprocess.Popen(\n",
" [self.path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n",
" self.addCleanup(self._process_kill)"
] | [
0,
0.015625,
0,
0,
0.023255813953488372
] | 5 | 0.007776 |
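
The spawn-and-guarantee-cleanup pattern with plain subprocess; `sys.executable` serves as a safe fake binary, and the `finally` block plays the role of `addCleanup`.

```python
import subprocess
import sys

proc = subprocess.Popen([sys.executable, "-c", "print('ok')"],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
try:
    out, _ = proc.communicate(timeout=10)  # stderr is merged into stdout
    print(out.decode().strip())            # 'ok'
finally:
    if proc.poll() is None:
        proc.kill()
```
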
async def release(data):
"""
Release a session
:param data: Information obtained from a POST request.
The content type is application/json.
The correct packet form should be as follows:
{
'token': UUID token from current session start
'command': 'release'
}
"""
global session
if not feature_flags.use_protocol_api_v2():
session.adapter.remove_instrument('left')
session.adapter.remove_instrument('right')
else:
session.adapter.cache_instruments()
session = None
return web.json_response({"message": "calibration session released"}) | [
"async",
"def",
"release",
"(",
"data",
")",
":",
"global",
"session",
"if",
"not",
"feature_flags",
".",
"use_protocol_api_v2",
"(",
")",
":",
"session",
".",
"adapter",
".",
"remove_instrument",
"(",
"'left'",
")",
"session",
".",
"adapter",
".",
"remove_instrument",
"(",
"'right'",
")",
"else",
":",
"session",
".",
"adapter",
".",
"cache_instruments",
"(",
")",
"session",
"=",
"None",
"return",
"web",
".",
"json_response",
"(",
"{",
"\"message\"",
":",
"\"calibration session released\"",
"}",
")"
] | 30.1 | 0.00161 | [
"async def release(data):\n",
" \"\"\"\n",
" Release a session\n",
"\n",
" :param data: Information obtained from a POST request.\n",
" The content type is application/json.\n",
" The correct packet form should be as follows:\n",
" {\n",
" 'token': UUID token from current session start\n",
" 'command': 'release'\n",
" }\n",
" \"\"\"\n",
" global session\n",
" if not feature_flags.use_protocol_api_v2():\n",
" session.adapter.remove_instrument('left')\n",
" session.adapter.remove_instrument('right')\n",
" else:\n",
" session.adapter.cache_instruments()\n",
" session = None\n",
" return web.json_response({\"message\": \"calibration session released\"})"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0136986301369863
] | 20 | 0.000685 |
def list_nodes_min(call=None):
'''
Return a list of the VMs that are on the provider. Only a list of VM names,
and their state, is returned. This is the minimum amount of information
needed to check for existing VMs.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The list_nodes_min function must be called with -f or --function.'
)
ret = {}
location = get_location()
params = {
'Action': 'DescribeInstanceStatus',
'RegionId': location,
}
nodes = query(params)
log.debug(
'Total %s instance found in Region %s',
nodes['TotalCount'], location
)
if 'Code' in nodes or nodes['TotalCount'] == 0:
return ret
for node in nodes['InstanceStatuses']['InstanceStatus']:
ret[node['InstanceId']] = {}
for item in node:
ret[node['InstanceId']][item] = node[item]
return ret | [
"def",
"list_nodes_min",
"(",
"call",
"=",
"None",
")",
":",
"if",
"call",
"==",
"'action'",
":",
"raise",
"SaltCloudSystemExit",
"(",
"'The list_nodes_min function must be called with -f or --function.'",
")",
"ret",
"=",
"{",
"}",
"location",
"=",
"get_location",
"(",
")",
"params",
"=",
"{",
"'Action'",
":",
"'DescribeInstanceStatus'",
",",
"'RegionId'",
":",
"location",
",",
"}",
"nodes",
"=",
"query",
"(",
"params",
")",
"log",
".",
"debug",
"(",
"'Total %s instance found in Region %s'",
",",
"nodes",
"[",
"'TotalCount'",
"]",
",",
"location",
")",
"if",
"'Code'",
"in",
"nodes",
"or",
"nodes",
"[",
"'TotalCount'",
"]",
"==",
"0",
":",
"return",
"ret",
"for",
"node",
"in",
"nodes",
"[",
"'InstanceStatuses'",
"]",
"[",
"'InstanceStatus'",
"]",
":",
"ret",
"[",
"node",
"[",
"'InstanceId'",
"]",
"]",
"=",
"{",
"}",
"for",
"item",
"in",
"node",
":",
"ret",
"[",
"node",
"[",
"'InstanceId'",
"]",
"]",
"[",
"item",
"]",
"=",
"node",
"[",
"item",
"]",
"return",
"ret"
] | 28.09375 | 0.001075 | [
"def list_nodes_min(call=None):\n",
" '''\n",
" Return a list of the VMs that are on the provider. Only a list of VM names,\n",
" and their state, is returned. This is the minimum amount of information\n",
" needed to check for existing VMs.\n",
" '''\n",
" if call == 'action':\n",
" raise SaltCloudSystemExit(\n",
" 'The list_nodes_min function must be called with -f or --function.'\n",
" )\n",
"\n",
" ret = {}\n",
" location = get_location()\n",
" params = {\n",
" 'Action': 'DescribeInstanceStatus',\n",
" 'RegionId': location,\n",
" }\n",
" nodes = query(params)\n",
"\n",
" log.debug(\n",
" 'Total %s instance found in Region %s',\n",
" nodes['TotalCount'], location\n",
" )\n",
" if 'Code' in nodes or nodes['TotalCount'] == 0:\n",
" return ret\n",
"\n",
" for node in nodes['InstanceStatuses']['InstanceStatus']:\n",
" ret[node['InstanceId']] = {}\n",
" for item in node:\n",
" ret[node['InstanceId']][item] = node[item]\n",
"\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 32 | 0.002232 |
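
The reshaping step of the record, with an invented payload: flatten a list of status records into a dict keyed by instance id.

```python
nodes = {"TotalCount": 2, "InstanceStatuses": {"InstanceStatus": [
    {"InstanceId": "i-1", "Status": "Running"},
    {"InstanceId": "i-2", "Status": "Stopped"},
]}}

ret = {}
for node in nodes["InstanceStatuses"]["InstanceStatus"]:
    ret[node["InstanceId"]] = dict(node)  # copy every field, as the loop above does
print(ret["i-2"]["Status"])  # Stopped
```
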
def _find_topics_with_wrong_rp(topics, zk, default_min_isr):
"""Returns topics with wrong replication factor."""
topics_with_wrong_rf = []
for topic_name, partitions in topics.items():
min_isr = get_min_isr(zk, topic_name) or default_min_isr
replication_factor = len(partitions[0].replicas)
if replication_factor >= min_isr + 1:
continue
topics_with_wrong_rf.append({
'replication_factor': replication_factor,
'min_isr': min_isr,
'topic': topic_name,
})
return topics_with_wrong_rf | [
"def",
"_find_topics_with_wrong_rp",
"(",
"topics",
",",
"zk",
",",
"default_min_isr",
")",
":",
"topics_with_wrong_rf",
"=",
"[",
"]",
"for",
"topic_name",
",",
"partitions",
"in",
"topics",
".",
"items",
"(",
")",
":",
"min_isr",
"=",
"get_min_isr",
"(",
"zk",
",",
"topic_name",
")",
"or",
"default_min_isr",
"replication_factor",
"=",
"len",
"(",
"partitions",
"[",
"0",
"]",
".",
"replicas",
")",
"if",
"replication_factor",
">=",
"min_isr",
"+",
"1",
":",
"continue",
"topics_with_wrong_rf",
".",
"append",
"(",
"{",
"'replication_factor'",
":",
"replication_factor",
",",
"'min_isr'",
":",
"min_isr",
",",
"'topic'",
":",
"topic_name",
",",
"}",
")",
"return",
"topics_with_wrong_rf"
] | 31.777778 | 0.001698 | [
"def _find_topics_with_wrong_rp(topics, zk, default_min_isr):\n",
" \"\"\"Returns topics with wrong replication factor.\"\"\"\n",
" topics_with_wrong_rf = []\n",
"\n",
" for topic_name, partitions in topics.items():\n",
" min_isr = get_min_isr(zk, topic_name) or default_min_isr\n",
" replication_factor = len(partitions[0].replicas)\n",
"\n",
" if replication_factor >= min_isr + 1:\n",
" continue\n",
"\n",
" topics_with_wrong_rf.append({\n",
" 'replication_factor': replication_factor,\n",
" 'min_isr': min_isr,\n",
" 'topic': topic_name,\n",
" })\n",
"\n",
" return topics_with_wrong_rf"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903
] | 18 | 0.001792 |
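
The invariant the record checks, stated directly: a topic is flagged unless its replication factor exceeds min ISR by at least one, i.e. unless one replica can fail without dropping below min.insync.replicas.

```python
def wrong_rf(replication_factor, min_isr):
    return replication_factor < min_isr + 1

print(wrong_rf(3, 2))  # False: one broker can fail safely
print(wrong_rf(2, 2))  # True: no headroom for a failure
```
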
def simulate_run(res, rstate=None, return_idx=False, approx=False):
"""
Probes **combined uncertainties** (statistical and sampling) on a nested
sampling run by wrapping :meth:`jitter_run` and :meth:`resample_run`.
Parameters
----------
res : :class:`~dynesty.results.Results` instance
The :class:`~dynesty.results.Results` instance taken from a previous
nested sampling run.
rstate : `~numpy.random.RandomState`, optional
`~numpy.random.RandomState` instance.
return_idx : bool, optional
Whether to return the list of resampled indices used to construct
the new run. Default is `False`.
approx : bool, optional
Whether to approximate all sets of uniform order statistics by their
associated marginals (from the Beta distribution). Default is `False`.
Returns
-------
new_res : :class:`~dynesty.results.Results` instance
A new :class:`~dynesty.results.Results` instance with corresponding
samples and weights based on our "simulated" samples and
prior volumes.
"""
if rstate is None:
rstate = np.random
# Resample run.
new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)
# Jitter run.
new_res = jitter_run(new_res, rstate=rstate, approx=approx)
if return_idx:
return new_res, samp_idx
else:
return new_res | [
"def",
"simulate_run",
"(",
"res",
",",
"rstate",
"=",
"None",
",",
"return_idx",
"=",
"False",
",",
"approx",
"=",
"False",
")",
":",
"if",
"rstate",
"is",
"None",
":",
"rstate",
"=",
"np",
".",
"random",
"# Resample run.",
"new_res",
",",
"samp_idx",
"=",
"resample_run",
"(",
"res",
",",
"rstate",
"=",
"rstate",
",",
"return_idx",
"=",
"True",
")",
"# Jitter run.",
"new_res",
"=",
"jitter_run",
"(",
"new_res",
",",
"rstate",
"=",
"rstate",
",",
"approx",
"=",
"approx",
")",
"if",
"return_idx",
":",
"return",
"new_res",
",",
"samp_idx",
"else",
":",
"return",
"new_res"
] | 31.227273 | 0.000706 | [
"def simulate_run(res, rstate=None, return_idx=False, approx=False):\n",
" \"\"\"\n",
" Probes **combined uncertainties** (statistical and sampling) on a nested\n",
" sampling run by wrapping :meth:`jitter_run` and :meth:`resample_run`.\n",
"\n",
" Parameters\n",
" ----------\n",
" res : :class:`~dynesty.results.Results` instance\n",
" The :class:`~dynesty.results.Results` instance taken from a previous\n",
" nested sampling run.\n",
"\n",
" rstate : `~numpy.random.RandomState`, optional\n",
" `~numpy.random.RandomState` instance.\n",
"\n",
" return_idx : bool, optional\n",
" Whether to return the list of resampled indices used to construct\n",
" the new run. Default is `False`.\n",
"\n",
" approx : bool, optional\n",
" Whether to approximate all sets of uniform order statistics by their\n",
" associated marginals (from the Beta distribution). Default is `False`.\n",
"\n",
" Returns\n",
" -------\n",
" new_res : :class:`~dynesty.results.Results` instance\n",
" A new :class:`~dynesty.results.Results` instance with corresponding\n",
" samples and weights based on our \"simulated\" samples and\n",
" prior volumes.\n",
"\n",
" \"\"\"\n",
"\n",
" if rstate is None:\n",
" rstate = np.random\n",
"\n",
" # Resample run.\n",
" new_res, samp_idx = resample_run(res, rstate=rstate, return_idx=True)\n",
"\n",
" # Jitter run.\n",
" new_res = jitter_run(new_res, rstate=rstate, approx=approx)\n",
"\n",
" if return_idx:\n",
" return new_res, samp_idx\n",
" else:\n",
" return new_res"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.045454545454545456
] | 44 | 0.001033 |
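
The composition the record describes (resample, then jitter, optionally returning the resampling indices), sketched with stub transforms on a plain array rather than a real nested-sampling run:

```python
import numpy as np

def resample(x, rng):
    idx = rng.integers(0, len(x), len(x))  # bootstrap indices
    return x[idx], idx

def jitter(x, rng):
    return x + rng.normal(0, 0.01, len(x))

def simulate(x, rng, return_idx=False):
    y, idx = resample(x, rng)
    y = jitter(y, rng)
    return (y, idx) if return_idx else y

print(simulate(np.arange(5.0), np.random.default_rng(0), return_idx=True))
```
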
def load(cls, data):
"""Construct a Constant class from it's dict data.
.. versionadded:: 0.0.2
"""
if len(data) == 1:
for key, value in data.items():
if "__classname__" not in value: # pragma: no cover
raise ValueError
name = key
bases = (Constant,)
attrs = dict()
for k, v in value.items():
if isinstance(v, dict):
if "__classname__" in v:
attrs[k] = cls.load({k: v})
else:
attrs[k] = v
else:
attrs[k] = v
return type(name, bases, attrs)
else: # pragma: no cover
raise ValueError | [
"def",
"load",
"(",
"cls",
",",
"data",
")",
":",
"if",
"len",
"(",
"data",
")",
"==",
"1",
":",
"for",
"key",
",",
"value",
"in",
"data",
".",
"items",
"(",
")",
":",
"if",
"\"__classname__\"",
"not",
"in",
"value",
":",
"# pragma: no cover",
"raise",
"ValueError",
"name",
"=",
"key",
"bases",
"=",
"(",
"Constant",
",",
")",
"attrs",
"=",
"dict",
"(",
")",
"for",
"k",
",",
"v",
"in",
"value",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
":",
"if",
"\"__classname__\"",
"in",
"v",
":",
"attrs",
"[",
"k",
"]",
"=",
"cls",
".",
"load",
"(",
"{",
"k",
":",
"v",
"}",
")",
"else",
":",
"attrs",
"[",
"k",
"]",
"=",
"v",
"else",
":",
"attrs",
"[",
"k",
"]",
"=",
"v",
"return",
"type",
"(",
"name",
",",
"bases",
",",
"attrs",
")",
"else",
":",
"# pragma: no cover",
"raise",
"ValueError"
] | 35.043478 | 0.002415 | [
"def load(cls, data):\n",
" \"\"\"Construct a Constant class from it's dict data.\n",
"\n",
" .. versionadded:: 0.0.2\n",
" \"\"\"\n",
" if len(data) == 1:\n",
" for key, value in data.items():\n",
" if \"__classname__\" not in value: # pragma: no cover\n",
" raise ValueError\n",
" name = key\n",
" bases = (Constant,)\n",
" attrs = dict()\n",
" for k, v in value.items():\n",
" if isinstance(v, dict):\n",
" if \"__classname__\" in v:\n",
" attrs[k] = cls.load({k: v})\n",
" else:\n",
" attrs[k] = v\n",
" else:\n",
" attrs[k] = v\n",
" return type(name, bases, attrs)\n",
" else: # pragma: no cover\n",
" raise ValueError"
] | [
0,
0.01694915254237288,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571
] | 23 | 0.00229 |
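
The mechanism behind the loader above is `type(name, bases, attrs)`, which builds a class at runtime; a flat (non-nested) sketch:

```python
data = {"Color": {"__classname__": "Color", "RED": 1, "BLUE": 2}}

name = next(iter(data))  # exactly one top-level key, as the loader requires
Color = type(name, (object,), dict(data[name]))
print(Color.RED, Color.BLUE)  # 1 2
```
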
def format_by_pattern(numobj, number_format, user_defined_formats):
"""Formats a phone number using client-defined formatting rules."
Note that if the phone number has a country calling code of zero or an
otherwise invalid country calling code, we cannot work out things like
whether there should be a national prefix applied, or how to format
extensions, so we return the national significant number with no
formatting applied.
Arguments:
numobj -- The phone number to be formatted
num_format -- The format the phone number should be formatted into
user_defined_formats -- formatting rules specified by clients
Returns the formatted phone number.
"""
country_code = numobj.country_code
nsn = national_significant_number(numobj)
if not _has_valid_country_calling_code(country_code):
return nsn
# Note region_code_for_country_code() is used because formatting
# information for regions which share a country calling code is contained
# by only one region for performance reasons. For example, for NANPA
# regions it will be contained in the metadata for US.
region_code = region_code_for_country_code(country_code)
# Metadata cannot be None because the country calling code is valid.
metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code)
formatted_number = U_EMPTY_STRING
formatting_pattern = _choose_formatting_pattern_for_number(user_defined_formats, nsn)
if formatting_pattern is None:
# If no pattern above is matched, we format the number as a whole.
formatted_number = nsn
else:
num_format_copy = _copy_number_format(formatting_pattern)
# Before we do a replacement of the national prefix pattern $NP with
# the national prefix, we need to copy the rule so that subsequent
# replacements for different numbers have the appropriate national
# prefix.
np_formatting_rule = formatting_pattern.national_prefix_formatting_rule
if np_formatting_rule:
national_prefix = metadata.national_prefix
if national_prefix:
# Replace $NP with national prefix and $FG with the first
# group (\1) matcher.
np_formatting_rule = np_formatting_rule.replace(_NP_STRING, national_prefix)
np_formatting_rule = np_formatting_rule.replace(_FG_STRING, unicod("\\1"))
num_format_copy.national_prefix_formatting_rule = np_formatting_rule
else:
# We don't want to have a rule for how to format the national
# prefix if there isn't one.
num_format_copy.national_prefix_formatting_rule = None
formatted_number = _format_nsn_using_pattern(nsn, num_format_copy, number_format)
formatted_number = _maybe_append_formatted_extension(numobj,
metadata,
number_format,
formatted_number)
formatted_number = _prefix_number_with_country_calling_code(country_code,
number_format,
formatted_number)
return formatted_number | [
"def",
"format_by_pattern",
"(",
"numobj",
",",
"number_format",
",",
"user_defined_formats",
")",
":",
"country_code",
"=",
"numobj",
".",
"country_code",
"nsn",
"=",
"national_significant_number",
"(",
"numobj",
")",
"if",
"not",
"_has_valid_country_calling_code",
"(",
"country_code",
")",
":",
"return",
"nsn",
"# Note region_code_for_country_code() is used because formatting",
"# information for regions which share a country calling code is contained",
"# by only one region for performance reasons. For example, for NANPA",
"# regions it will be contained in the metadata for US.",
"region_code",
"=",
"region_code_for_country_code",
"(",
"country_code",
")",
"# Metadata cannot be None because the country calling code is valid.",
"metadata",
"=",
"PhoneMetadata",
".",
"metadata_for_region_or_calling_code",
"(",
"country_code",
",",
"region_code",
")",
"formatted_number",
"=",
"U_EMPTY_STRING",
"formatting_pattern",
"=",
"_choose_formatting_pattern_for_number",
"(",
"user_defined_formats",
",",
"nsn",
")",
"if",
"formatting_pattern",
"is",
"None",
":",
"# If no pattern above is matched, we format the number as a whole.",
"formatted_number",
"=",
"nsn",
"else",
":",
"num_format_copy",
"=",
"_copy_number_format",
"(",
"formatting_pattern",
")",
"# Before we do a replacement of the national prefix pattern $NP with",
"# the national prefix, we need to copy the rule so that subsequent",
"# replacements for different numbers have the appropriate national",
"# prefix.",
"np_formatting_rule",
"=",
"formatting_pattern",
".",
"national_prefix_formatting_rule",
"if",
"np_formatting_rule",
":",
"national_prefix",
"=",
"metadata",
".",
"national_prefix",
"if",
"national_prefix",
":",
"# Replace $NP with national prefix and $FG with the first",
"# group (\\1) matcher.",
"np_formatting_rule",
"=",
"np_formatting_rule",
".",
"replace",
"(",
"_NP_STRING",
",",
"national_prefix",
")",
"np_formatting_rule",
"=",
"np_formatting_rule",
".",
"replace",
"(",
"_FG_STRING",
",",
"unicod",
"(",
"\"\\\\1\"",
")",
")",
"num_format_copy",
".",
"national_prefix_formatting_rule",
"=",
"np_formatting_rule",
"else",
":",
"# We don't want to have a rule for how to format the national",
"# prefix if there isn't one.",
"num_format_copy",
".",
"national_prefix_formatting_rule",
"=",
"None",
"formatted_number",
"=",
"_format_nsn_using_pattern",
"(",
"nsn",
",",
"num_format_copy",
",",
"number_format",
")",
"formatted_number",
"=",
"_maybe_append_formatted_extension",
"(",
"numobj",
",",
"metadata",
",",
"number_format",
",",
"formatted_number",
")",
"formatted_number",
"=",
"_prefix_number_with_country_calling_code",
"(",
"country_code",
",",
"number_format",
",",
"formatted_number",
")",
"return",
"formatted_number"
] | 54.540984 | 0.002362 | [
"def format_by_pattern(numobj, number_format, user_defined_formats):\n",
" \"\"\"Formats a phone number using client-defined formatting rules.\"\n",
"\n",
" Note that if the phone number has a country calling code of zero or an\n",
" otherwise invalid country calling code, we cannot work out things like\n",
" whether there should be a national prefix applied, or how to format\n",
" extensions, so we return the national significant number with no\n",
" formatting applied.\n",
"\n",
" Arguments:\n",
" numobj -- The phone number to be formatted\n",
" num_format -- The format the phone number should be formatted into\n",
" user_defined_formats -- formatting rules specified by clients\n",
"\n",
" Returns the formatted phone number.\n",
" \"\"\"\n",
" country_code = numobj.country_code\n",
" nsn = national_significant_number(numobj)\n",
" if not _has_valid_country_calling_code(country_code):\n",
" return nsn\n",
" # Note region_code_for_country_code() is used because formatting\n",
" # information for regions which share a country calling code is contained\n",
" # by only one region for performance reasons. For example, for NANPA\n",
" # regions it will be contained in the metadata for US.\n",
" region_code = region_code_for_country_code(country_code)\n",
" # Metadata cannot be None because the country calling code is valid.\n",
" metadata = PhoneMetadata.metadata_for_region_or_calling_code(country_code, region_code)\n",
"\n",
" formatted_number = U_EMPTY_STRING\n",
" formatting_pattern = _choose_formatting_pattern_for_number(user_defined_formats, nsn)\n",
" if formatting_pattern is None:\n",
" # If no pattern above is matched, we format the number as a whole.\n",
" formatted_number = nsn\n",
" else:\n",
" num_format_copy = _copy_number_format(formatting_pattern)\n",
" # Before we do a replacement of the national prefix pattern $NP with\n",
" # the national prefix, we need to copy the rule so that subsequent\n",
" # replacements for different numbers have the appropriate national\n",
" # prefix.\n",
" np_formatting_rule = formatting_pattern.national_prefix_formatting_rule\n",
" if np_formatting_rule:\n",
" national_prefix = metadata.national_prefix\n",
" if national_prefix:\n",
" # Replace $NP with national prefix and $FG with the first\n",
" # group (\\1) matcher.\n",
" np_formatting_rule = np_formatting_rule.replace(_NP_STRING, national_prefix)\n",
" np_formatting_rule = np_formatting_rule.replace(_FG_STRING, unicod(\"\\\\1\"))\n",
" num_format_copy.national_prefix_formatting_rule = np_formatting_rule\n",
" else:\n",
" # We don't want to have a rule for how to format the national\n",
" # prefix if there isn't one.\n",
" num_format_copy.national_prefix_formatting_rule = None\n",
" formatted_number = _format_nsn_using_pattern(nsn, num_format_copy, number_format)\n",
" formatted_number = _maybe_append_formatted_extension(numobj,\n",
" metadata,\n",
" number_format,\n",
" formatted_number)\n",
" formatted_number = _prefix_number_with_country_calling_code(country_code,\n",
" number_format,\n",
" formatted_number)\n",
" return formatted_number"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010752688172043012,
0.01098901098901099,
0.011764705882352941,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0.037037037037037035
] | 61 | 0.001899 |
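
Client-side usage of this function through the python-phonenumbers package; the import location and the `NumberFormat` keywords below are assumptions about that public API, so verify against the installed version.

```python
import phonenumbers
from phonenumbers.phonemetadata import NumberFormat  # assumed location

num = phonenumbers.parse("+16502530000", None)
fmt = NumberFormat(pattern="(\\d{3})(\\d{3})(\\d{4})", format="\\1-\\2-\\3")
out = phonenumbers.format_by_pattern(
    num, phonenumbers.PhoneNumberFormat.NATIONAL, [fmt])
print(out)  # expected: 650-253-0000
```
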
def findpath(self, points, curvature=1.0):
"""Constructs a path between the given list of points.
Interpolates the list of points and determines
a smooth bezier path between them.
The curvature parameter offers some control on
how separate segments are stitched together:
from straight angles to smooth curves.
Curvature is only useful if the path has more than three points.
"""
# The list of points consists of Point objects,
# but it shouldn't crash on something straightforward
# as someone supplying a list of (x,y)-tuples.
for i, pt in enumerate(points):
if type(pt) == TupleType:
points[i] = Point(pt[0], pt[1])
if len(points) == 0:
return None
if len(points) == 1:
path = self.BezierPath(None)
path.moveto(points[0].x, points[0].y)
return path
if len(points) == 2:
path = self.BezierPath(None)
path.moveto(points[0].x, points[0].y)
path.lineto(points[1].x, points[1].y)
return path
# Zero curvature means straight lines.
curvature = max(0, min(1, curvature))
if curvature == 0:
path = self.BezierPath(None)
path.moveto(points[0].x, points[0].y)
for i in range(len(points)):
path.lineto(points[i].x, points[i].y)
return path
curvature = 4 + (1.0 - curvature) * 40
dx = {0: 0, len(points) - 1: 0}
dy = {0: 0, len(points) - 1: 0}
bi = {1: -0.25}
ax = {1: (points[2].x - points[0].x - dx[0]) / 4}
ay = {1: (points[2].y - points[0].y - dy[0]) / 4}
for i in range(2, len(points) - 1):
bi[i] = -1 / (curvature + bi[i - 1])
ax[i] = -(points[i + 1].x - points[i - 1].x - ax[i - 1]) * bi[i]
ay[i] = -(points[i + 1].y - points[i - 1].y - ay[i - 1]) * bi[i]
r = range(1, len(points) - 1)
r.reverse()
for i in r:
dx[i] = ax[i] + dx[i + 1] * bi[i]
dy[i] = ay[i] + dy[i + 1] * bi[i]
path = self.BezierPath(None)
path.moveto(points[0].x, points[0].y)
for i in range(len(points) - 1):
path.curveto(points[i].x + dx[i],
points[i].y + dy[i],
points[i + 1].x - dx[i + 1],
points[i + 1].y - dy[i + 1],
points[i + 1].x,
points[i + 1].y)
return path | [
"def",
"findpath",
"(",
"self",
",",
"points",
",",
"curvature",
"=",
"1.0",
")",
":",
"# The list of points consists of Point objects,",
"# but it shouldn't crash on something straightforward",
"# as someone supplying a list of (x,y)-tuples.",
"for",
"i",
",",
"pt",
"in",
"enumerate",
"(",
"points",
")",
":",
"if",
"type",
"(",
"pt",
")",
"==",
"TupleType",
":",
"points",
"[",
"i",
"]",
"=",
"Point",
"(",
"pt",
"[",
"0",
"]",
",",
"pt",
"[",
"1",
"]",
")",
"if",
"len",
"(",
"points",
")",
"==",
"0",
":",
"return",
"None",
"if",
"len",
"(",
"points",
")",
"==",
"1",
":",
"path",
"=",
"self",
".",
"BezierPath",
"(",
"None",
")",
"path",
".",
"moveto",
"(",
"points",
"[",
"0",
"]",
".",
"x",
",",
"points",
"[",
"0",
"]",
".",
"y",
")",
"return",
"path",
"if",
"len",
"(",
"points",
")",
"==",
"2",
":",
"path",
"=",
"self",
".",
"BezierPath",
"(",
"None",
")",
"path",
".",
"moveto",
"(",
"points",
"[",
"0",
"]",
".",
"x",
",",
"points",
"[",
"0",
"]",
".",
"y",
")",
"path",
".",
"lineto",
"(",
"points",
"[",
"1",
"]",
".",
"x",
",",
"points",
"[",
"1",
"]",
".",
"y",
")",
"return",
"path",
"# Zero curvature means straight lines.",
"curvature",
"=",
"max",
"(",
"0",
",",
"min",
"(",
"1",
",",
"curvature",
")",
")",
"if",
"curvature",
"==",
"0",
":",
"path",
"=",
"self",
".",
"BezierPath",
"(",
"None",
")",
"path",
".",
"moveto",
"(",
"points",
"[",
"0",
"]",
".",
"x",
",",
"points",
"[",
"0",
"]",
".",
"y",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"points",
")",
")",
":",
"path",
".",
"lineto",
"(",
"points",
"[",
"i",
"]",
".",
"x",
",",
"points",
"[",
"i",
"]",
".",
"y",
")",
"return",
"path",
"curvature",
"=",
"4",
"+",
"(",
"1.0",
"-",
"curvature",
")",
"*",
"40",
"dx",
"=",
"{",
"0",
":",
"0",
",",
"len",
"(",
"points",
")",
"-",
"1",
":",
"0",
"}",
"dy",
"=",
"{",
"0",
":",
"0",
",",
"len",
"(",
"points",
")",
"-",
"1",
":",
"0",
"}",
"bi",
"=",
"{",
"1",
":",
"-",
"0.25",
"}",
"ax",
"=",
"{",
"1",
":",
"(",
"points",
"[",
"2",
"]",
".",
"x",
"-",
"points",
"[",
"0",
"]",
".",
"x",
"-",
"dx",
"[",
"0",
"]",
")",
"/",
"4",
"}",
"ay",
"=",
"{",
"1",
":",
"(",
"points",
"[",
"2",
"]",
".",
"y",
"-",
"points",
"[",
"0",
"]",
".",
"y",
"-",
"dy",
"[",
"0",
"]",
")",
"/",
"4",
"}",
"for",
"i",
"in",
"range",
"(",
"2",
",",
"len",
"(",
"points",
")",
"-",
"1",
")",
":",
"bi",
"[",
"i",
"]",
"=",
"-",
"1",
"/",
"(",
"curvature",
"+",
"bi",
"[",
"i",
"-",
"1",
"]",
")",
"ax",
"[",
"i",
"]",
"=",
"-",
"(",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"x",
"-",
"points",
"[",
"i",
"-",
"1",
"]",
".",
"x",
"-",
"ax",
"[",
"i",
"-",
"1",
"]",
")",
"*",
"bi",
"[",
"i",
"]",
"ay",
"[",
"i",
"]",
"=",
"-",
"(",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"y",
"-",
"points",
"[",
"i",
"-",
"1",
"]",
".",
"y",
"-",
"ay",
"[",
"i",
"-",
"1",
"]",
")",
"*",
"bi",
"[",
"i",
"]",
"r",
"=",
"range",
"(",
"1",
",",
"len",
"(",
"points",
")",
"-",
"1",
")",
"r",
".",
"reverse",
"(",
")",
"for",
"i",
"in",
"r",
":",
"dx",
"[",
"i",
"]",
"=",
"ax",
"[",
"i",
"]",
"+",
"dx",
"[",
"i",
"+",
"1",
"]",
"*",
"bi",
"[",
"i",
"]",
"dy",
"[",
"i",
"]",
"=",
"ay",
"[",
"i",
"]",
"+",
"dy",
"[",
"i",
"+",
"1",
"]",
"*",
"bi",
"[",
"i",
"]",
"path",
"=",
"self",
".",
"BezierPath",
"(",
"None",
")",
"path",
".",
"moveto",
"(",
"points",
"[",
"0",
"]",
".",
"x",
",",
"points",
"[",
"0",
"]",
".",
"y",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"points",
")",
"-",
"1",
")",
":",
"path",
".",
"curveto",
"(",
"points",
"[",
"i",
"]",
".",
"x",
"+",
"dx",
"[",
"i",
"]",
",",
"points",
"[",
"i",
"]",
".",
"y",
"+",
"dy",
"[",
"i",
"]",
",",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"x",
"-",
"dx",
"[",
"i",
"+",
"1",
"]",
",",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"y",
"-",
"dy",
"[",
"i",
"+",
"1",
"]",
",",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"x",
",",
"points",
"[",
"i",
"+",
"1",
"]",
".",
"y",
")",
"return",
"path"
] | 34.493151 | 0.000772 | [
"def findpath(self, points, curvature=1.0):\n",
"\n",
" \"\"\"Constructs a path between the given list of points.\n",
"\n",
" Interpolates the list of points and determines\n",
" a smooth bezier path betweem them.\n",
"\n",
" The curvature parameter offers some control on\n",
" how separate segments are stitched together:\n",
" from straight angles to smooth curves.\n",
" Curvature is only useful if the path has more than three points.\n",
" \"\"\"\n",
"\n",
" # The list of points consists of Point objects,\n",
" # but it shouldn't crash on something straightforward\n",
" # as someone supplying a list of (x,y)-tuples.\n",
"\n",
" for i, pt in enumerate(points):\n",
" if type(pt) == TupleType:\n",
" points[i] = Point(pt[0], pt[1])\n",
"\n",
" if len(points) == 0:\n",
" return None\n",
" if len(points) == 1:\n",
" path = self.BezierPath(None)\n",
" path.moveto(points[0].x, points[0].y)\n",
" return path\n",
" if len(points) == 2:\n",
" path = self.BezierPath(None)\n",
" path.moveto(points[0].x, points[0].y)\n",
" path.lineto(points[1].x, points[1].y)\n",
" return path\n",
"\n",
" # Zero curvature means straight lines.\n",
"\n",
" curvature = max(0, min(1, curvature))\n",
" if curvature == 0:\n",
" path = self.BezierPath(None)\n",
" path.moveto(points[0].x, points[0].y)\n",
" for i in range(len(points)):\n",
" path.lineto(points[i].x, points[i].y)\n",
" return path\n",
"\n",
" curvature = 4 + (1.0 - curvature) * 40\n",
"\n",
" dx = {0: 0, len(points) - 1: 0}\n",
" dy = {0: 0, len(points) - 1: 0}\n",
" bi = {1: -0.25}\n",
" ax = {1: (points[2].x - points[0].x - dx[0]) / 4}\n",
" ay = {1: (points[2].y - points[0].y - dy[0]) / 4}\n",
"\n",
" for i in range(2, len(points) - 1):\n",
" bi[i] = -1 / (curvature + bi[i - 1])\n",
" ax[i] = -(points[i + 1].x - points[i - 1].x - ax[i - 1]) * bi[i]\n",
" ay[i] = -(points[i + 1].y - points[i - 1].y - ay[i - 1]) * bi[i]\n",
"\n",
" r = range(1, len(points) - 1)\n",
" r.reverse()\n",
" for i in r:\n",
" dx[i] = ax[i] + dx[i + 1] * bi[i]\n",
" dy[i] = ay[i] + dy[i + 1] * bi[i]\n",
"\n",
" path = self.BezierPath(None)\n",
" path.moveto(points[0].x, points[0].y)\n",
" for i in range(len(points) - 1):\n",
" path.curveto(points[i].x + dx[i],\n",
" points[i].y + dy[i],\n",
" points[i + 1].x - dx[i + 1],\n",
" points[i + 1].y - dy[i + 1],\n",
" points[i + 1].x,\n",
" points[i + 1].y)\n",
"\n",
" return path"
] | [
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 73 | 0.000938 |
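The heart of findpath is the tridiagonal solve that turns neighbouring-point differences into smooth control-handle offsets. Below is a minimal, dependency-free sketch of just that step, assuming plain (x, y) tuples; the name control_offsets and the sample points are invented for illustration.

# Standalone sketch of the control-offset solve from findpath above.
# Works on plain (x, y) tuples and returns the per-point handle offsets
# (dx, dy) that the method would feed into curveto().
def control_offsets(points, curvature=1.0):
    n = len(points)
    if n < 3:
        # too few points for curvature to matter; zero offsets mean straight lines
        return {i: 0 for i in range(n)}, {i: 0 for i in range(n)}
    curvature = 4 + (1.0 - max(0, min(1, curvature))) * 40
    dx = {0: 0, n - 1: 0}
    dy = {0: 0, n - 1: 0}
    bi = {1: -0.25}
    ax = {1: (points[2][0] - points[0][0] - dx[0]) / 4}
    ay = {1: (points[2][1] - points[0][1] - dy[0]) / 4}
    # forward elimination over the interior points
    for i in range(2, n - 1):
        bi[i] = -1 / (curvature + bi[i - 1])
        ax[i] = -(points[i + 1][0] - points[i - 1][0] - ax[i - 1]) * bi[i]
        ay[i] = -(points[i + 1][1] - points[i - 1][1] - ay[i - 1]) * bi[i]
    # back substitution, matching the reversed loop in findpath
    for i in reversed(range(1, n - 1)):
        dx[i] = ax[i] + dx[i + 1] * bi[i]
        dy[i] = ay[i] + dy[i + 1] * bi[i]
    return dx, dy

dx, dy = control_offsets([(0, 0), (50, 80), (120, 30), (200, 90)])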
def runStickyEregressionsInStata(infile_name,interval_size,meas_err,sticky,all_specs,stata_exe):
'''
Runs regressions for the main tables of the StickyC paper in Stata and produces a
LaTeX table with results for one "panel". Running in Stata allows production of
the KP-statistic, for which there is currently no command in statsmodels.api.
Parameters
----------
infile_name : str
Name of tab-delimited text file with simulation data. Assumed to be in
the results directory, and was almost surely generated by makeStickyEdataFile
unless we resort to fabricating simulated data. THAT'S A JOKE, FUTURE REFEREES.
interval_size : int
Number of periods in each regression sample (or interval).
meas_err : bool
Indicator for whether to add measurement error to DeltaLogC.
sticky : bool
Indicator for whether these results used sticky expectations.
all_specs : bool
Indicator for whether this panel should include all specifications or
just the OLS on lagged consumption growth.
stata_exe : str
Absolute location where the Stata executable can be found on the computer
running this code. Usually set at the top of StickyEparams.py.
Returns
-------
panel_text : str
String with one panel's worth of LaTeX input.
'''
dofile = "StickyETimeSeries.do"
infile_name_full = os.path.abspath(results_dir + infile_name + ".txt")
temp_name_full = os.path.abspath(results_dir + "temp.txt")
if meas_err:
meas_err_stata = 1
else:
meas_err_stata = 0
# Define the command to run the Stata do file
cmd = [stata_exe, "do", dofile, infile_name_full, temp_name_full, str(interval_size), str(meas_err_stata)]
# Run Stata do-file
stata_status = subprocess.call(cmd,shell = 'true')
if stata_status!=0:
raise ValueError('Stata code could not run. Check the stata_exe in StickyEparams.py')
stata_output = pd.read_csv(temp_name_full, sep=',',header=0)
# Make results table and return it
panel_text = makeResultsPanel(Coeffs=stata_output.CoeffsArray,
StdErrs=stata_output.StdErrArray,
Rsq=stata_output.RsqArray,
Pvals=stata_output.PvalArray,
OID=stata_output.OIDarray,
Counts=stata_output.ExtraInfo,
meas_err=meas_err,
sticky=sticky,
all_specs=all_specs)
return panel_text | [
"def",
"runStickyEregressionsInStata",
"(",
"infile_name",
",",
"interval_size",
",",
"meas_err",
",",
"sticky",
",",
"all_specs",
",",
"stata_exe",
")",
":",
"dofile",
"=",
"\"StickyETimeSeries.do\"",
"infile_name_full",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"results_dir",
"+",
"infile_name",
"+",
"\".txt\"",
")",
"temp_name_full",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"results_dir",
"+",
"\"temp.txt\"",
")",
"if",
"meas_err",
":",
"meas_err_stata",
"=",
"1",
"else",
":",
"meas_err_stata",
"=",
"0",
"# Define the command to run the Stata do file",
"cmd",
"=",
"[",
"stata_exe",
",",
"\"do\"",
",",
"dofile",
",",
"infile_name_full",
",",
"temp_name_full",
",",
"str",
"(",
"interval_size",
")",
",",
"str",
"(",
"meas_err_stata",
")",
"]",
"# Run Stata do-file",
"stata_status",
"=",
"subprocess",
".",
"call",
"(",
"cmd",
",",
"shell",
"=",
"'true'",
")",
"if",
"stata_status",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"'Stata code could not run. Check the stata_exe in StickyEparams.py'",
")",
"stata_output",
"=",
"pd",
".",
"read_csv",
"(",
"temp_name_full",
",",
"sep",
"=",
"','",
",",
"header",
"=",
"0",
")",
"# Make results table and return it",
"panel_text",
"=",
"makeResultsPanel",
"(",
"Coeffs",
"=",
"stata_output",
".",
"CoeffsArray",
",",
"StdErrs",
"=",
"stata_output",
".",
"StdErrArray",
",",
"Rsq",
"=",
"stata_output",
".",
"RsqArray",
",",
"Pvals",
"=",
"stata_output",
".",
"PvalArray",
",",
"OID",
"=",
"stata_output",
".",
"OIDarray",
",",
"Counts",
"=",
"stata_output",
".",
"ExtraInfo",
",",
"meas_err",
"=",
"meas_err",
",",
"sticky",
"=",
"sticky",
",",
"all_specs",
"=",
"all_specs",
")",
"return",
"panel_text"
] | 42.896552 | 0.011002 | [
"def runStickyEregressionsInStata(infile_name,interval_size,meas_err,sticky,all_specs,stata_exe):\n",
" '''\n",
" Runs regressions for the main tables of the StickyC paper in Stata and produces a\n",
" LaTeX table with results for one \"panel\". Running in Stata allows production of\n",
" the KP-statistic, for which there is currently no command in statsmodels.api.\n",
"\n",
" Parameters\n",
" ----------\n",
" infile_name : str\n",
" Name of tab-delimited text file with simulation data. Assumed to be in\n",
" the results directory, and was almost surely generated by makeStickyEdataFile\n",
" unless we resort to fabricating simulated data. THAT'S A JOKE, FUTURE REFEREES.\n",
" interval_size : int\n",
" Number of periods in each regression sample (or interval).\n",
" meas_err : bool\n",
" Indicator for whether to add measurement error to DeltaLogC.\n",
" sticky : bool\n",
" Indicator for whether these results used sticky expectations.\n",
" all_specs : bool\n",
" Indicator for whether this panel should include all specifications or\n",
" just the OLS on lagged consumption growth.\n",
" stata_exe : str\n",
" Absolute location where the Stata executable can be found on the computer\n",
" running this code. Usually set at the top of StickyEparams.py.\n",
"\n",
" Returns\n",
" -------\n",
" panel_text : str\n",
" String with one panel's worth of LaTeX input.\n",
" '''\n",
" dofile = \"StickyETimeSeries.do\"\n",
" infile_name_full = os.path.abspath(results_dir + infile_name + \".txt\")\n",
" temp_name_full = os.path.abspath(results_dir + \"temp.txt\")\n",
" if meas_err:\n",
" meas_err_stata = 1\n",
" else:\n",
" meas_err_stata = 0\n",
"\n",
" # Define the command to run the Stata do file\n",
" cmd = [stata_exe, \"do\", dofile, infile_name_full, temp_name_full, str(interval_size), str(meas_err_stata)]\n",
"\n",
" # Run Stata do-file\n",
" stata_status = subprocess.call(cmd,shell = 'true')\n",
" if stata_status!=0:\n",
" raise ValueError('Stata code could not run. Check the stata_exe in StickyEparams.py')\n",
" stata_output = pd.read_csv(temp_name_full, sep=',',header=0)\n",
"\n",
" # Make results table and return it\n",
" panel_text = makeResultsPanel(Coeffs=stata_output.CoeffsArray,\n",
" StdErrs=stata_output.StdErrArray,\n",
" Rsq=stata_output.RsqArray,\n",
" Pvals=stata_output.PvalArray,\n",
" OID=stata_output.OIDarray,\n",
" Counts=stata_output.ExtraInfo,\n",
" meas_err=meas_err,\n",
" sticky=sticky,\n",
" all_specs=all_specs)\n",
" return panel_text"
] | [
0.061855670103092786,
0,
0.011627906976744186,
0.011904761904761904,
0.012195121951219513,
0,
0,
0,
0,
0,
0.011627906976744186,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0.05454545454545454,
0.041666666666666664,
0.010638297872340425,
0.015384615384615385,
0,
0,
0,
0.01818181818181818,
0.020833333333333332,
0.0196078431372549,
0.020833333333333332,
0.019230769230769232,
0.025,
0.027777777777777776,
0.023809523809523808,
0.047619047619047616
] | 58 | 0.008393 |
def hot(self, limit=None):
"""GETs hot links from this subreddit. Calls :meth:`narwal.Reddit.hot`.
:param limit: max number of links to return
"""
return self._reddit.hot(self.display_name, limit=limit) | [
"def",
"hot",
"(",
"self",
",",
"limit",
"=",
"None",
")",
":",
"return",
"self",
".",
"_reddit",
".",
"hot",
"(",
"self",
".",
"display_name",
",",
"limit",
"=",
"limit",
")"
] | 39.833333 | 0.016393 | [
"def hot(self, limit=None):\n",
" \"\"\"GETs hot links from this subreddit. Calls :meth:`narwal.Reddit.hot`.\n",
" \n",
" :param limit: max number of links to return\n",
" \"\"\"\n",
" return self._reddit.hot(self.display_name, limit=limit)"
] | [
0,
0.024691358024691357,
0.1111111111111111,
0,
0,
0.015873015873015872
] | 6 | 0.025279 |
def get_buildroot(self, build_id):
"""
Build the buildroot entry of the metadata.
:return: dict, partial metadata
"""
docker_info = self.tasker.get_info()
host_arch, docker_version = get_docker_architecture(self.tasker)
buildroot = {
'id': 1,
'host': {
'os': docker_info['OperatingSystem'],
'arch': host_arch,
},
'content_generator': {
'name': PROG,
'version': atomic_reactor_version,
},
'container': {
'type': 'docker',
'arch': os.uname()[4],
},
'tools': [
{
'name': tool['name'],
'version': tool['version'],
}
for tool in get_version_of_tools()] + [
{
'name': 'docker',
'version': docker_version,
},
],
'components': self.get_rpms(),
'extra': {
'osbs': {
'build_id': build_id,
'builder_image_id': self.get_builder_image_id(),
}
},
}
return buildroot | [
"def",
"get_buildroot",
"(",
"self",
",",
"build_id",
")",
":",
"docker_info",
"=",
"self",
".",
"tasker",
".",
"get_info",
"(",
")",
"host_arch",
",",
"docker_version",
"=",
"get_docker_architecture",
"(",
"self",
".",
"tasker",
")",
"buildroot",
"=",
"{",
"'id'",
":",
"1",
",",
"'host'",
":",
"{",
"'os'",
":",
"docker_info",
"[",
"'OperatingSystem'",
"]",
",",
"'arch'",
":",
"host_arch",
",",
"}",
",",
"'content_generator'",
":",
"{",
"'name'",
":",
"PROG",
",",
"'version'",
":",
"atomic_reactor_version",
",",
"}",
",",
"'container'",
":",
"{",
"'type'",
":",
"'docker'",
",",
"'arch'",
":",
"os",
".",
"uname",
"(",
")",
"[",
"4",
"]",
",",
"}",
",",
"'tools'",
":",
"[",
"{",
"'name'",
":",
"tool",
"[",
"'name'",
"]",
",",
"'version'",
":",
"tool",
"[",
"'version'",
"]",
",",
"}",
"for",
"tool",
"in",
"get_version_of_tools",
"(",
")",
"]",
"+",
"[",
"{",
"'name'",
":",
"'docker'",
",",
"'version'",
":",
"docker_version",
",",
"}",
",",
"]",
",",
"'components'",
":",
"self",
".",
"get_rpms",
"(",
")",
",",
"'extra'",
":",
"{",
"'osbs'",
":",
"{",
"'build_id'",
":",
"build_id",
",",
"'builder_image_id'",
":",
"self",
".",
"get_builder_image_id",
"(",
")",
",",
"}",
"}",
",",
"}",
"return",
"buildroot"
] | 27.888889 | 0.00154 | [
"def get_buildroot(self, build_id):\n",
" \"\"\"\n",
" Build the buildroot entry of the metadata.\n",
"\n",
" :return: dict, partial metadata\n",
" \"\"\"\n",
"\n",
" docker_info = self.tasker.get_info()\n",
" host_arch, docker_version = get_docker_architecture(self.tasker)\n",
"\n",
" buildroot = {\n",
" 'id': 1,\n",
" 'host': {\n",
" 'os': docker_info['OperatingSystem'],\n",
" 'arch': host_arch,\n",
" },\n",
" 'content_generator': {\n",
" 'name': PROG,\n",
" 'version': atomic_reactor_version,\n",
" },\n",
" 'container': {\n",
" 'type': 'docker',\n",
" 'arch': os.uname()[4],\n",
" },\n",
" 'tools': [\n",
" {\n",
" 'name': tool['name'],\n",
" 'version': tool['version'],\n",
" }\n",
" for tool in get_version_of_tools()] + [\n",
" {\n",
" 'name': 'docker',\n",
" 'version': docker_version,\n",
" },\n",
" ],\n",
" 'components': self.get_rpms(),\n",
" 'extra': {\n",
" 'osbs': {\n",
" 'build_id': build_id,\n",
" 'builder_image_id': self.get_builder_image_id(),\n",
" }\n",
" },\n",
" }\n",
"\n",
" return buildroot"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664
] | 45 | 0.002778 |
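The 'tools' entry above uses a handy idiom: a list comprehension over detected tools concatenated with a literal list for the fixed docker entry. A dependency-free sketch of the same construction (toy_buildroot is an invented name, and os.uname is POSIX-only):

import os

def toy_buildroot(tools, docker_version):
    # mirrors the comprehension-plus-literal construction in get_buildroot
    return {
        'id': 1,
        'container': {'type': 'docker', 'arch': os.uname()[4]},
        'tools': [
            {'name': t['name'], 'version': t['version']} for t in tools
        ] + [
            {'name': 'docker', 'version': docker_version},
        ],
    }

print(toy_buildroot([{'name': 'git', 'version': '2.43.0'}], '24.0.7'))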
def verify_fft_options(opt, parser):
"""Parses the FFT options and verifies that they are
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes.
parser : object
OptionParser instance.
"""
if len(opt.fft_backends) > 0:
_all_backends = get_backend_names()
for backend in opt.fft_backends:
if backend not in _all_backends:
parser.error("Backend {0} is not available".format(backend))
for backend in get_backend_modules():
try:
backend.verify_fft_options(opt, parser)
except AttributeError:
pass | [
"def",
"verify_fft_options",
"(",
"opt",
",",
"parser",
")",
":",
"if",
"len",
"(",
"opt",
".",
"fft_backends",
")",
">",
"0",
":",
"_all_backends",
"=",
"get_backend_names",
"(",
")",
"for",
"backend",
"in",
"opt",
".",
"fft_backends",
":",
"if",
"backend",
"not",
"in",
"_all_backends",
":",
"parser",
".",
"error",
"(",
"\"Backend {0} is not available\"",
".",
"format",
"(",
"backend",
")",
")",
"for",
"backend",
"in",
"get_backend_modules",
"(",
")",
":",
"try",
":",
"backend",
".",
"verify_fft_options",
"(",
"opt",
",",
"parser",
")",
"except",
"AttributeError",
":",
"pass"
] | 29.125 | 0.001385 | [
"def verify_fft_options(opt, parser):\n",
" \"\"\"Parses the FFT options and verifies that they are\n",
" reasonable.\n",
"\n",
" Parameters\n",
" ----------\n",
" opt : object\n",
" Result of parsing the CLI with OptionParser, or any object with the\n",
" required attributes.\n",
" parser : object\n",
" OptionParser instance.\n",
" \"\"\"\n",
"\n",
" if len(opt.fft_backends) > 0:\n",
" _all_backends = get_backend_names()\n",
" for backend in opt.fft_backends:\n",
" if backend not in _all_backends:\n",
" parser.error(\"Backend {0} is not available\".format(backend))\n",
"\n",
" for backend in get_backend_modules():\n",
" try:\n",
" backend.verify_fft_options(opt, parser)\n",
" except AttributeError:\n",
" pass"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625
] | 24 | 0.002604 |
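The final loop above delegates to each backend only if it implements the hook, using try/except AttributeError. A self-contained sketch with toy modules standing in for real FFT backends:

import types

# toy backend modules; only the first one defines the optional hook
fftw = types.ModuleType("fftw")
fftw.verify_fft_options = lambda opt, parser: None
mkl = types.ModuleType("mkl")

for backend in (fftw, mkl):
    try:
        backend.verify_fft_options(None, None)
    except AttributeError:
        pass  # backend has nothing extra to verify

One caveat of this pattern: an AttributeError raised inside a backend's own hook is swallowed too, which can mask bugs; checking hasattr(backend, 'verify_fft_options') first avoids that.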
def exists(instance_id=None, name=None, tags=None, region=None, key=None,
keyid=None, profile=None, in_states=None, filters=None):
'''
Given an instance id, check to see if the given instance id exists.
Returns True if the given instance with the given id, name, or tags
exists; otherwise, False is returned.
CLI Example:
.. code-block:: bash
salt myminion boto_ec2.exists myinstance
'''
instances = find_instances(instance_id=instance_id, name=name, tags=tags,
region=region, key=key, keyid=keyid,
profile=profile, in_states=in_states, filters=filters)
if instances:
log.info('Instance exists.')
return True
else:
log.warning('Instance does not exist.')
return False | [
"def",
"exists",
"(",
"instance_id",
"=",
"None",
",",
"name",
"=",
"None",
",",
"tags",
"=",
"None",
",",
"region",
"=",
"None",
",",
"key",
"=",
"None",
",",
"keyid",
"=",
"None",
",",
"profile",
"=",
"None",
",",
"in_states",
"=",
"None",
",",
"filters",
"=",
"None",
")",
":",
"instances",
"=",
"find_instances",
"(",
"instance_id",
"=",
"instance_id",
",",
"name",
"=",
"name",
",",
"tags",
"=",
"tags",
",",
"region",
"=",
"region",
",",
"key",
"=",
"key",
",",
"keyid",
"=",
"keyid",
",",
"profile",
"=",
"profile",
",",
"in_states",
"=",
"in_states",
",",
"filters",
"=",
"filters",
")",
"if",
"instances",
":",
"log",
".",
"info",
"(",
"'Instance exists.'",
")",
"return",
"True",
"else",
":",
"log",
".",
"warning",
"(",
"'Instance does not exist.'",
")",
"return",
"False"
] | 34.869565 | 0.002427 | [
"def exists(instance_id=None, name=None, tags=None, region=None, key=None,\n",
" keyid=None, profile=None, in_states=None, filters=None):\n",
" '''\n",
" Given an instance id, check to see if the given instance id exists.\n",
"\n",
" Returns True if the given instance with the given id, name, or tags\n",
" exists; otherwise, False is returned.\n",
"\n",
" CLI Example:\n",
"\n",
" .. code-block:: bash\n",
"\n",
" salt myminion boto_ec2.exists myinstance\n",
" '''\n",
" instances = find_instances(instance_id=instance_id, name=name, tags=tags,\n",
" region=region, key=key, keyid=keyid,\n",
" profile=profile, in_states=in_states, filters=filters)\n",
" if instances:\n",
" log.info('Instance exists.')\n",
" return True\n",
" else:\n",
" log.warning('Instance does not exist.')\n",
" return False"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011627906976744186,
0,
0,
0,
0,
0,
0.05
] | 23 | 0.002679 |
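Stripped of the boto plumbing, exists is a thin truthiness wrapper around a finder. A dependency-free sketch of that shape (find_instances here is a hypothetical stub, not the real salt helper):

import logging

log = logging.getLogger(__name__)

def find_instances(**kwargs):
    # stub standing in for the real EC2 query
    return ['i-0123456789abcdef0'] if kwargs.get('name') == 'myinstance' else []

def exists(**kwargs):
    instances = find_instances(**kwargs)
    if instances:
        log.info('Instance exists.')
        return True
    log.warning('Instance does not exist.')
    return False

print(exists(name='myinstance'))  # True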
def assign(self, check=None, content_object=None, generic=False):
"""
Assign a permission to a user.
To assign permission for all checks: let check=None.
To assign permission for all objects: let content_object=None.
If generic is True then "check" will be suffixed with _modelname.
"""
result = []
if not content_object:
content_objects = (self.model,)
elif not isinstance(content_object, (list, tuple)):
content_objects = (content_object,)
else:
content_objects = content_object
if not check:
checks = self.generic_checks + getattr(self, 'checks', [])
elif not isinstance(check, (list, tuple)):
checks = (check,)
else:
checks = check
for content_object in content_objects:
# raise an exception before adding any permission
# I think Django does not rollback by default
if not isinstance(content_object, (Model, ModelBase)):
raise NotAModel(content_object)
elif isinstance(content_object, Model) and not content_object.pk:
raise UnsavedModelInstance(content_object)
content_type = ContentType.objects.get_for_model(content_object)
for check in checks:
if isinstance(content_object, Model):
# make an authority per object permission
codename = self.get_codename(
check,
content_object,
generic,
)
try:
perm = Permission.objects.get(
user=self.user,
codename=codename,
approved=True,
content_type=content_type,
object_id=content_object.pk,
)
except Permission.DoesNotExist:
perm = Permission.objects.create(
user=self.user,
content_object=content_object,
codename=codename,
approved=True,
)
result.append(perm)
elif isinstance(content_object, ModelBase):
# make a Django permission
codename = self.get_django_codename(
check,
content_object,
generic,
without_left=True,
)
try:
perm = DjangoPermission.objects.get(codename=codename)
except DjangoPermission.DoesNotExist:
name = check
if '_' in name:
name = name[0:name.find('_')]
perm = DjangoPermission(
name=name,
codename=codename,
content_type=content_type,
)
perm.save()
self.user.user_permissions.add(perm)
result.append(perm)
return result | [
"def",
"assign",
"(",
"self",
",",
"check",
"=",
"None",
",",
"content_object",
"=",
"None",
",",
"generic",
"=",
"False",
")",
":",
"result",
"=",
"[",
"]",
"if",
"not",
"content_object",
":",
"content_objects",
"=",
"(",
"self",
".",
"model",
",",
")",
"elif",
"not",
"isinstance",
"(",
"content_object",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"content_objects",
"=",
"(",
"content_object",
",",
")",
"else",
":",
"content_objects",
"=",
"content_object",
"if",
"not",
"check",
":",
"checks",
"=",
"self",
".",
"generic_checks",
"+",
"getattr",
"(",
"self",
",",
"'checks'",
",",
"[",
"]",
")",
"elif",
"not",
"isinstance",
"(",
"check",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"checks",
"=",
"(",
"check",
",",
")",
"else",
":",
"checks",
"=",
"check",
"for",
"content_object",
"in",
"content_objects",
":",
"# raise an exception before adding any permission",
"# i think Django does not rollback by default",
"if",
"not",
"isinstance",
"(",
"content_object",
",",
"(",
"Model",
",",
"ModelBase",
")",
")",
":",
"raise",
"NotAModel",
"(",
"content_object",
")",
"elif",
"isinstance",
"(",
"content_object",
",",
"Model",
")",
"and",
"not",
"content_object",
".",
"pk",
":",
"raise",
"UnsavedModelInstance",
"(",
"content_object",
")",
"content_type",
"=",
"ContentType",
".",
"objects",
".",
"get_for_model",
"(",
"content_object",
")",
"for",
"check",
"in",
"checks",
":",
"if",
"isinstance",
"(",
"content_object",
",",
"Model",
")",
":",
"# make an authority per object permission",
"codename",
"=",
"self",
".",
"get_codename",
"(",
"check",
",",
"content_object",
",",
"generic",
",",
")",
"try",
":",
"perm",
"=",
"Permission",
".",
"objects",
".",
"get",
"(",
"user",
"=",
"self",
".",
"user",
",",
"codename",
"=",
"codename",
",",
"approved",
"=",
"True",
",",
"content_type",
"=",
"content_type",
",",
"object_id",
"=",
"content_object",
".",
"pk",
",",
")",
"except",
"Permission",
".",
"DoesNotExist",
":",
"perm",
"=",
"Permission",
".",
"objects",
".",
"create",
"(",
"user",
"=",
"self",
".",
"user",
",",
"content_object",
"=",
"content_object",
",",
"codename",
"=",
"codename",
",",
"approved",
"=",
"True",
",",
")",
"result",
".",
"append",
"(",
"perm",
")",
"elif",
"isinstance",
"(",
"content_object",
",",
"ModelBase",
")",
":",
"# make a Django permission",
"codename",
"=",
"self",
".",
"get_django_codename",
"(",
"check",
",",
"content_object",
",",
"generic",
",",
"without_left",
"=",
"True",
",",
")",
"try",
":",
"perm",
"=",
"DjangoPermission",
".",
"objects",
".",
"get",
"(",
"codename",
"=",
"codename",
")",
"except",
"DjangoPermission",
".",
"DoesNotExist",
":",
"name",
"=",
"check",
"if",
"'_'",
"in",
"name",
":",
"name",
"=",
"name",
"[",
"0",
":",
"name",
".",
"find",
"(",
"'_'",
")",
"]",
"perm",
"=",
"DjangoPermission",
"(",
"name",
"=",
"name",
",",
"codename",
"=",
"codename",
",",
"content_type",
"=",
"content_type",
",",
")",
"perm",
".",
"save",
"(",
")",
"self",
".",
"user",
".",
"user_permissions",
".",
"add",
"(",
"perm",
")",
"result",
".",
"append",
"(",
"perm",
")",
"return",
"result"
] | 38.588235 | 0.000595 | [
"def assign(self, check=None, content_object=None, generic=False):\n",
" \"\"\"\n",
" Assign a permission to a user.\n",
"\n",
" To assign permission for all checks: let check=None.\n",
" To assign permission for all objects: let content_object=None.\n",
"\n",
" If generic is True then \"check\" will be suffixed with _modelname.\n",
" \"\"\"\n",
" result = []\n",
"\n",
" if not content_object:\n",
" content_objects = (self.model,)\n",
" elif not isinstance(content_object, (list, tuple)):\n",
" content_objects = (content_object,)\n",
" else:\n",
" content_objects = content_object\n",
"\n",
" if not check:\n",
" checks = self.generic_checks + getattr(self, 'checks', [])\n",
" elif not isinstance(check, (list, tuple)):\n",
" checks = (check,)\n",
" else:\n",
" checks = check\n",
"\n",
" for content_object in content_objects:\n",
" # raise an exception before adding any permission\n",
" # i think Django does not rollback by default\n",
" if not isinstance(content_object, (Model, ModelBase)):\n",
" raise NotAModel(content_object)\n",
" elif isinstance(content_object, Model) and not content_object.pk:\n",
" raise UnsavedModelInstance(content_object)\n",
"\n",
" content_type = ContentType.objects.get_for_model(content_object)\n",
"\n",
" for check in checks:\n",
" if isinstance(content_object, Model):\n",
" # make an authority per object permission\n",
" codename = self.get_codename(\n",
" check,\n",
" content_object,\n",
" generic,\n",
" )\n",
" try:\n",
" perm = Permission.objects.get(\n",
" user=self.user,\n",
" codename=codename,\n",
" approved=True,\n",
" content_type=content_type,\n",
" object_id=content_object.pk,\n",
" )\n",
" except Permission.DoesNotExist:\n",
" perm = Permission.objects.create(\n",
" user=self.user,\n",
" content_object=content_object,\n",
" codename=codename,\n",
" approved=True,\n",
" )\n",
"\n",
" result.append(perm)\n",
"\n",
" elif isinstance(content_object, ModelBase):\n",
" # make a Django permission\n",
" codename = self.get_django_codename(\n",
" check,\n",
" content_object,\n",
" generic,\n",
" without_left=True,\n",
" )\n",
" try:\n",
" perm = DjangoPermission.objects.get(codename=codename)\n",
" except DjangoPermission.DoesNotExist:\n",
" name = check\n",
" if '_' in name:\n",
" name = name[0:name.find('_')]\n",
" perm = DjangoPermission(\n",
" name=name,\n",
" codename=codename,\n",
" content_type=content_type,\n",
" )\n",
" perm.save()\n",
" self.user.user_permissions.add(perm)\n",
" result.append(perm)\n",
"\n",
" return result"
] | [
0,
0.08333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616
] | 85 | 0.001541 |
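Both branches of assign hand-roll the get-or-create idiom: look a record up by key and create it only on a miss. Django also ships this as Model.objects.get_or_create. A dependency-free toy with invented names:

_store = {}

def get_or_create(key, make):
    # returns (record, created), like Django's Model.objects.get_or_create
    try:
        return _store[key], False
    except KeyError:
        record = _store[key] = make()
        return record, True

perm, created = get_or_create(('alice', 'review_article'),
                              lambda: {'approved': True})
print(perm, created)  # {'approved': True} True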
def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):
"""Set the energy distribution function to a power law.
**Call signature**
*emin_mev*
The minimum energy of the distribution, in MeV
*emax_mev*
The maximum energy of the distribution, in MeV
*delta*
The power-law index of the distribution
*ne_cc*
The number density of energetic electrons, in cm^-3.
Returns
*self* for convenience in chaining.
"""
if not (emin_mev >= 0):
raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))
if not (emax_mev >= emin_mev):
raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))
if not (delta >= 0):
raise ValueError('must have delta >= 0; got %r' % (delta,))
if not (ne_cc >= 0):
raise ValueError('must have ne_cc >= 0; got %r' % (ne_cc,))
self.in_vals[IN_VAL_EDIST] = EDIST_PLW
self.in_vals[IN_VAL_EMIN] = emin_mev
self.in_vals[IN_VAL_EMAX] = emax_mev
self.in_vals[IN_VAL_DELTA1] = delta
self.in_vals[IN_VAL_NB] = ne_cc
return self | [
"def",
"set_edist_powerlaw",
"(",
"self",
",",
"emin_mev",
",",
"emax_mev",
",",
"delta",
",",
"ne_cc",
")",
":",
"if",
"not",
"(",
"emin_mev",
">=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'must have emin_mev >= 0; got %r'",
"%",
"(",
"emin_mev",
",",
")",
")",
"if",
"not",
"(",
"emax_mev",
">=",
"emin_mev",
")",
":",
"raise",
"ValueError",
"(",
"'must have emax_mev >= emin_mev; got %r, %r'",
"%",
"(",
"emax_mev",
",",
"emin_mev",
")",
")",
"if",
"not",
"(",
"delta",
">=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'must have delta >= 0; got %r, %r'",
"%",
"(",
"delta",
",",
")",
")",
"if",
"not",
"(",
"ne_cc",
">=",
"0",
")",
":",
"raise",
"ValueError",
"(",
"'must have ne_cc >= 0; got %r, %r'",
"%",
"(",
"ne_cc",
",",
")",
")",
"self",
".",
"in_vals",
"[",
"IN_VAL_EDIST",
"]",
"=",
"EDIST_PLW",
"self",
".",
"in_vals",
"[",
"IN_VAL_EMIN",
"]",
"=",
"emin_mev",
"self",
".",
"in_vals",
"[",
"IN_VAL_EMAX",
"]",
"=",
"emax_mev",
"self",
".",
"in_vals",
"[",
"IN_VAL_DELTA1",
"]",
"=",
"delta",
"self",
".",
"in_vals",
"[",
"IN_VAL_NB",
"]",
"=",
"ne_cc",
"return",
"self"
] | 38.580645 | 0.002447 | [
"def set_edist_powerlaw(self, emin_mev, emax_mev, delta, ne_cc):\n",
" \"\"\"Set the energy distribution function to a power law.\n",
"\n",
" **Call signature**\n",
"\n",
" *emin_mev*\n",
" The minimum energy of the distribution, in MeV\n",
" *emax_mev*\n",
" The maximum energy of the distribution, in MeV\n",
" *delta*\n",
" The power-law index of the distribution\n",
" *ne_cc*\n",
" The number density of energetic electrons, in cm^-3.\n",
" Returns\n",
" *self* for convenience in chaining.\n",
" \"\"\"\n",
" if not (emin_mev >= 0):\n",
" raise ValueError('must have emin_mev >= 0; got %r' % (emin_mev,))\n",
" if not (emax_mev >= emin_mev):\n",
" raise ValueError('must have emax_mev >= emin_mev; got %r, %r' % (emax_mev, emin_mev))\n",
" if not (delta >= 0):\n",
" raise ValueError('must have delta >= 0; got %r, %r' % (delta,))\n",
" if not (ne_cc >= 0):\n",
" raise ValueError('must have ne_cc >= 0; got %r, %r' % (ne_cc,))\n",
"\n",
" self.in_vals[IN_VAL_EDIST] = EDIST_PLW\n",
" self.in_vals[IN_VAL_EMIN] = emin_mev\n",
" self.in_vals[IN_VAL_EMAX] = emax_mev\n",
" self.in_vals[IN_VAL_DELTA1] = delta\n",
" self.in_vals[IN_VAL_NB] = ne_cc\n",
" return self"
] | [
0,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842
] | 31 | 0.002531 |
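set_edist_powerlaw illustrates the validate-store-chain pattern: range-check every argument up front, stash the values, and return self so calls compose fluently. A minimal sketch (class and field names are made up):

class ToyDist(object):
    def set_powerlaw(self, emin, emax, delta, n):
        # validate before mutating any state
        if not (0 <= emin <= emax):
            raise ValueError('need 0 <= emin <= emax; got %r, %r' % (emin, emax))
        if delta < 0 or n < 0:
            raise ValueError('delta and n must be nonnegative')
        self.params = (emin, emax, delta, n)
        return self

d = ToyDist().set_powerlaw(0.1, 10.0, 3.0, 1e5)
print(d.params)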
def initialize_res(residue):
'''Creates a new structure containing a single amino acid. The type and
geometry of the amino acid are determined by the argument, which has to be
either a geometry object or a single-letter amino acid code.
The amino acid will be placed into chain A of model 0.'''
if isinstance( residue, Geo ):
geo = residue
else:
geo=geometry(residue)
segID=1
AA= geo.residue_name
CA_N_length=geo.CA_N_length
CA_C_length=geo.CA_C_length
N_CA_C_angle=geo.N_CA_C_angle
CA_coord= numpy.array([0.,0.,0.])
C_coord= numpy.array([CA_C_length,0,0])
N_coord = numpy.array([CA_N_length*math.cos(N_CA_C_angle*(math.pi/180.0)),CA_N_length*math.sin(N_CA_C_angle*(math.pi/180.0)),0])
N= Atom("N", N_coord, 0.0 , 1.0, " "," N", 0, "N")
CA=Atom("CA", CA_coord, 0.0 , 1.0, " "," CA", 0,"C")
C= Atom("C", C_coord, 0.0, 1.0, " ", " C",0,"C")
##Create Carbonyl atom (to be moved later)
C_O_length=geo.C_O_length
CA_C_O_angle=geo.CA_C_O_angle
N_CA_C_O_diangle=geo.N_CA_C_O_diangle
carbonyl=calculateCoordinates(N, CA, C, C_O_length, CA_C_O_angle, N_CA_C_O_diangle)
O= Atom("O",carbonyl , 0.0 , 1.0, " "," O", 0, "O")
if(AA=='G'):
res=makeGly(segID, N, CA, C, O, geo)
elif(AA=='A'):
res=makeAla(segID, N, CA, C, O, geo)
elif(AA=='S'):
res=makeSer(segID, N, CA, C, O, geo)
elif(AA=='C'):
res=makeCys(segID, N, CA, C, O, geo)
elif(AA=='V'):
res=makeVal(segID, N, CA, C, O, geo)
elif(AA=='I'):
res=makeIle(segID, N, CA, C, O, geo)
elif(AA=='L'):
res=makeLeu(segID, N, CA, C, O, geo)
elif(AA=='T'):
res=makeThr(segID, N, CA, C, O, geo)
elif(AA=='R'):
res=makeArg(segID, N, CA, C, O, geo)
elif(AA=='K'):
res=makeLys(segID, N, CA, C, O, geo)
elif(AA=='D'):
res=makeAsp(segID, N, CA, C, O, geo)
elif(AA=='E'):
res=makeGlu(segID, N, CA, C, O, geo)
elif(AA=='N'):
res=makeAsn(segID, N, CA, C, O, geo)
elif(AA=='Q'):
res=makeGln(segID, N, CA, C, O, geo)
elif(AA=='M'):
res=makeMet(segID, N, CA, C, O, geo)
elif(AA=='H'):
res=makeHis(segID, N, CA, C, O, geo)
elif(AA=='P'):
res=makePro(segID, N, CA, C, O, geo)
elif(AA=='F'):
res=makePhe(segID, N, CA, C, O, geo)
elif(AA=='Y'):
res=makeTyr(segID, N, CA, C, O, geo)
elif(AA=='W'):
res=makeTrp(segID, N, CA, C, O, geo)
else:
res=makeGly(segID, N, CA, C, O, geo)
cha= Chain('A')
cha.add(res)
mod= Model(0)
mod.add(cha)
struc= Structure('X')
struc.add(mod)
return struc | [
"def",
"initialize_res",
"(",
"residue",
")",
":",
"if",
"isinstance",
"(",
"residue",
",",
"Geo",
")",
":",
"geo",
"=",
"residue",
"else",
":",
"geo",
"=",
"geometry",
"(",
"residue",
")",
"segID",
"=",
"1",
"AA",
"=",
"geo",
".",
"residue_name",
"CA_N_length",
"=",
"geo",
".",
"CA_N_length",
"CA_C_length",
"=",
"geo",
".",
"CA_C_length",
"N_CA_C_angle",
"=",
"geo",
".",
"N_CA_C_angle",
"CA_coord",
"=",
"numpy",
".",
"array",
"(",
"[",
"0.",
",",
"0.",
",",
"0.",
"]",
")",
"C_coord",
"=",
"numpy",
".",
"array",
"(",
"[",
"CA_C_length",
",",
"0",
",",
"0",
"]",
")",
"N_coord",
"=",
"numpy",
".",
"array",
"(",
"[",
"CA_N_length",
"*",
"math",
".",
"cos",
"(",
"N_CA_C_angle",
"*",
"(",
"math",
".",
"pi",
"/",
"180.0",
")",
")",
",",
"CA_N_length",
"*",
"math",
".",
"sin",
"(",
"N_CA_C_angle",
"*",
"(",
"math",
".",
"pi",
"/",
"180.0",
")",
")",
",",
"0",
"]",
")",
"N",
"=",
"Atom",
"(",
"\"N\"",
",",
"N_coord",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" N\"",
",",
"0",
",",
"\"N\"",
")",
"CA",
"=",
"Atom",
"(",
"\"CA\"",
",",
"CA_coord",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" CA\"",
",",
"0",
",",
"\"C\"",
")",
"C",
"=",
"Atom",
"(",
"\"C\"",
",",
"C_coord",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" C\"",
",",
"0",
",",
"\"C\"",
")",
"##Create Carbonyl atom (to be moved later)",
"C_O_length",
"=",
"geo",
".",
"C_O_length",
"CA_C_O_angle",
"=",
"geo",
".",
"CA_C_O_angle",
"N_CA_C_O_diangle",
"=",
"geo",
".",
"N_CA_C_O_diangle",
"carbonyl",
"=",
"calculateCoordinates",
"(",
"N",
",",
"CA",
",",
"C",
",",
"C_O_length",
",",
"CA_C_O_angle",
",",
"N_CA_C_O_diangle",
")",
"O",
"=",
"Atom",
"(",
"\"O\"",
",",
"carbonyl",
",",
"0.0",
",",
"1.0",
",",
"\" \"",
",",
"\" O\"",
",",
"0",
",",
"\"O\"",
")",
"if",
"(",
"AA",
"==",
"'G'",
")",
":",
"res",
"=",
"makeGly",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'A'",
")",
":",
"res",
"=",
"makeAla",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'S'",
")",
":",
"res",
"=",
"makeSer",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'C'",
")",
":",
"res",
"=",
"makeCys",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'V'",
")",
":",
"res",
"=",
"makeVal",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'I'",
")",
":",
"res",
"=",
"makeIle",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'L'",
")",
":",
"res",
"=",
"makeLeu",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'T'",
")",
":",
"res",
"=",
"makeThr",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'R'",
")",
":",
"res",
"=",
"makeArg",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'K'",
")",
":",
"res",
"=",
"makeLys",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'D'",
")",
":",
"res",
"=",
"makeAsp",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'E'",
")",
":",
"res",
"=",
"makeGlu",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'N'",
")",
":",
"res",
"=",
"makeAsn",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'Q'",
")",
":",
"res",
"=",
"makeGln",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'M'",
")",
":",
"res",
"=",
"makeMet",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'H'",
")",
":",
"res",
"=",
"makeHis",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'P'",
")",
":",
"res",
"=",
"makePro",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'F'",
")",
":",
"res",
"=",
"makePhe",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'Y'",
")",
":",
"res",
"=",
"makeTyr",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"elif",
"(",
"AA",
"==",
"'W'",
")",
":",
"res",
"=",
"makeTrp",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"else",
":",
"res",
"=",
"makeGly",
"(",
"segID",
",",
"N",
",",
"CA",
",",
"C",
",",
"O",
",",
"geo",
")",
"cha",
"=",
"Chain",
"(",
"'A'",
")",
"cha",
".",
"add",
"(",
"res",
")",
"mod",
"=",
"Model",
"(",
"0",
")",
"mod",
".",
"add",
"(",
"cha",
")",
"struc",
"=",
"Structure",
"(",
"'X'",
")",
"struc",
".",
"add",
"(",
"mod",
")",
"return",
"struc"
] | 30.976471 | 0.033125 | [
"def initialize_res(residue):\n",
" '''Creates a new structure containing a single amino acid. The type and\n",
" geometry of the amino acid are determined by the argument, which has to be\n",
" either a geometry object or a single-letter amino acid code.\n",
" The amino acid will be placed into chain A of model 0.'''\n",
" \n",
" if isinstance( residue, Geo ):\n",
" geo = residue\n",
" else:\n",
" geo=geometry(residue) \n",
" \n",
" segID=1\n",
" AA= geo.residue_name\n",
" CA_N_length=geo.CA_N_length\n",
" CA_C_length=geo.CA_C_length\n",
" N_CA_C_angle=geo.N_CA_C_angle\n",
" \n",
" CA_coord= numpy.array([0.,0.,0.])\n",
" C_coord= numpy.array([CA_C_length,0,0])\n",
" N_coord = numpy.array([CA_N_length*math.cos(N_CA_C_angle*(math.pi/180.0)),CA_N_length*math.sin(N_CA_C_angle*(math.pi/180.0)),0])\n",
"\n",
" N= Atom(\"N\", N_coord, 0.0 , 1.0, \" \",\" N\", 0, \"N\")\n",
" CA=Atom(\"CA\", CA_coord, 0.0 , 1.0, \" \",\" CA\", 0,\"C\")\n",
" C= Atom(\"C\", C_coord, 0.0, 1.0, \" \", \" C\",0,\"C\")\n",
"\n",
" ##Create Carbonyl atom (to be moved later)\n",
" C_O_length=geo.C_O_length\n",
" CA_C_O_angle=geo.CA_C_O_angle\n",
" N_CA_C_O_diangle=geo.N_CA_C_O_diangle\n",
" \n",
" carbonyl=calculateCoordinates(N, CA, C, C_O_length, CA_C_O_angle, N_CA_C_O_diangle)\n",
" O= Atom(\"O\",carbonyl , 0.0 , 1.0, \" \",\" O\", 0, \"O\")\n",
"\n",
" if(AA=='G'):\n",
" res=makeGly(segID, N, CA, C, O, geo)\n",
" elif(AA=='A'):\n",
" res=makeAla(segID, N, CA, C, O, geo)\n",
" elif(AA=='S'):\n",
" res=makeSer(segID, N, CA, C, O, geo)\n",
" elif(AA=='C'):\n",
" res=makeCys(segID, N, CA, C, O, geo)\n",
" elif(AA=='V'):\n",
" res=makeVal(segID, N, CA, C, O, geo)\n",
" elif(AA=='I'):\n",
" res=makeIle(segID, N, CA, C, O, geo)\n",
" elif(AA=='L'):\n",
" res=makeLeu(segID, N, CA, C, O, geo)\n",
" elif(AA=='T'):\n",
" res=makeThr(segID, N, CA, C, O, geo)\n",
" elif(AA=='R'):\n",
" res=makeArg(segID, N, CA, C, O, geo)\n",
" elif(AA=='K'):\n",
" res=makeLys(segID, N, CA, C, O, geo)\n",
" elif(AA=='D'):\n",
" res=makeAsp(segID, N, CA, C, O, geo)\n",
" elif(AA=='E'):\n",
" res=makeGlu(segID, N, CA, C, O, geo)\n",
" elif(AA=='N'):\n",
" res=makeAsn(segID, N, CA, C, O, geo)\n",
" elif(AA=='Q'):\n",
" res=makeGln(segID, N, CA, C, O, geo)\n",
" elif(AA=='M'):\n",
" res=makeMet(segID, N, CA, C, O, geo)\n",
" elif(AA=='H'):\n",
" res=makeHis(segID, N, CA, C, O, geo)\n",
" elif(AA=='P'):\n",
" res=makePro(segID, N, CA, C, O, geo)\n",
" elif(AA=='F'):\n",
" res=makePhe(segID, N, CA, C, O, geo)\n",
" elif(AA=='Y'):\n",
" res=makeTyr(segID, N, CA, C, O, geo)\n",
" elif(AA=='W'):\n",
" res=makeTrp(segID, N, CA, C, O, geo)\n",
" else:\n",
" res=makeGly(segID, N, CA, C, O, geo)\n",
"\n",
" cha= Chain('A')\n",
" cha.add(res)\n",
" \n",
" mod= Model(0)\n",
" mod.add(cha)\n",
"\n",
" struc= Structure('X')\n",
" struc.add(mod)\n",
" return struc"
] | [
0,
0,
0,
0,
0,
0.2,
0.05714285714285714,
0,
0,
0.06451612903225806,
0.2,
0.08333333333333333,
0.04,
0.03125,
0.03125,
0.029411764705882353,
0.2,
0.07894736842105263,
0.06818181818181818,
0.022556390977443608,
0,
0.05454545454545454,
0.07017543859649122,
0.05660377358490566,
0,
0.02127659574468085,
0.03333333333333333,
0.029411764705882353,
0.023809523809523808,
0.2,
0.022727272727272728,
0.10714285714285714,
0,
0.058823529411764705,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0.05263157894736842,
0.022222222222222223,
0,
0.022222222222222223,
0,
0.05,
0,
0.2,
0.05555555555555555,
0,
0,
0.038461538461538464,
0,
0.0625
] | 85 | 0.043031 |
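The twenty-branch elif chain in initialize_res can be read as a dictionary dispatch from one-letter codes to maker functions, with glycine as the fallback. A runnable sketch with stub makers (the real makeXxx helpers take segID, N, CA, C, O, geo):

def make_gly(*args):
    return 'GLY residue'

def make_ala(*args):
    return 'ALA residue'

MAKERS = {'G': make_gly, 'A': make_ala}  # ...one entry per amino acid code

def build_residue(aa, *args):
    # unknown codes fall back to glycine, matching the final else above
    return MAKERS.get(aa, make_gly)(*args)

print(build_residue('A'))  # ALA residue
print(build_residue('?'))  # GLY residue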
def traverse_trees_recursive(odb, tree_shas, path_prefix):
"""
:return: list with entries according to the given binary tree-shas.
The result is encoded in a list
of n tuple|None per blob/commit, (n == len(tree_shas)), where
* [0] == 20 byte sha
* [1] == mode as int
* [2] == path relative to working tree root
The entry tuple is None if the respective blob/commit did not
exist in the given tree.
:param tree_shas: iterable of shas pointing to trees. All trees must
be on the same level. A tree-sha may be None, in which case None entries will be returned for it
:param path_prefix: a prefix to be added to the returned paths on this level,
set it '' for the first iteration
:note: The ordering of the returned items will be partially lost"""
trees_data = []
nt = len(tree_shas)
for tree_sha in tree_shas:
if tree_sha is None:
data = []
else:
data = tree_entries_from_data(odb.stream(tree_sha).read())
# END handle muted trees
trees_data.append(data)
# END for each sha to get data for
out = []
out_append = out.append
# find all matching entries and recursively process them together if the match
# is a tree. If the match is a non-tree item, put it into the result.
# Processed items will be set None
for ti, tree_data in enumerate(trees_data):
for ii, item in enumerate(tree_data):
if not item:
continue
# END skip already done items
entries = [None for _ in range(nt)]
entries[ti] = item
sha, mode, name = item # it's faster to unpack @UnusedVariable
is_dir = S_ISDIR(mode) # type mode bits
# find this item in all other tree data items
# wrap around, but stop one before our current index, hence
# ti+nt, not ti+1+nt
for tio in range(ti + 1, ti + nt):
tio = tio % nt
entries[tio] = _find_by_name(trees_data[tio], name, is_dir, ii)
# END for each other item data
# if we are a directory, enter recursion
if is_dir:
out.extend(traverse_trees_recursive(
odb, [((ei and ei[0]) or None) for ei in entries], path_prefix + name + '/'))
else:
out_append(tuple(_to_full_path(e, path_prefix) for e in entries))
# END handle recursion
# finally mark it done
tree_data[ii] = None
# END for each item
# we are done with one tree, set all its data empty
del(tree_data[:])
# END for each tree_data chunk
return out | [
"def",
"traverse_trees_recursive",
"(",
"odb",
",",
"tree_shas",
",",
"path_prefix",
")",
":",
"trees_data",
"=",
"[",
"]",
"nt",
"=",
"len",
"(",
"tree_shas",
")",
"for",
"tree_sha",
"in",
"tree_shas",
":",
"if",
"tree_sha",
"is",
"None",
":",
"data",
"=",
"[",
"]",
"else",
":",
"data",
"=",
"tree_entries_from_data",
"(",
"odb",
".",
"stream",
"(",
"tree_sha",
")",
".",
"read",
"(",
")",
")",
"# END handle muted trees",
"trees_data",
".",
"append",
"(",
"data",
")",
"# END for each sha to get data for",
"out",
"=",
"[",
"]",
"out_append",
"=",
"out",
".",
"append",
"# find all matching entries and recursively process them together if the match",
"# is a tree. If the match is a non-tree item, put it into the result.",
"# Processed items will be set None",
"for",
"ti",
",",
"tree_data",
"in",
"enumerate",
"(",
"trees_data",
")",
":",
"for",
"ii",
",",
"item",
"in",
"enumerate",
"(",
"tree_data",
")",
":",
"if",
"not",
"item",
":",
"continue",
"# END skip already done items",
"entries",
"=",
"[",
"None",
"for",
"_",
"in",
"range",
"(",
"nt",
")",
"]",
"entries",
"[",
"ti",
"]",
"=",
"item",
"sha",
",",
"mode",
",",
"name",
"=",
"item",
"# its faster to unpack @UnusedVariable",
"is_dir",
"=",
"S_ISDIR",
"(",
"mode",
")",
"# type mode bits",
"# find this item in all other tree data items",
"# wrap around, but stop one before our current index, hence",
"# ti+nt, not ti+1+nt",
"for",
"tio",
"in",
"range",
"(",
"ti",
"+",
"1",
",",
"ti",
"+",
"nt",
")",
":",
"tio",
"=",
"tio",
"%",
"nt",
"entries",
"[",
"tio",
"]",
"=",
"_find_by_name",
"(",
"trees_data",
"[",
"tio",
"]",
",",
"name",
",",
"is_dir",
",",
"ii",
")",
"# END for each other item data",
"# if we are a directory, enter recursion",
"if",
"is_dir",
":",
"out",
".",
"extend",
"(",
"traverse_trees_recursive",
"(",
"odb",
",",
"[",
"(",
"(",
"ei",
"and",
"ei",
"[",
"0",
"]",
")",
"or",
"None",
")",
"for",
"ei",
"in",
"entries",
"]",
",",
"path_prefix",
"+",
"name",
"+",
"'/'",
")",
")",
"else",
":",
"out_append",
"(",
"tuple",
"(",
"_to_full_path",
"(",
"e",
",",
"path_prefix",
")",
"for",
"e",
"in",
"entries",
")",
")",
"# END handle recursion",
"# finally mark it done",
"tree_data",
"[",
"ii",
"]",
"=",
"None",
"# END for each item",
"# we are done with one tree, set all its data empty",
"del",
"(",
"tree_data",
"[",
":",
"]",
")",
"# END for each tree_data chunk",
"return",
"out"
] | 40.818182 | 0.002175 | [
"def traverse_trees_recursive(odb, tree_shas, path_prefix):\n",
" \"\"\"\n",
" :return: list with entries according to the given binary tree-shas.\n",
" The result is encoded in a list\n",
" of n tuple|None per blob/commit, (n == len(tree_shas)), where\n",
" * [0] == 20 byte sha\n",
" * [1] == mode as int\n",
" * [2] == path relative to working tree root\n",
" The entry tuple is None if the respective blob/commit did not\n",
" exist in the given tree.\n",
" :param tree_shas: iterable of shas pointing to trees. All trees must\n",
" be on the same level. A tree-sha may be None in which case None\n",
" :param path_prefix: a prefix to be added to the returned paths on this level,\n",
" set it '' for the first iteration\n",
" :note: The ordering of the returned items will be partially lost\"\"\"\n",
" trees_data = []\n",
" nt = len(tree_shas)\n",
" for tree_sha in tree_shas:\n",
" if tree_sha is None:\n",
" data = []\n",
" else:\n",
" data = tree_entries_from_data(odb.stream(tree_sha).read())\n",
" # END handle muted trees\n",
" trees_data.append(data)\n",
" # END for each sha to get data for\n",
"\n",
" out = []\n",
" out_append = out.append\n",
"\n",
" # find all matching entries and recursively process them together if the match\n",
" # is a tree. If the match is a non-tree item, put it into the result.\n",
" # Processed items will be set None\n",
" for ti, tree_data in enumerate(trees_data):\n",
" for ii, item in enumerate(tree_data):\n",
" if not item:\n",
" continue\n",
" # END skip already done items\n",
" entries = [None for _ in range(nt)]\n",
" entries[ti] = item\n",
" sha, mode, name = item # its faster to unpack @UnusedVariable\n",
" is_dir = S_ISDIR(mode) # type mode bits\n",
"\n",
" # find this item in all other tree data items\n",
" # wrap around, but stop one before our current index, hence\n",
" # ti+nt, not ti+1+nt\n",
" for tio in range(ti + 1, ti + nt):\n",
" tio = tio % nt\n",
" entries[tio] = _find_by_name(trees_data[tio], name, is_dir, ii)\n",
" # END for each other item data\n",
"\n",
" # if we are a directory, enter recursion\n",
" if is_dir:\n",
" out.extend(traverse_trees_recursive(\n",
" odb, [((ei and ei[0]) or None) for ei in entries], path_prefix + name + '/'))\n",
" else:\n",
" out_append(tuple(_to_full_path(e, path_prefix) for e in entries))\n",
" # END handle recursion\n",
"\n",
" # finally mark it done\n",
" tree_data[ii] = None\n",
" # END for each item\n",
"\n",
" # we are done with one tree, set all its data empty\n",
" del(tree_data[:])\n",
" # END for each tree_data chunk\n",
" return out"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0.012195121951219513,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 66 | 0.001942 |
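The core move in traverse_trees_recursive is matching one entry against every other tree and consuming matches so they are not emitted twice. A simplified, runnable toy of that matching step, where entries are (name, sha) pairs; the wrap-around modulo scan of the original is dropped here because consumed items already prevent double emission:

def merge_by_name(trees):
    # trees: list of lists of (name, sha) pairs; mutated in place
    out = []
    n = len(trees)
    for ti, tree in enumerate(trees):
        for item in tree:
            if item is None:
                continue              # already matched from an earlier tree
            row = [None] * n
            row[ti] = item
            for tio in range(ti + 1, n):
                other = trees[tio]
                for jj, cand in enumerate(other):
                    if cand is not None and cand[0] == item[0]:
                        row[tio] = cand
                        other[jj] = None   # consume the match
                        break
            out.append(tuple(row))
    return out

print(merge_by_name([[('README', 'a1')],
                     [('README', 'b1'), ('src', 'b2')]]))
# [(('README', 'a1'), ('README', 'b1')), (None, ('src', 'b2'))]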
def _gluster_output_cleanup(result):
'''
Gluster versions prior to 6 have a bug that requires tricking
isatty. This adds "gluster> " to the output. Strip it off and
produce clean xml for ElementTree.
'''
ret = ''
for line in result.splitlines():
if line.startswith('gluster>'):
ret += line[9:].strip()
elif line.startswith('Welcome to gluster prompt'):
pass
else:
ret += line.strip()
return ret | [
"def",
"_gluster_output_cleanup",
"(",
"result",
")",
":",
"ret",
"=",
"''",
"for",
"line",
"in",
"result",
".",
"splitlines",
"(",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"'gluster>'",
")",
":",
"ret",
"+=",
"line",
"[",
"9",
":",
"]",
".",
"strip",
"(",
")",
"elif",
"line",
".",
"startswith",
"(",
"'Welcome to gluster prompt'",
")",
":",
"pass",
"else",
":",
"ret",
"+=",
"line",
".",
"strip",
"(",
")",
"return",
"ret"
] | 29.5 | 0.002053 | [
"def _gluster_output_cleanup(result):\n",
" '''\n",
" Gluster versions prior to 6 have a bug that requires tricking\n",
" isatty. This adds \"gluster> \" to the output. Strip it off and\n",
" produce clean xml for ElementTree.\n",
" '''\n",
" ret = ''\n",
" for line in result.splitlines():\n",
" if line.startswith('gluster>'):\n",
" ret += line[9:].strip()\n",
" elif line.startswith('Welcome to gluster prompt'):\n",
" pass\n",
" else:\n",
" ret += line.strip()\n",
"\n",
" return ret"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07142857142857142
] | 16 | 0.004464 |
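A quick demonstration of _gluster_output_cleanup on a fabricated transcript (real input comes from the gluster CLI; the XML fragment here is invented):

sample = ("Welcome to gluster prompt, type 'help' to see the commands.\n"
          "gluster> <cliOutput><opRet>0</opRet>\n"
          "gluster> </cliOutput>\n")
print(_gluster_output_cleanup(sample))
# -> <cliOutput><opRet>0</opRet></cliOutput>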
def runExperiment(args):
"""
Runs the experiment. The code is organized around what we need for specific
figures in the paper.
args is a dict representing the various parameters. We do it this way to
support multiprocessing. The function returns the args dict updated with a
number of additional keys containing performance metrics.
"""
numObjects = args.get("numObjects", 10)
numSequences = args.get("numSequences", 10)
numFeatures = args.get("numFeatures", 10)
seqLength = args.get("seqLength", 10)
numPoints = args.get("numPoints", 10)
trialNum = args.get("trialNum", 42)
inputSize = args.get("inputSize", 1024)
numLocations = args.get("numLocations", 100000)
numInputBits = args.get("inputBits", 20)
settlingTime = args.get("settlingTime", 1)
numRepetitions = args.get("numRepetitions", 5)
figure = args.get("figure", False)
synPermProximalDecL2 = args.get("synPermProximalDecL2", 0.001)
minThresholdProximalL2 = args.get("minThresholdProximalL2", 10)
sampleSizeProximalL2 = args.get("sampleSizeProximalL2", 15)
basalPredictedSegmentDecrement = args.get(
"basalPredictedSegmentDecrement", 0.0006)
stripStats = args.get("stripStats", True)
random.seed(trialNum)
#####################################################
#
# Create the sequences and objects, and make sure they share the
# same features and locations.
sequences = createObjectMachine(
machineType="sequence",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
sequences.createRandomSequences(numSequences, seqLength)
objects = createObjectMachine(
machineType="simple",
numInputBits=numInputBits,
sensorInputSize=inputSize,
externalInputSize=1024,
numCorticalColumns=1,
numFeatures=numFeatures,
numLocations=numLocations,
seed=trialNum
)
# Make sure they share the same features and locations
objects.locations = sequences.locations
objects.features = sequences.features
objects.createRandomObjects(numObjects, numPoints=numPoints,
numLocations=numLocations,
numFeatures=numFeatures)
#####################################################
#
# Setup experiment and train the network
name = "combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d" % (
numSequences, numObjects, numFeatures, numLocations, trialNum
)
exp = L4TMExperiment(
name=name,
numCorticalColumns=1,
inputSize=inputSize,
numExternalInputBits=numInputBits,
externalInputSize=1024,
numInputBits=numInputBits,
seed=trialNum,
L2Overrides={"synPermProximalDec": synPermProximalDecL2,
"minThresholdProximal": minThresholdProximalL2,
"sampleSizeProximal": sampleSizeProximalL2,
"initialProximalPermanence": 0.45,
"synPermProximalDec": 0.002,
},
TMOverrides={
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement
},
L4Overrides={"initialPermanence": 0.21,
"activationThreshold": 18,
"minThreshold": 18,
"basalPredictedSegmentDecrement": basalPredictedSegmentDecrement,
},
)
printDiagnostics(exp, sequences, objects, args, verbosity=0)
# Train the network on all the sequences and then all the objects.
if figure in ["S", "6", "7"]:
trainSuperimposedSequenceObjects(exp, numRepetitions, sequences, objects)
else:
trainObjects(objects, exp, numRepetitions)
trainSequences(sequences, exp, numObjects)
##########################################################################
#
# Run inference
print "Running inference"
if figure in ["6"]:
# We have trained the system on both temporal sequences and
# objects. We test the system by randomly switching between sequences and
# objects. To replicate the graph, we want to run sequences and objects in a
# specific order
for trial,itemType in enumerate(["sequence", "object", "sequence", "object",
"sequence", "sequence", "object",
"sequence", ]):
if itemType == "sequence":
objectId = random.randint(0, numSequences-1)
inferSequence(exp, objectId, sequences, objectId+numObjects)
else:
objectId = random.randint(0, numObjects-1)
inferObject(exp, objectId, objects, objectId)
elif figure in ["7"]:
# For figure 7 we have trained the system on both temporal sequences and
# objects. We test the system by superimposing randomly chosen sequences and
# objects.
for trial in range(10):
sequenceId = random.randint(0, numSequences - 1)
objectId = random.randint(0, numObjects - 1)
inferSuperimposedSequenceObjects(exp, sequenceId=sequenceId,
objectId=objectId, sequences=sequences, objects=objects)
else:
# By default run inference on every sequence and object in order.
for objectId in objects:
inferObject(exp, objectId, objects, objectId)
for seqId in sequences:
inferSequence(exp, seqId, sequences, seqId+numObjects)
##########################################################################
#
# Debugging diagnostics
printDiagnosticsAfterTraining(exp)
##########################################################################
#
# Compute a number of overall inference statistics
print "# Sequences {} # features {} trial # {}\n".format(
numSequences, numFeatures, trialNum)
convergencePoint, sequenceAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, numObjects)
print "L2 accuracy for sequences:", sequenceAccuracyL2
convergencePoint, objectAccuracyL2 = exp.averageConvergencePoint(
"L2 Representation", 30, 40, 1, 0, numObjects)
print "L2 accuracy for objects:", objectAccuracyL2
objectCorrectSparsityTM, _ = exp.averageSequenceAccuracy(15, 25, 0, numObjects)
print "TM accuracy for objects:", objectCorrectSparsityTM
sequenceCorrectSparsityTM, sequenceCorrectClassificationsTM = \
exp.averageSequenceAccuracy(15, 25, numObjects)
print "TM accuracy for sequences:", sequenceCorrectClassificationsTM
infStats = exp.getInferenceStats()
predictedActive = numpy.zeros(len(infStats))
predicted = numpy.zeros(len(infStats))
predictedActiveL4 = numpy.zeros(len(infStats))
predictedL4 = numpy.zeros(len(infStats))
for i,stat in enumerate(infStats):
predictedActive[i] = float(sum(stat["TM PredictedActive C0"][2:])) / len(
stat["TM PredictedActive C0"][2:])
predicted[i] = float(sum(stat["TM NextPredicted C0"][2:])) / len(
stat["TM NextPredicted C0"][2:])
predictedActiveL4[i] = float(sum(stat["L4 PredictedActive C0"])) / len(
stat["L4 PredictedActive C0"])
predictedL4[i] = float(sum(stat["L4 Predicted C0"])) / len(
stat["L4 Predicted C0"])
# Return a bunch of metrics we will use in plots
args.update({"sequences": sequences.getObjects()})
args.update({"objects": objects.getObjects()})
args.update({"convergencePoint":convergencePoint})
args.update({"objectAccuracyL2": objectAccuracyL2})
args.update({"sequenceAccuracyL2": sequenceAccuracyL2})
args.update({"sequenceCorrectSparsityTM": sequenceCorrectSparsityTM})
args.update({"sequenceCorrectClassificationsTM": sequenceCorrectClassificationsTM})
args.update({"objectCorrectSparsityTM": objectCorrectSparsityTM})
args.update({"averagePredictions": predicted.mean()})
args.update({"averagePredictedActive": predictedActive.mean()})
args.update({"averagePredictionsL4": predictedL4.mean()})
args.update({"averagePredictedActiveL4": predictedActiveL4.mean()})
if stripStats:
exp.stripStats()
args.update({"name": exp.name})
args.update({"statistics": exp.statistics})
args.update({"networkConfig": exp.config})
return args | [
"def",
"runExperiment",
"(",
"args",
")",
":",
"numObjects",
"=",
"args",
".",
"get",
"(",
"\"numObjects\"",
",",
"10",
")",
"numSequences",
"=",
"args",
".",
"get",
"(",
"\"numSequences\"",
",",
"10",
")",
"numFeatures",
"=",
"args",
".",
"get",
"(",
"\"numFeatures\"",
",",
"10",
")",
"seqLength",
"=",
"args",
".",
"get",
"(",
"\"seqLength\"",
",",
"10",
")",
"numPoints",
"=",
"args",
".",
"get",
"(",
"\"numPoints\"",
",",
"10",
")",
"trialNum",
"=",
"args",
".",
"get",
"(",
"\"trialNum\"",
",",
"42",
")",
"inputSize",
"=",
"args",
".",
"get",
"(",
"\"inputSize\"",
",",
"1024",
")",
"numLocations",
"=",
"args",
".",
"get",
"(",
"\"numLocations\"",
",",
"100000",
")",
"numInputBits",
"=",
"args",
".",
"get",
"(",
"\"inputBits\"",
",",
"20",
")",
"settlingTime",
"=",
"args",
".",
"get",
"(",
"\"settlingTime\"",
",",
"1",
")",
"numRepetitions",
"=",
"args",
".",
"get",
"(",
"\"numRepetitions\"",
",",
"5",
")",
"figure",
"=",
"args",
".",
"get",
"(",
"\"figure\"",
",",
"False",
")",
"synPermProximalDecL2",
"=",
"args",
".",
"get",
"(",
"\"synPermProximalDecL2\"",
",",
"0.001",
")",
"minThresholdProximalL2",
"=",
"args",
".",
"get",
"(",
"\"minThresholdProximalL2\"",
",",
"10",
")",
"sampleSizeProximalL2",
"=",
"args",
".",
"get",
"(",
"\"sampleSizeProximalL2\"",
",",
"15",
")",
"basalPredictedSegmentDecrement",
"=",
"args",
".",
"get",
"(",
"\"basalPredictedSegmentDecrement\"",
",",
"0.0006",
")",
"stripStats",
"=",
"args",
".",
"get",
"(",
"\"stripStats\"",
",",
"True",
")",
"random",
".",
"seed",
"(",
"trialNum",
")",
"#####################################################",
"#",
"# Create the sequences and objects, and make sure they share the",
"# same features and locations.",
"sequences",
"=",
"createObjectMachine",
"(",
"machineType",
"=",
"\"sequence\"",
",",
"numInputBits",
"=",
"numInputBits",
",",
"sensorInputSize",
"=",
"inputSize",
",",
"externalInputSize",
"=",
"1024",
",",
"numCorticalColumns",
"=",
"1",
",",
"numFeatures",
"=",
"numFeatures",
",",
"numLocations",
"=",
"numLocations",
",",
"seed",
"=",
"trialNum",
")",
"sequences",
".",
"createRandomSequences",
"(",
"numSequences",
",",
"seqLength",
")",
"objects",
"=",
"createObjectMachine",
"(",
"machineType",
"=",
"\"simple\"",
",",
"numInputBits",
"=",
"numInputBits",
",",
"sensorInputSize",
"=",
"inputSize",
",",
"externalInputSize",
"=",
"1024",
",",
"numCorticalColumns",
"=",
"1",
",",
"numFeatures",
"=",
"numFeatures",
",",
"numLocations",
"=",
"numLocations",
",",
"seed",
"=",
"trialNum",
")",
"# Make sure they share the same features and locations",
"objects",
".",
"locations",
"=",
"sequences",
".",
"locations",
"objects",
".",
"features",
"=",
"sequences",
".",
"features",
"objects",
".",
"createRandomObjects",
"(",
"numObjects",
",",
"numPoints",
"=",
"numPoints",
",",
"numLocations",
"=",
"numLocations",
",",
"numFeatures",
"=",
"numFeatures",
")",
"#####################################################",
"#",
"# Setup experiment and train the network",
"name",
"=",
"\"combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d\"",
"%",
"(",
"numSequences",
",",
"numObjects",
",",
"numFeatures",
",",
"numLocations",
",",
"trialNum",
")",
"exp",
"=",
"L4TMExperiment",
"(",
"name",
"=",
"name",
",",
"numCorticalColumns",
"=",
"1",
",",
"inputSize",
"=",
"inputSize",
",",
"numExternalInputBits",
"=",
"numInputBits",
",",
"externalInputSize",
"=",
"1024",
",",
"numInputBits",
"=",
"numInputBits",
",",
"seed",
"=",
"trialNum",
",",
"L2Overrides",
"=",
"{",
"\"synPermProximalDec\"",
":",
"synPermProximalDecL2",
",",
"\"minThresholdProximal\"",
":",
"minThresholdProximalL2",
",",
"\"sampleSizeProximal\"",
":",
"sampleSizeProximalL2",
",",
"\"initialProximalPermanence\"",
":",
"0.45",
",",
"\"synPermProximalDec\"",
":",
"0.002",
",",
"}",
",",
"TMOverrides",
"=",
"{",
"\"basalPredictedSegmentDecrement\"",
":",
"basalPredictedSegmentDecrement",
"}",
",",
"L4Overrides",
"=",
"{",
"\"initialPermanence\"",
":",
"0.21",
",",
"\"activationThreshold\"",
":",
"18",
",",
"\"minThreshold\"",
":",
"18",
",",
"\"basalPredictedSegmentDecrement\"",
":",
"basalPredictedSegmentDecrement",
",",
"}",
",",
")",
"printDiagnostics",
"(",
"exp",
",",
"sequences",
",",
"objects",
",",
"args",
",",
"verbosity",
"=",
"0",
")",
"# Train the network on all the sequences and then all the objects.",
"if",
"figure",
"in",
"[",
"\"S\"",
",",
"\"6\"",
",",
"\"7\"",
"]",
":",
"trainSuperimposedSequenceObjects",
"(",
"exp",
",",
"numRepetitions",
",",
"sequences",
",",
"objects",
")",
"else",
":",
"trainObjects",
"(",
"objects",
",",
"exp",
",",
"numRepetitions",
")",
"trainSequences",
"(",
"sequences",
",",
"exp",
",",
"numObjects",
")",
"##########################################################################",
"#",
"# Run inference",
"print",
"\"Running inference\"",
"if",
"figure",
"in",
"[",
"\"6\"",
"]",
":",
"# We have trained the system on both temporal sequences and",
"# objects. We test the system by randomly switching between sequences and",
"# objects. To replicate the graph, we want to run sequences and objects in a",
"# specific order",
"for",
"trial",
",",
"itemType",
"in",
"enumerate",
"(",
"[",
"\"sequence\"",
",",
"\"object\"",
",",
"\"sequence\"",
",",
"\"object\"",
",",
"\"sequence\"",
",",
"\"sequence\"",
",",
"\"object\"",
",",
"\"sequence\"",
",",
"]",
")",
":",
"if",
"itemType",
"==",
"\"sequence\"",
":",
"objectId",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"numSequences",
"-",
"1",
")",
"inferSequence",
"(",
"exp",
",",
"objectId",
",",
"sequences",
",",
"objectId",
"+",
"numObjects",
")",
"else",
":",
"objectId",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"numObjects",
"-",
"1",
")",
"inferObject",
"(",
"exp",
",",
"objectId",
",",
"objects",
",",
"objectId",
")",
"elif",
"figure",
"in",
"[",
"\"7\"",
"]",
":",
"# For figure 7 we have trained the system on both temporal sequences and",
"# objects. We test the system by superimposing randomly chosen sequences and",
"# objects.",
"for",
"trial",
"in",
"range",
"(",
"10",
")",
":",
"sequenceId",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"numSequences",
"-",
"1",
")",
"objectId",
"=",
"random",
".",
"randint",
"(",
"0",
",",
"numObjects",
"-",
"1",
")",
"inferSuperimposedSequenceObjects",
"(",
"exp",
",",
"sequenceId",
"=",
"sequenceId",
",",
"objectId",
"=",
"objectId",
",",
"sequences",
"=",
"sequences",
",",
"objects",
"=",
"objects",
")",
"else",
":",
"# By default run inference on every sequence and object in order.",
"for",
"objectId",
"in",
"objects",
":",
"inferObject",
"(",
"exp",
",",
"objectId",
",",
"objects",
",",
"objectId",
")",
"for",
"seqId",
"in",
"sequences",
":",
"inferSequence",
"(",
"exp",
",",
"seqId",
",",
"sequences",
",",
"seqId",
"+",
"numObjects",
")",
"##########################################################################",
"#",
"# Debugging diagnostics",
"printDiagnosticsAfterTraining",
"(",
"exp",
")",
"##########################################################################",
"#",
"# Compute a number of overall inference statistics",
"print",
"\"# Sequences {} # features {} trial # {}\\n\"",
".",
"format",
"(",
"numSequences",
",",
"numFeatures",
",",
"trialNum",
")",
"convergencePoint",
",",
"sequenceAccuracyL2",
"=",
"exp",
".",
"averageConvergencePoint",
"(",
"\"L2 Representation\"",
",",
"30",
",",
"40",
",",
"1",
",",
"numObjects",
")",
"print",
"\"L2 accuracy for sequences:\"",
",",
"sequenceAccuracyL2",
"convergencePoint",
",",
"objectAccuracyL2",
"=",
"exp",
".",
"averageConvergencePoint",
"(",
"\"L2 Representation\"",
",",
"30",
",",
"40",
",",
"1",
",",
"0",
",",
"numObjects",
")",
"print",
"\"L2 accuracy for objects:\"",
",",
"objectAccuracyL2",
"objectCorrectSparsityTM",
",",
"_",
"=",
"exp",
".",
"averageSequenceAccuracy",
"(",
"15",
",",
"25",
",",
"0",
",",
"numObjects",
")",
"print",
"\"TM accuracy for objects:\"",
",",
"objectCorrectSparsityTM",
"sequenceCorrectSparsityTM",
",",
"sequenceCorrectClassificationsTM",
"=",
"exp",
".",
"averageSequenceAccuracy",
"(",
"15",
",",
"25",
",",
"numObjects",
")",
"print",
"\"TM accuracy for sequences:\"",
",",
"sequenceCorrectClassificationsTM",
"infStats",
"=",
"exp",
".",
"getInferenceStats",
"(",
")",
"predictedActive",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"infStats",
")",
")",
"predicted",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"infStats",
")",
")",
"predictedActiveL4",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"infStats",
")",
")",
"predictedL4",
"=",
"numpy",
".",
"zeros",
"(",
"len",
"(",
"infStats",
")",
")",
"for",
"i",
",",
"stat",
"in",
"enumerate",
"(",
"infStats",
")",
":",
"predictedActive",
"[",
"i",
"]",
"=",
"float",
"(",
"sum",
"(",
"stat",
"[",
"\"TM PredictedActive C0\"",
"]",
"[",
"2",
":",
"]",
")",
")",
"/",
"len",
"(",
"stat",
"[",
"\"TM PredictedActive C0\"",
"]",
"[",
"2",
":",
"]",
")",
"predicted",
"[",
"i",
"]",
"=",
"float",
"(",
"sum",
"(",
"stat",
"[",
"\"TM NextPredicted C0\"",
"]",
"[",
"2",
":",
"]",
")",
")",
"/",
"len",
"(",
"stat",
"[",
"\"TM NextPredicted C0\"",
"]",
"[",
"2",
":",
"]",
")",
"predictedActiveL4",
"[",
"i",
"]",
"=",
"float",
"(",
"sum",
"(",
"stat",
"[",
"\"L4 PredictedActive C0\"",
"]",
")",
")",
"/",
"len",
"(",
"stat",
"[",
"\"L4 PredictedActive C0\"",
"]",
")",
"predictedL4",
"[",
"i",
"]",
"=",
"float",
"(",
"sum",
"(",
"stat",
"[",
"\"L4 Predicted C0\"",
"]",
")",
")",
"/",
"len",
"(",
"stat",
"[",
"\"L4 Predicted C0\"",
"]",
")",
"# Return a bunch of metrics we will use in plots",
"args",
".",
"update",
"(",
"{",
"\"sequences\"",
":",
"sequences",
".",
"getObjects",
"(",
")",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"objects\"",
":",
"objects",
".",
"getObjects",
"(",
")",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"convergencePoint\"",
":",
"convergencePoint",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"objectAccuracyL2\"",
":",
"objectAccuracyL2",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"sequenceAccuracyL2\"",
":",
"sequenceAccuracyL2",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"sequenceCorrectSparsityTM\"",
":",
"sequenceCorrectSparsityTM",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"sequenceCorrectClassificationsTM\"",
":",
"sequenceCorrectClassificationsTM",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"objectCorrectSparsityTM\"",
":",
"objectCorrectSparsityTM",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"averagePredictions\"",
":",
"predicted",
".",
"mean",
"(",
")",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"averagePredictedActive\"",
":",
"predictedActive",
".",
"mean",
"(",
")",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"averagePredictionsL4\"",
":",
"predictedL4",
".",
"mean",
"(",
")",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"averagePredictedActiveL4\"",
":",
"predictedActiveL4",
".",
"mean",
"(",
")",
"}",
")",
"if",
"stripStats",
":",
"exp",
".",
"stripStats",
"(",
")",
"args",
".",
"update",
"(",
"{",
"\"name\"",
":",
"exp",
".",
"name",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"statistics\"",
":",
"exp",
".",
"statistics",
"}",
")",
"args",
".",
"update",
"(",
"{",
"\"networkConfig\"",
":",
"exp",
".",
"config",
"}",
")",
"return",
"args"
] | 37.019048 | 0.014907 | [
"def runExperiment(args):\n",
" \"\"\"\n",
" Runs the experiment. The code is organized around what we need for specific\n",
" figures in the paper.\n",
"\n",
" args is a dict representing the various parameters. We do it this way to\n",
" support multiprocessing. The function returns the args dict updated with a\n",
" number of additional keys containing performance metrics.\n",
" \"\"\"\n",
" numObjects = args.get(\"numObjects\", 10)\n",
" numSequences = args.get(\"numSequences\", 10)\n",
" numFeatures = args.get(\"numFeatures\", 10)\n",
" seqLength = args.get(\"seqLength\", 10)\n",
" numPoints = args.get(\"numPoints\", 10)\n",
" trialNum = args.get(\"trialNum\", 42)\n",
" inputSize = args.get(\"inputSize\", 1024)\n",
" numLocations = args.get(\"numLocations\", 100000)\n",
" numInputBits = args.get(\"inputBits\", 20)\n",
" settlingTime = args.get(\"settlingTime\", 1)\n",
" numRepetitions = args.get(\"numRepetitions\", 5)\n",
" figure = args.get(\"figure\", False)\n",
" synPermProximalDecL2 = args.get(\"synPermProximalDecL2\", 0.001)\n",
" minThresholdProximalL2 = args.get(\"minThresholdProximalL2\", 10)\n",
" sampleSizeProximalL2 = args.get(\"sampleSizeProximalL2\", 15)\n",
" basalPredictedSegmentDecrement = args.get(\n",
" \"basalPredictedSegmentDecrement\", 0.0006)\n",
" stripStats = args.get(\"stripStats\", True)\n",
"\n",
"\n",
" random.seed(trialNum)\n",
"\n",
" #####################################################\n",
" #\n",
" # Create the sequences and objects, and make sure they share the\n",
" # same features and locations.\n",
"\n",
" sequences = createObjectMachine(\n",
" machineType=\"sequence\",\n",
" numInputBits=numInputBits,\n",
" sensorInputSize=inputSize,\n",
" externalInputSize=1024,\n",
" numCorticalColumns=1,\n",
" numFeatures=numFeatures,\n",
" numLocations=numLocations,\n",
" seed=trialNum\n",
" )\n",
" sequences.createRandomSequences(numSequences, seqLength)\n",
"\n",
" objects = createObjectMachine(\n",
" machineType=\"simple\",\n",
" numInputBits=numInputBits,\n",
" sensorInputSize=inputSize,\n",
" externalInputSize=1024,\n",
" numCorticalColumns=1,\n",
" numFeatures=numFeatures,\n",
" numLocations=numLocations,\n",
" seed=trialNum\n",
" )\n",
"\n",
" # Make sure they share the same features and locations\n",
" objects.locations = sequences.locations\n",
" objects.features = sequences.features\n",
"\n",
" objects.createRandomObjects(numObjects, numPoints=numPoints,\n",
" numLocations=numLocations,\n",
" numFeatures=numFeatures)\n",
"\n",
" #####################################################\n",
" #\n",
" # Setup experiment and train the network\n",
" name = \"combined_sequences_S%03d_O%03d_F%03d_L%03d_T%03d\" % (\n",
" numSequences, numObjects, numFeatures, numLocations, trialNum\n",
" )\n",
" exp = L4TMExperiment(\n",
" name=name,\n",
" numCorticalColumns=1,\n",
" inputSize=inputSize,\n",
" numExternalInputBits=numInputBits,\n",
" externalInputSize=1024,\n",
" numInputBits=numInputBits,\n",
" seed=trialNum,\n",
" L2Overrides={\"synPermProximalDec\": synPermProximalDecL2,\n",
" \"minThresholdProximal\": minThresholdProximalL2,\n",
" \"sampleSizeProximal\": sampleSizeProximalL2,\n",
" \"initialProximalPermanence\": 0.45,\n",
" \"synPermProximalDec\": 0.002,\n",
" },\n",
" TMOverrides={\n",
" \"basalPredictedSegmentDecrement\": basalPredictedSegmentDecrement\n",
" },\n",
" L4Overrides={\"initialPermanence\": 0.21,\n",
" \"activationThreshold\": 18,\n",
" \"minThreshold\": 18,\n",
" \"basalPredictedSegmentDecrement\": basalPredictedSegmentDecrement,\n",
" },\n",
" )\n",
"\n",
" printDiagnostics(exp, sequences, objects, args, verbosity=0)\n",
"\n",
" # Train the network on all the sequences and then all the objects.\n",
" if figure in [\"S\", \"6\", \"7\"]:\n",
" trainSuperimposedSequenceObjects(exp, numRepetitions, sequences, objects)\n",
" else:\n",
" trainObjects(objects, exp, numRepetitions)\n",
" trainSequences(sequences, exp, numObjects)\n",
"\n",
" ##########################################################################\n",
" #\n",
" # Run inference\n",
"\n",
" print \"Running inference\"\n",
" if figure in [\"6\"]:\n",
" # We have trained the system on both temporal sequences and\n",
" # objects. We test the system by randomly switching between sequences and\n",
" # objects. To replicate the graph, we want to run sequences and objects in a\n",
" # specific order\n",
" for trial,itemType in enumerate([\"sequence\", \"object\", \"sequence\", \"object\",\n",
" \"sequence\", \"sequence\", \"object\",\n",
" \"sequence\", ]):\n",
" if itemType == \"sequence\":\n",
" objectId = random.randint(0, numSequences-1)\n",
" inferSequence(exp, objectId, sequences, objectId+numObjects)\n",
"\n",
" else:\n",
" objectId = random.randint(0, numObjects-1)\n",
" inferObject(exp, objectId, objects, objectId)\n",
"\n",
"\n",
" elif figure in [\"7\"]:\n",
" # For figure 7 we have trained the system on both temporal sequences and\n",
" # objects. We test the system by superimposing randomly chosen sequences and\n",
" # objects.\n",
" for trial in range(10):\n",
" sequenceId = random.randint(0, numSequences - 1)\n",
" objectId = random.randint(0, numObjects - 1)\n",
" inferSuperimposedSequenceObjects(exp, sequenceId=sequenceId,\n",
" objectId=objectId, sequences=sequences, objects=objects)\n",
"\n",
" else:\n",
" # By default run inference on every sequence and object in order.\n",
" for objectId in objects:\n",
" inferObject(exp, objectId, objects, objectId)\n",
" for seqId in sequences:\n",
" inferSequence(exp, seqId, sequences, seqId+numObjects)\n",
"\n",
"\n",
" ##########################################################################\n",
" #\n",
" # Debugging diagnostics\n",
" printDiagnosticsAfterTraining(exp)\n",
"\n",
" ##########################################################################\n",
" #\n",
" # Compute a number of overall inference statistics\n",
"\n",
" print \"# Sequences {} # features {} trial # {}\\n\".format(\n",
" numSequences, numFeatures, trialNum)\n",
"\n",
" convergencePoint, sequenceAccuracyL2 = exp.averageConvergencePoint(\n",
" \"L2 Representation\", 30, 40, 1, numObjects)\n",
" print \"L2 accuracy for sequences:\", sequenceAccuracyL2\n",
"\n",
" convergencePoint, objectAccuracyL2 = exp.averageConvergencePoint(\n",
" \"L2 Representation\", 30, 40, 1, 0, numObjects)\n",
" print \"L2 accuracy for objects:\", objectAccuracyL2\n",
"\n",
" objectCorrectSparsityTM, _ = exp.averageSequenceAccuracy(15, 25, 0, numObjects)\n",
" print \"TM accuracy for objects:\", objectCorrectSparsityTM\n",
"\n",
" sequenceCorrectSparsityTM, sequenceCorrectClassificationsTM = \\\n",
" exp.averageSequenceAccuracy(15, 25, numObjects)\n",
" print \"TM accuracy for sequences:\", sequenceCorrectClassificationsTM\n",
"\n",
" infStats = exp.getInferenceStats()\n",
" predictedActive = numpy.zeros(len(infStats))\n",
" predicted = numpy.zeros(len(infStats))\n",
" predictedActiveL4 = numpy.zeros(len(infStats))\n",
" predictedL4 = numpy.zeros(len(infStats))\n",
" for i,stat in enumerate(infStats):\n",
" predictedActive[i] = float(sum(stat[\"TM PredictedActive C0\"][2:])) / len(\n",
" stat[\"TM PredictedActive C0\"][2:])\n",
" predicted[i] = float(sum(stat[\"TM NextPredicted C0\"][2:])) / len(\n",
" stat[\"TM NextPredicted C0\"][2:])\n",
"\n",
" predictedActiveL4[i] = float(sum(stat[\"L4 PredictedActive C0\"])) / len(\n",
" stat[\"L4 PredictedActive C0\"])\n",
" predictedL4[i] = float(sum(stat[\"L4 Predicted C0\"])) / len(\n",
" stat[\"L4 Predicted C0\"])\n",
"\n",
" # Return a bunch of metrics we will use in plots\n",
" args.update({\"sequences\": sequences.getObjects()})\n",
" args.update({\"objects\": objects.getObjects()})\n",
" args.update({\"convergencePoint\":convergencePoint})\n",
" args.update({\"objectAccuracyL2\": objectAccuracyL2})\n",
" args.update({\"sequenceAccuracyL2\": sequenceAccuracyL2})\n",
" args.update({\"sequenceCorrectSparsityTM\": sequenceCorrectSparsityTM})\n",
" args.update({\"sequenceCorrectClassificationsTM\": sequenceCorrectClassificationsTM})\n",
" args.update({\"objectCorrectSparsityTM\": objectCorrectSparsityTM})\n",
" args.update({\"averagePredictions\": predicted.mean()})\n",
" args.update({\"averagePredictedActive\": predictedActive.mean()})\n",
" args.update({\"averagePredictionsL4\": predictedL4.mean()})\n",
" args.update({\"averagePredictedActiveL4\": predictedActiveL4.mean()})\n",
"\n",
" if stripStats:\n",
" exp.stripStats()\n",
" args.update({\"name\": exp.name})\n",
" args.update({\"statistics\": exp.statistics})\n",
" args.update({\"networkConfig\": exp.config})\n",
"\n",
" return args"
] | [
0,
0.16666666666666666,
0,
0,
0,
0,
0,
0,
0,
0.023809523809523808,
0.021739130434782608,
0.022727272727272728,
0.025,
0.025,
0.02631578947368421,
0.023809523809523808,
0.02,
0.023255813953488372,
0.022222222222222223,
0.02040816326530612,
0.02702702702702703,
0.015384615384615385,
0.015151515151515152,
0.016129032258064516,
0.022222222222222223,
0,
0.022727272727272728,
0,
0,
0.08333333333333333,
0,
0.017857142857142856,
0.25,
0.014925373134328358,
0.030303030303030304,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.017543859649122806,
0.023809523809523808,
0.025,
0,
0.015873015873015872,
0.015873015873015872,
0.01639344262295082,
0,
0.017857142857142856,
0.25,
0.023255813953488372,
0.015625,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0.01694915254237288,
0.01818181818181818,
0.021739130434782608,
0.025,
0.14285714285714285,
0,
0,
0,
0,
0.02631578947368421,
0.03225806451612903,
0.012987012987012988,
0.14285714285714285,
0,
0,
0.015873015873015872,
0,
0.014492753623188406,
0.03125,
0,
0.125,
0,
0,
0,
0.012987012987012988,
0.25,
0.05555555555555555,
0,
0.03571428571428571,
0.045454545454545456,
0,
0,
0.012345679012345678,
0,
0.024691358024691357,
0,
0,
0.030303030303030304,
0,
0,
0,
0.08333333333333333,
0,
0,
0,
0,
0.08333333333333333,
0,
0.012345679012345678,
0,
0,
0.01818181818181818,
0.0196078431372549,
0.014925373134328358,
0.024390243902439025,
0,
0.125,
0,
0,
0.019230769230769232,
0,
0.01639344262295082,
0,
0,
0.025974025974025976,
0.25,
0.038461538461538464,
0.02702702702702703,
0,
0.012987012987012988,
0.25,
0.018867924528301886,
0,
0.016666666666666666,
0,
0,
0.014285714285714285,
0,
0.017543859649122806,
0,
0.014705882352941176,
0,
0.018867924528301886,
0,
0.024390243902439025,
0.016666666666666666,
0,
0.015151515151515152,
0.019230769230769232,
0.014084507042253521,
0,
0.02702702702702703,
0.02127659574468085,
0.024390243902439025,
0.02040816326530612,
0.023255813953488372,
0.05405405405405406,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0.018867924528301886,
0.02040816326530612,
0.03773584905660377,
0.018518518518518517,
0.017241379310344827,
0.013888888888888888,
0.023255813953488372,
0.014705882352941176,
0.017857142857142856,
0.015151515151515152,
0.016666666666666666,
0.014285714285714285,
0,
0.058823529411764705,
0,
0.029411764705882353,
0.021739130434782608,
0.022222222222222223,
0,
0.15384615384615385
] | 210 | 0.021352 |
def get_items(self):
"""Get a list of objects describing the content of `self`.
:return: the list of objects.
:returntype: `list` of `MucItemBase` (`MucItem` and/or `MucStatus`)
"""
if not self.xmlnode.children:
return []
ret=[]
n=self.xmlnode.children
while n:
ns=n.ns()
if ns and ns.getContent()!=self.ns:
pass
elif n.name=="item":
ret.append(MucItem(n))
elif n.name=="status":
ret.append(MucStatus(n))
# FIXME: alt,decline,invite,password
n=n.next
return ret | [
"def",
"get_items",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"xmlnode",
".",
"children",
":",
"return",
"[",
"]",
"ret",
"=",
"[",
"]",
"n",
"=",
"self",
".",
"xmlnode",
".",
"children",
"while",
"n",
":",
"ns",
"=",
"n",
".",
"ns",
"(",
")",
"if",
"ns",
"and",
"ns",
".",
"getContent",
"(",
")",
"!=",
"self",
".",
"ns",
":",
"pass",
"elif",
"n",
".",
"name",
"==",
"\"item\"",
":",
"ret",
".",
"append",
"(",
"MucItem",
"(",
"n",
")",
")",
"elif",
"n",
".",
"name",
"==",
"\"status\"",
":",
"ret",
".",
"append",
"(",
"MucStatus",
"(",
"n",
")",
")",
"# FIXME: alt,decline,invite,password",
"n",
"=",
"n",
".",
"next",
"return",
"ret"
] | 30.761905 | 0.013514 | [
"def get_items(self):\n",
" \"\"\"Get a list of objects describing the content of `self`.\n",
"\n",
" :return: the list of objects.\n",
" :returntype: `list` of `MucItemBase` (`MucItem` and/or `MucStatus`)\n",
" \"\"\"\n",
" if not self.xmlnode.children:\n",
" return []\n",
" ret=[]\n",
" n=self.xmlnode.children\n",
" while n:\n",
" ns=n.ns()\n",
" if ns and ns.getContent()!=self.ns:\n",
" pass\n",
" elif n.name==\"item\":\n",
" ret.append(MucItem(n))\n",
" elif n.name==\"status\":\n",
" ret.append(MucStatus(n))\n",
" # FIXME: alt,decline,invite,password\n",
" n=n.next\n",
" return ret"
] | [
0,
0.014925373134328358,
0,
0,
0,
0,
0,
0,
0.06666666666666667,
0.03125,
0,
0.045454545454545456,
0.020833333333333332,
0,
0.030303030303030304,
0,
0.02857142857142857,
0,
0,
0.047619047619047616,
0.05555555555555555
] | 21 | 0.016247 |
def SInt(value, width):
"""
Convert a bitstring `value` of `width` bits to a signed integer
representation.
:param value: The value to convert.
:type value: int or long or BitVec
:param int width: The width of the bitstring to consider
:return: The converted value
    :rtype: int or long or BitVec
"""
return Operators.ITEBV(width, Bit(value, width - 1) == 1,
GetNBits(value, width) - 2**width,
GetNBits(value, width)) | [
"def",
"SInt",
"(",
"value",
",",
"width",
")",
":",
"return",
"Operators",
".",
"ITEBV",
"(",
"width",
",",
"Bit",
"(",
"value",
",",
"width",
"-",
"1",
")",
"==",
"1",
",",
"GetNBits",
"(",
"value",
",",
"width",
")",
"-",
"2",
"**",
"width",
",",
"GetNBits",
"(",
"value",
",",
"width",
")",
")"
] | 35.428571 | 0.001965 | [
"def SInt(value, width):\n",
" \"\"\"\n",
" Convert a bitstring `value` of `width` bits to a signed integer\n",
" representation.\n",
"\n",
" :param value: The value to convert.\n",
" :type value: int or long or BitVec\n",
" :param int width: The width of the bitstring to consider\n",
" :return: The converted value\n",
" :rtype int or long or BitVec\n",
" \"\"\"\n",
" return Operators.ITEBV(width, Bit(value, width - 1) == 1,\n",
" GetNBits(value, width) - 2**width,\n",
" GetNBits(value, width))"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02
] | 14 | 0.001429 |