Dataset preview — 10k rows. Column summary:

| column | type | stats |
|---|---|---|
| Unnamed: 0 | int64 | 0 – 10k |
| repository_name | string | lengths 7 – 54 |
| func_path_in_repository | string | lengths 5 – 223 |
| func_name | string | lengths 1 – 134 |
| whole_func_string | string | lengths 100 – 30.3k |
| language | string | 1 class (python) |
| func_code_string | string | lengths 100 – 30.3k |
| func_code_tokens | string | lengths 138 – 33.2k |
| func_documentation_string | string | lengths 1 – 15k |
| func_documentation_tokens | string | lengths 5 – 5.14k |
| split_name | string | 1 class (train) |
| func_code_url | string | lengths 91 – 315 |

Each row below lists `row | repository_name | func_path_in_repository | func_name | language | split_name | func_code_url`, followed by the function source (`whole_func_string`, which contains `func_documentation_string` as its docstring).
400 | pyviz/holoviews | holoviews/plotting/bokeh/util.py | pad_width | python | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/plotting/bokeh/util.py#L468-L494

```python
def pad_width(model, table_padding=0.85, tabs_padding=1.2):
    """
    Computes the width of a model and sets up appropriate padding
    for Tabs and DataTable types.
    """
    if isinstance(model, Row):
        vals = [pad_width(child) for child in model.children]
        width = np.max([v for v in vals if v is not None])
    elif isinstance(model, Column):
        vals = [pad_width(child) for child in model.children]
        width = np.sum([v for v in vals if v is not None])
    elif isinstance(model, Tabs):
        vals = [pad_width(t) for t in model.tabs]
        width = np.max([v for v in vals if v is not None])
        for model in model.tabs:
            model.width = width
        width = int(tabs_padding*width)
    elif isinstance(model, DataTable):
        width = model.width
        model.width = int(table_padding*width)
    elif isinstance(model, (WidgetBox, Div)):
        width = model.width
    elif model:
        width = model.plot_width
    else:
        width = 0
    return width
```
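A quick usage sketch, assuming bokeh's `Div` and `Column` layout models and the `numpy` import (`np`) that `pad_width` relies on are in scope; the widget widths are made up:

```python
import numpy as np
from bokeh.models import Column, Div

div_a = Div(text="a", width=300)  # hypothetical widgets
div_b = Div(text="b", width=200)

print(pad_width(div_a))  # 300 -- Div widths are returned unchanged

# A Column sums its children's widths (the np.sum branch above).
print(pad_width(Column(children=[div_a, div_b])))  # 500
```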
401 | Esri/ArcREST | src/arcrest/hostedservice/service.py | Services.services | python | train | https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/hostedservice/service.py#L144-L170

```python
def services(self):
    """ returns all the service objects in the admin service's page """
    self._services = []
    params = {"f": "json"}
    if not self._url.endswith('/services'):
        uURL = self._url + "/services"
    else:
        uURL = self._url
    res = self._get(url=uURL, param_dict=params,
                    securityHandler=self._securityHandler,
                    proxy_port=self._proxy_port,
                    proxy_url=self._proxy_url)
    for k, v in res.items():
        if k == "foldersDetail":
            for item in v:
                if 'isDefault' in item and item['isDefault'] == False:
                    fURL = self._url + "/services/" + item['folderName']
                    resFolder = self._get(url=fURL, param_dict=params,
                                          securityHandler=self._securityHandler,
                                          proxy_port=self._proxy_port,
                                          proxy_url=self._proxy_url)
                    for k1, v1 in resFolder.items():
                        if k1 == "services":
                            self._checkservice(k1, v1, fURL)
        elif k == "services":
            self._checkservice(k, v, uURL)
    return self._services
```
402 | niklasf/python-chess | chess/polyglot.py | MemoryMappedReader.find | python | train | https://github.com/niklasf/python-chess/blob/d91f986ca3e046b300a0d7d9ee2a13b07610fe1a/chess/polyglot.py#L421-L438

```python
def find(self, board: Union[chess.Board, int], *, minimum_weight: int = 1, exclude_moves: Container[chess.Move] = ()) -> Entry:
    """
    Finds the main entry for the given position or Zobrist hash.

    The main entry is the (first) entry with the highest weight.

    By default, entries with weight ``0`` are excluded. This is a common
    way to delete entries from an opening book without compacting it. Pass
    *minimum_weight* ``0`` to select all entries.

    :raises: :exc:`IndexError` if no entries are found. Use
        :func:`~chess.polyglot.MemoryMappedReader.get()` if you prefer to
        get ``None`` instead of an exception.
    """
    try:
        return max(self.find_all(board, minimum_weight=minimum_weight, exclude_moves=exclude_moves), key=lambda entry: entry.weight)
    except ValueError:
        raise IndexError()
```
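A usage sketch with python-chess's reader API; the book path `book.bin` is a placeholder for any Polyglot opening book file:

```python
import chess
import chess.polyglot

board = chess.Board()  # starting position

with chess.polyglot.open_reader("book.bin") as reader:
    try:
        entry = reader.find(board)
        print(entry.move, entry.weight)  # highest-weight book move
    except IndexError:
        print("position not in book")
```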
403 | saltstack/salt | salt/proxy/esxcluster.py | find_credentials | python | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/esxcluster.py#L281-L302

```python
def find_credentials():
    '''
    Cycle through all the possible credentials and return the first one that
    works.
    '''
    # if the username and password were already found don't go through the
    # connection process again
    if 'username' in DETAILS and 'password' in DETAILS:
        return DETAILS['username'], DETAILS['password']
    passwords = DETAILS['passwords']
    for password in passwords:
        DETAILS['password'] = password
        if not __salt__['vsphere.test_vcenter_connection']():
            # We are unable to authenticate
            continue
        # If we have data returned from above, we've successfully authenticated.
        return DETAILS['username'], password
    # We've reached the end of the list without successfully authenticating.
    raise salt.exceptions.VMwareConnectionError('Cannot complete login due to '
                                                'incorrect credentials.')
```
404 | twilio/twilio-python | twilio/rest/taskrouter/v1/workspace/task_queue/__init__.py | TaskQueueContext.cumulative_statistics | python | train | https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/taskrouter/v1/workspace/task_queue/__init__.py#L406-L419

```python
def cumulative_statistics(self):
    """
    Access the cumulative_statistics

    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsList
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.task_queue_cumulative_statistics.TaskQueueCumulativeStatisticsList
    """
    if self._cumulative_statistics is None:
        self._cumulative_statistics = TaskQueueCumulativeStatisticsList(
            self._version,
            workspace_sid=self._solution['workspace_sid'],
            task_queue_sid=self._solution['sid'],
        )
    return self._cumulative_statistics
```
405 | mkoura/dump2polarion | dump2polarion/utils.py | get_unicode_str | python | train | https://github.com/mkoura/dump2polarion/blob/f4bd24e9d5070e282aad15f1e8bb514c0525cd37/dump2polarion/utils.py#L30-L36

```python
def get_unicode_str(obj):
    """Makes sure obj is a unicode string."""
    if isinstance(obj, six.text_type):
        return obj
    if isinstance(obj, six.binary_type):
        return obj.decode("utf-8", errors="ignore")
    return six.text_type(obj)
```
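The three branches in action, as a minimal sketch (assumes `six` is importable, as in the module):

```python
print(get_unicode_str(u"plain text"))   # returned unchanged
print(get_unicode_str(b"caf\xc3\xa9"))  # UTF-8 bytes decoded -> 'café'
print(get_unicode_str(42))              # '42' -- anything else is stringified
```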
406 | softlayer/softlayer-python | SoftLayer/managers/user.py | UserManager.get_events | python | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/user.py#L171-L197

```python
def get_events(self, user_id, start_date=None):
    """Gets the event log for a specific user, default start_date is 30 days ago

    :param int user_id: User id to view
    :param string start_date: "%Y-%m-%dT%H:%M:%s.0000-06:00" is the full formatted string.
        The Timezone part has to be HH:MM, notice the : there.
    :returns: https://softlayer.github.io/reference/datatypes/SoftLayer_Event_Log/
    """
    if start_date is None:
        date_object = datetime.datetime.today() - datetime.timedelta(days=30)
        start_date = date_object.strftime("%Y-%m-%dT00:00:00")

    object_filter = {
        'userId': {
            'operation': user_id
        },
        'eventCreateDate': {
            'operation': 'greaterThanDate',
            'options': [{'name': 'date', 'value': [start_date]}]
        }
    }
    events = self.client.call('Event_Log', 'getAllObjects', filter=object_filter)
    if events is None:
        events = [{'eventName': 'No Events Found'}]
    return events
```
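How the default 30-day window is built, standard library only:

```python
import datetime

date_object = datetime.datetime.today() - datetime.timedelta(days=30)
start_date = date_object.strftime("%Y-%m-%dT00:00:00")
print(start_date)  # e.g. '2019-03-15T00:00:00' -- midnight, 30 days ago
```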
407 | inasafe/inasafe | safe/common/utilities.py | get_utm_zone | python | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/common/utilities.py#L480-L485

```python
def get_utm_zone(longitude):
    """Return utm zone."""
    zone = int((math.floor((longitude + 180.0) / 6.0) + 1) % 60)
    if zone == 0:
        zone = 60
    return zone
```
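Two worked examples, including the wrap-around case that the `zone == 0` branch handles (assumes `math` is imported alongside the function):

```python
# longitude 106.8: floor((106.8 + 180) / 6) + 1 = 48, and 48 % 60 = 48
print(get_utm_zone(106.8))   # 48

# longitude 179.9: floor(359.9 / 6) + 1 = 60, and 60 % 60 = 0 -> remapped to 60
print(get_utm_zone(179.9))   # 60
```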
408 | ponty/PyVirtualDisplay | pyvirtualdisplay/abstractdisplay.py | AbstractDisplay.start | python | train | https://github.com/ponty/PyVirtualDisplay/blob/903841f5ef13bf162be6fdd22daa5c349af45d67/pyvirtualdisplay/abstractdisplay.py#L100-L140

```python
def start(self):
    '''
    start display

    :rtype: self
    '''
    if self.use_xauth:
        self._setup_xauth()
    EasyProcess.start(self)

    # https://github.com/ponty/PyVirtualDisplay/issues/2
    # https://github.com/ponty/PyVirtualDisplay/issues/14
    self.old_display_var = os.environ.get('DISPLAY', None)

    self.redirect_display(True)
    # wait until X server is active
    start_time = time.time()
    ok = False
    d = self.new_display_var
    while time.time() - start_time < X_START_TIMEOUT:
        try:
            exit_code = EasyProcess('xdpyinfo').call().return_code
        except EasyProcessError:
            log.warn('xdpyinfo was not found, X start can not be checked! Please install xdpyinfo!')
            time.sleep(X_START_WAIT)  # old method
            ok = True
            break

        if exit_code != 0:
            pass
        else:
            log.info('Successfully started X with display "%s".', d)
            ok = True
            break

        time.sleep(X_START_TIME_STEP)
    if not ok:
        msg = 'Failed to start X on display "%s" (xdpyinfo check failed).'
        raise XStartTimeoutError(msg % d)
    return self
```
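The retry loop above is a generic poll-until-timeout pattern; a standalone sketch, with illustrative constants and a caller-supplied `check` callback:

```python
import time

def wait_until(check, timeout=10.0, step=0.1):
    """Poll check() until it returns True or `timeout` seconds elapse."""
    start_time = time.time()
    while time.time() - start_time < timeout:
        if check():
            return True
        time.sleep(step)
    return False

# e.g. pass a probe that runs xdpyinfo and tests its exit code for success
```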
409 | PmagPy/PmagPy | pmagpy/pmag.py | open_file | python | train | https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L1689-L1735

```python
def open_file(infile, verbose=True):
    """
    Open file and return a list of the file's lines.
    Try to use utf-8 encoding, and if that fails use Latin-1.

    Parameters
    ----------
    infile : str
        full path to file

    Returns
    ----------
    data: list
        all lines in the file
    """
    try:
        with codecs.open(infile, "r", "utf-8") as f:
            lines = list(f.readlines())
    # file might not exist
    except FileNotFoundError:
        if verbose:
            print(
                '-W- You are trying to open a file: {} that does not exist'.format(infile))
        return []
    # encoding might be wrong
    except UnicodeDecodeError:
        try:
            with codecs.open(infile, "r", "Latin-1") as f:
                print(
                    '-I- Using less strict decoding for {}, output may have formatting errors'.format(infile))
                lines = list(f.readlines())
        # if file exists, and encoding is correct, who knows what the problem is
        except Exception as ex:
            print("-W- ", type(ex), ex)
            return []
    except Exception as ex:
        print("-W- ", type(ex), ex)
        return []
    # don't leave a blank line at the end
    i = 0
    while i < 10:
        if not len(lines[-1].strip("\n").strip("\t")):
            lines = lines[:-1]
            i += 1
        else:
            i = 10
    return lines
```
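Typical call; the file path is a placeholder:

```python
lines = open_file("measurements.txt")  # hypothetical input file
if lines:
    print(len(lines), "lines; first:", lines[0].rstrip())
else:
    print("file missing or unreadable")
```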
410 | tanghaibao/goatools | goatools/anno/init/reader_gaf.py | GafData._wrlog_details_illegal_gaf | python | train | https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/anno/init/reader_gaf.py#L280-L300

```python
def _wrlog_details_illegal_gaf(self, fout_err, err_cnts):
    """Print details regarding illegal GAF lines seen to a log file."""
    # fout_err = "{}.log".format(fin_gaf)
    gaf_base = os.path.basename(fout_err)
    with open(fout_err, 'w') as prt:
        prt.write("ILLEGAL GAF ERROR SUMMARY:\n\n")
        for err_cnt in err_cnts:
            prt.write(err_cnt)
        prt.write("\n\nILLEGAL GAF ERROR DETAILS:\n\n")
        for lnum, line in self.ignored:
            prt.write("**WARNING: GAF LINE IGNORED: {FIN}[{LNUM}]:\n{L}\n".format(
                FIN=gaf_base, L=line, LNUM=lnum))
            self.prt_line_detail(prt, line)
            prt.write("\n\n")
        for error, lines in self.illegal_lines.items():
            for lnum, line in lines:
                prt.write("**WARNING: GAF LINE ILLEGAL({ERR}): {FIN}[{LNUM}]:\n{L}\n".format(
                    ERR=error, FIN=gaf_base, L=line, LNUM=lnum))
                self.prt_line_detail(prt, line)
                prt.write("\n\n")
    return fout_err
```
411 | Sanji-IO/sanji | sanji/router.py | Router.create_route_func | python | train | https://github.com/Sanji-IO/sanji/blob/5c54cc2772bdfeae3337f785de1957237b828b34/sanji/router.py#L90-L103

```python
def create_route_func(self, method):
    """
    create_route_func
    """
    def _route(resource, handler, schema=None):
        """
        _route
        """
        route = self.routes.get(resource, Route(resource))
        route.__getattribute__(method)(handler, schema)
        self.routes.update({resource: route})

        return self

    return _route
```
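The factory returns one registration closure per HTTP verb; a hypothetical wiring sketch (the verb loop and class shape are assumptions, and `create_route_func` is taken to be the method shown above):

```python
class Router(object):
    def __init__(self):
        self.routes = {}
        # One registration function per verb: router.get(...), router.post(...)
        for method in ("get", "post", "put", "delete"):
            setattr(self, method, self.create_route_func(method))

# router.get("/status", status_handler) would then record a Route whose
# `get` handler is status_handler.
```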
412 | lappis-unb/salic-ml | src/salicml/metrics/finance/item_prices.py | aggregated_relevant_items | python | train | https://github.com/lappis-unb/salic-ml/blob/1b3ebc4f8067740999897ccffd9892dc94482a93/src/salicml/metrics/finance/item_prices.py#L50-L63

```python
def aggregated_relevant_items(raw_df):
    """
    Aggregation to calculate mean and std.
    """
    df = (
        raw_df[['idSegmento', 'idPlanilhaItens', 'VlUnitarioAprovado']]
        .groupby(by=['idSegmento', 'idPlanilhaItens'])
        .agg([np.mean, lambda x: np.std(x, ddof=0)])
    )
    df.columns = df.columns.droplevel(0)
    return (
        df
        .rename(columns={'<lambda>': 'std'})
    )
```
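A self-contained pandas example on toy data (values are made up; the exact printed layout and the `'<lambda>'` column name depend on the pandas version):

```python
import numpy as np
import pandas as pd

raw_df = pd.DataFrame({
    'idSegmento':         ['A', 'A', 'A', 'B'],
    'idPlanilhaItens':    [1, 1, 2, 1],
    'VlUnitarioAprovado': [10.0, 20.0, 5.0, 7.0],
})

print(aggregated_relevant_items(raw_df))
# Expected, roughly:
#                             mean  std
# idSegmento idPlanilhaItens
# A          1                15.0  5.0   (population std of 10 and 20)
#            2                 5.0  0.0
# B          1                 7.0  0.0
```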
413 | evhub/coconut | coconut/compiler/compiler.py | Compiler.endline_repl | python | train | https://github.com/evhub/coconut/blob/ff97177344e7604e89a0a98a977a87ed2a56fc6d/coconut/compiler/compiler.py#L973-L997

```python
def endline_repl(self, inputstring, reformatting=False, **kwargs):
    """Add end of line comments."""
    out = []
    ln = 1  # line number
    for line in inputstring.splitlines():
        add_one_to_ln = False
        try:
            if line.endswith(lnwrapper):
                line, index = line[:-1].rsplit("#", 1)
                new_ln = self.get_ref("ln", index)
                if new_ln < ln:
                    raise CoconutInternalException("line number decreased", (ln, new_ln))
                ln = new_ln
                line = line.rstrip()
                add_one_to_ln = True
            if not reformatting or add_one_to_ln:  # add_one_to_ln here is a proxy for whether there was a ln comment or not
                line += self.comments.get(ln, "")
            if not reformatting and line.rstrip() and not line.lstrip().startswith("#"):
                line += self.ln_comment(ln)
        except CoconutInternalException as err:
            complain(err)
        out.append(line)
        if add_one_to_ln:
            ln += 1
    return "\n".join(out)
```
414 | wummel/linkchecker | linkcheck/checker/mailtourl.py | getaddresses | python | train | https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/checker/mailtourl.py#L37-L47

```python
def getaddresses(addr):
    """Return list of email addresses from given field value."""
    parsed = [mail for name, mail in AddressList(addr).addresslist if mail]
    if parsed:
        addresses = parsed
    elif addr:
        # we could not parse any mail addresses, so try with the raw string
        addresses = [addr]
    else:
        addresses = []
    return addresses
```
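A usage sketch, assuming `AddressList` is the rfc822/email address-list parser the module imports:

```python
print(getaddresses("Alice <alice@example.com>, bob@example.com"))
# expected: ['alice@example.com', 'bob@example.com']

print(getaddresses(""))  # nothing to parse and no raw-string fallback
# []
```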
415 | linkedin/luminol | src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py | BitmapDetector._generate_SAX_single | python | train | https://github.com/linkedin/luminol/blob/42e4ab969b774ff98f902d064cb041556017f635/src/luminol/algorithms/anomaly_detector_algorithms/bitmap_detector.py#L75-L90

```python
def _generate_SAX_single(self, sections, value):
    """
    Generate SAX representation(Symbolic Aggregate approXimation) for a single data point.
    Read more about it here: Assumption-Free Anomaly Detection in Time Series(http://alumni.cs.ucr.edu/~ratana/SSDBM05.pdf).
    :param dict sections: value sections.
    :param float value: value to be categorized.
    :return str: a SAX representation.
    """
    sax = 0
    for section_number in sections.keys():
        section_lower_bound = sections[section_number]
        if value >= section_lower_bound:
            sax = section_number
        else:
            break
    return str(sax)
```
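A worked trace using a standalone copy of the method body (the section bounds are hypothetical; the loop assumes the dict's keys iterate in ascending order, as an insertion-ordered dict built that way does):

```python
def generate_sax_single(sections, value):
    # Standalone copy of the method body, for illustration.
    sax = 0
    for section_number in sections.keys():
        if value >= sections[section_number]:
            sax = section_number
        else:
            break
    return str(sax)

sections = {0: 0.00, 1: 0.25, 2: 0.50, 3: 0.75}  # section -> lower bound

print(generate_sax_single(sections, 0.6))   # '2': 0.6 >= 0.50 but < 0.75
print(generate_sax_single(sections, -1.0))  # '0': below every lower bound
```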
416 | ARMmbed/mbed-cloud-sdk-python | src/mbed_cloud/connect/connect.py | ConnectAPI.get_resource_value | python | train | https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/connect/connect.py#L279-L302

```python
def get_resource_value(self, device_id, resource_path, fix_path=True, timeout=None):
    """Get a resource value for a given device and resource path by blocking thread.

    Example usage:

    .. code-block:: python

        try:
            v = api.get_resource_value(device_id, path)
            print("Current value", v)
        except CloudAsyncError, e:
            print("Error", e)

    :param str device_id: The name/id of the device (Required)
    :param str resource_path: The resource path to get (Required)
    :param fix_path: if True then the leading /, if found, will be stripped before
        doing request to backend. This is a requirement for the API to work properly
    :param timeout: Seconds to request value for before timeout. If not provided, the
        program might hang indefinitely.
    :raises: CloudAsyncError, CloudTimeoutError
    :returns: The resource value for the requested resource path
    :rtype: str
    """
    return self.get_resource_value_async(device_id, resource_path, fix_path).wait(timeout)
```
417 | mmoussallam/bird | bird/_bird.py | _bird_core | python | train | https://github.com/mmoussallam/bird/blob/1c726e6569db4f3b00804ab7ac063acaa3965987/bird/_bird.py#L235-L320

```python
def _bird_core(X, scales, n_runs, Lambda_W, max_iter=100,
               stop_crit=np.mean,
               selection_rule=np.sum,
               n_jobs=1, indep=True,
               random_state=None, memory=Memory(None), verbose=False):
    """Automatically detect when the noise zone has been reached and stop
    MP at that point

    Parameters
    ----------
    X : array, shape (n_channels, n_times)
        The numpy n_channels-by-N array to be denoised where n_channels is
        number of sensors and N the dimension
    scales : list
        The list of MDCT scales that will be used to build the
        dictionary Phi
    n_runs : int
        the number of runs (n_runs in the paper)
    Lambda_W : float
        bound for lambda under which a run will be stopped
    max_iter : int
        Maximum number of iterations (serves as alternate stopping criterion)
    stop_crit : function
        controls the calculation of Lambda
    selection_rule : callable
        controls the way multiple channel projections are combined for atom
        selection, only used if indep=False
    n_jobs : int
        number of jobs to run in parallel
    indep : bool
        True for BIRD (independent processing of each channel),
        False for S-BIRD (structured sparsity sought)
    random_state : None | int | np.random.RandomState
        To specify the random generator state (seed).
    memory : instance of Memory
        The object to use to cache some computations. If cachedir is None, no
        caching is performed.
    verbose : bool
        verbose mode

    Returns
    -------
    X_denoise : array, shape (n_channels, n_times)
        denoised array of same shape as X
    """
    Phi = MDCT(scales)
    pad = int(1.5 * max(scales))
    X_denoise = np.zeros_like(X)
    approx = []
    rng = check_random_state(random_state)
    seeds = rng.randint(4294967295, size=n_runs)  # < max seed value
    if n_jobs <= 0:
        n_cores = multiprocessing.cpu_count()
        n_jobs = min(n_cores + n_jobs + 1, n_cores)

    if indep:
        # Independent treatment of each channel (plain BIRD)
        for r, x in zip(X_denoise, X):
            this_approx = Parallel(n_jobs=n_jobs)(
                delayed(_denoise)(this_seeds, x, Phi, Lambda_W,
                                  max_iter, pad=pad, verbose=verbose,
                                  indep=True, memory=memory)
                for this_seeds in
                np.array_split(seeds, n_jobs))
            this_approx = sum(this_approx[1:], this_approx[0])
            r[:] = sum([a[pad:-pad] for a in this_approx])
            approx.append(this_approx)
    else:
        # data need to be processed jointly
        this_approx = Parallel(n_jobs=n_jobs)(
            delayed(_denoise)(this_seeds, X, Phi, Lambda_W,
                              max_iter, pad=pad, verbose=verbose,
                              selection_rule=selection_rule,
                              indep=False, memory=memory,
                              stop_crit=stop_crit)
            for this_seeds in
            np.array_split(seeds, n_jobs))
        # reconstruction by averaging
        for jidx in range(len(this_approx)):
            for ridx in range(len(this_approx[jidx])):
                X_denoise += this_approx[jidx][ridx]
    X_denoise /= float(n_runs)
    return X_denoise
```
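A numpy-only sketch of the seeding scheme used above: one independent seed per run, split into one chunk per worker:

```python
import numpy as np

n_runs, n_jobs = 8, 3
rng = np.random.RandomState(42)
seeds = rng.randint(4294967295, size=n_runs)  # one seed per run

for chunk in np.array_split(seeds, n_jobs):
    print(chunk)  # chunks of 3, 3 and 2 seeds handed to the 3 workers
```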
418 | pneff/wsgiservice | wsgiservice/status.py | raise_412 | def raise_412(instance, msg=None):
"""Abort the current request with a 412 (Precondition Failed) response
code. If the message is given it's output as an error message in the
response body (correctly converted to the requested MIME type).
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 412
"""
instance.response.status = 412
if msg:
instance.response.body_raw = {'error': msg}
raise ResponseException(instance.response) | python | def raise_412(instance, msg=None):
"""Abort the current request with a 412 (Precondition Failed) response
code. If the message is given it's output as an error message in the
response body (correctly converted to the requested MIME type).
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 412
"""
instance.response.status = 412
if msg:
instance.response.body_raw = {'error': msg}
raise ResponseException(instance.response) | ['def', 'raise_412', '(', 'instance', ',', 'msg', '=', 'None', ')', ':', 'instance', '.', 'response', '.', 'status', '=', '412', 'if', 'msg', ':', 'instance', '.', 'response', '.', 'body_raw', '=', '{', "'error'", ':', 'msg', '}', 'raise', 'ResponseException', '(', 'instance', '.', 'response', ')'] | Abort the current request with a 412 (Precondition Failed) response
code. If the message is given it's output as an error message in the
response body (correctly converted to the requested MIME type).
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 412 | ['Abort', 'the', 'current', 'request', 'with', 'a', '412', '(', 'Precondition', 'Failed', ')', 'response', 'code', '.', 'If', 'the', 'message', 'is', 'given', 'it', 's', 'output', 'as', 'an', 'error', 'message', 'in', 'the', 'response', 'body', '(', 'correctly', 'converted', 'to', 'the', 'requested', 'MIME', 'type', ')', '.'] | train | https://github.com/pneff/wsgiservice/blob/03c064ac2e8c53a1aac9c7b99970f23cf79e20f4/wsgiservice/status.py#L296-L308 |
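A sketch of the intended call site, assuming a wsgiservice resource method handles the request; the class and the current_etag helper below are hypothetical stand-ins:

from wsgiservice.status import raise_412

class DocumentResource(object):  # stands in for a wsgiservice Resource subclass
    def PUT(self, id):
        # hypothetical precondition check against a stored ETag
        if self.request.headers.get('If-Match') != self.current_etag(id):
            raise_412(self, 'If-Match precondition failed')
        # ... apply the update ...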
419 | kubernetes-client/python | kubernetes/client/apis/core_v1_api.py | CoreV1Api.list_component_status | def list_component_status(self, **kwargs):
"""
list objects of kind ComponentStatus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_status(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ComponentStatusList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_component_status_with_http_info(**kwargs)
else:
(data) = self.list_component_status_with_http_info(**kwargs)
return data | python | def list_component_status(self, **kwargs):
"""
list objects of kind ComponentStatus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_status(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ComponentStatusList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_component_status_with_http_info(**kwargs)
else:
(data) = self.list_component_status_with_http_info(**kwargs)
return data | ['def', 'list_component_status', '(', 'self', ',', '*', '*', 'kwargs', ')', ':', 'kwargs', '[', "'_return_http_data_only'", ']', '=', 'True', 'if', 'kwargs', '.', 'get', '(', "'async_req'", ')', ':', 'return', 'self', '.', 'list_component_status_with_http_info', '(', '*', '*', 'kwargs', ')', 'else', ':', '(', 'data', ')', '=', 'self', '.', 'list_component_status_with_http_info', '(', '*', '*', 'kwargs', ')', 'return', 'data'] | list objects of kind ComponentStatus
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_component_status(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1ComponentStatusList
If the method is called asynchronously,
returns the request thread. | ['list', 'objects', 'of', 'kind', 'ComponentStatus', 'This', 'method', 'makes', 'a', 'synchronous', 'HTTP', 'request', 'by', 'default', '.', 'To', 'make', 'an', 'asynchronous', 'HTTP', 'request', 'please', 'pass', 'async_req', '=', 'True', '>>>', 'thread', '=', 'api', '.', 'list_component_status', '(', 'async_req', '=', 'True', ')', '>>>', 'result', '=', 'thread', '.', 'get', '()'] | train | https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/core_v1_api.py#L10980-L11006 |
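For context, the usual way to reach this method through the official client configuration helpers:

from kubernetes import client, config

config.load_kube_config()                        # or config.load_incluster_config()
v1 = client.CoreV1Api()
statuses = v1.list_component_status(watch=False)
for item in statuses.items:
    print(item.metadata.name, [c.type for c in item.conditions])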
420 | dslackw/slpkg | slpkg/utils.py | Utils.check_downloaded | def check_downloaded(self, path, maybe_downloaded):
"""Check if files downloaded and return downloaded
packages
"""
downloaded = []
for pkg in maybe_downloaded:
if os.path.isfile(path + pkg):
downloaded.append(pkg)
return downloaded | python | def check_downloaded(self, path, maybe_downloaded):
"""Check if files downloaded and return downloaded
packages
"""
downloaded = []
for pkg in maybe_downloaded:
if os.path.isfile(path + pkg):
downloaded.append(pkg)
return downloaded | ['def', 'check_downloaded', '(', 'self', ',', 'path', ',', 'maybe_downloaded', ')', ':', 'downloaded', '=', '[', ']', 'for', 'pkg', 'in', 'maybe_downloaded', ':', 'if', 'os', '.', 'path', '.', 'isfile', '(', 'path', '+', 'pkg', ')', ':', 'downloaded', '.', 'append', '(', 'pkg', ')', 'return', 'downloaded'] | Check if files downloaded and return downloaded
packages | ['Check', 'if', 'files', 'downloaded', 'and', 'return', 'downloaded', 'packages'] | train | https://github.com/dslackw/slpkg/blob/dd2e08a80e944d337d157b992167ba631a4343de/slpkg/utils.py#L76-L84 |
421 | watson-developer-cloud/python-sdk | ibm_watson/text_to_speech_v1.py | Pronunciation._to_dict | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'pronunciation') and self.pronunciation is not None:
_dict['pronunciation'] = self.pronunciation
return _dict | python | def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'pronunciation') and self.pronunciation is not None:
_dict['pronunciation'] = self.pronunciation
return _dict | ['def', '_to_dict', '(', 'self', ')', ':', '_dict', '=', '{', '}', 'if', 'hasattr', '(', 'self', ',', "'pronunciation'", ')', 'and', 'self', '.', 'pronunciation', 'is', 'not', 'None', ':', '_dict', '[', "'pronunciation'", ']', '=', 'self', '.', 'pronunciation', 'return', '_dict'] | Return a json dictionary representing this model. | ['Return', 'a', 'json', 'dictionary', 'representing', 'this', 'model', '.'] | train | https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/text_to_speech_v1.py#L932-L937 |
422 | odlgroup/odl | odl/contrib/datasets/ct/fips.py | lotus_root_geometry | def lotus_root_geometry():
"""Tomographic geometry for the lotus root dataset.
Notes
-----
See the article `Tomographic X-ray data of a lotus root filled with
attenuating objects`_ for further information.
See Also
--------
lotus_root_geometry
References
----------
.. _Tomographic X-ray data of a lotus root filled with attenuating objects:
https://arxiv.org/abs/1609.07299
"""
# To get the same rotation as in the reference article
a_offset = np.pi / 2
apart = uniform_partition(a_offset,
a_offset + 2 * np.pi * 366. / 360.,
366)
# TODO: Find exact value, determined experimentally
d_offset = 0.35
dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240)
geometry = FanBeamGeometry(apart, dpart,
src_radius=540, det_radius=90)
return geometry | python | def lotus_root_geometry():
"""Tomographic geometry for the lotus root dataset.
Notes
-----
See the article `Tomographic X-ray data of a lotus root filled with
attenuating objects`_ for further information.
See Also
--------
lotus_root_geometry
References
----------
.. _Tomographic X-ray data of a lotus root filled with attenuating objects:
https://arxiv.org/abs/1609.07299
"""
# To get the same rotation as in the reference article
a_offset = np.pi / 2
apart = uniform_partition(a_offset,
a_offset + 2 * np.pi * 366. / 360.,
366)
# TODO: Find exact value, determined experimentally
d_offset = 0.35
dpart = uniform_partition(d_offset - 60, d_offset + 60, 2240)
geometry = FanBeamGeometry(apart, dpart,
src_radius=540, det_radius=90)
return geometry | ['def', 'lotus_root_geometry', '(', ')', ':', '# To get the same rotation as in the reference article', 'a_offset', '=', 'np', '.', 'pi', '/', '2', 'apart', '=', 'uniform_partition', '(', 'a_offset', ',', 'a_offset', '+', '2', '*', 'np', '.', 'pi', '*', '366.', '/', '360.', ',', '366', ')', '# TODO: Find exact value, determined experimentally', 'd_offset', '=', '0.35', 'dpart', '=', 'uniform_partition', '(', 'd_offset', '-', '60', ',', 'd_offset', '+', '60', ',', '2240', ')', 'geometry', '=', 'FanBeamGeometry', '(', 'apart', ',', 'dpart', ',', 'src_radius', '=', '540', ',', 'det_radius', '=', '90', ')', 'return', 'geometry'] | Tomographic geometry for the lotus root dataset.
Notes
-----
See the article `Tomographic X-ray data of a lotus root filled with
attenuating objects`_ for further information.
See Also
--------
lotus_root_geometry
References
----------
.. _Tomographic X-ray data of a lotus root filled with attenuating objects:
https://arxiv.org/abs/1609.07299 | ['Tomographic', 'geometry', 'for', 'the', 'lotus', 'root', 'dataset', '.'] | train | https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/contrib/datasets/ct/fips.py#L118-L148 |
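A short usage sketch; the import path and the attribute inspection assume standard odl conventions for the contrib datasets module and for geometry objects:

from odl.contrib.datasets.ct import lotus_root_geometry

geometry = lotus_root_geometry()
print(geometry)                     # FanBeamGeometry summary
print(geometry.angles.size)         # 366 projection angles
print(geometry.det_partition.size)  # 2240 detector cells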
423 | limodou/uliweb | uliweb/utils/common.py | import_mod_attr | def import_mod_attr(path):
"""
Import a module attribute given as a string, e.g. 'uliweb.orm' or 'uliweb:orm',
or pass an object directly; return the module object and the resolved object
"""
import inspect
if isinstance(path, (str, unicode)):
v = path.split(':')
if len(v) == 1:
module, func = path.rsplit('.', 1)
else:
module, func = v
mod = __import__(module, fromlist=['*'])
f = mod
for x in func.split('.'):
try:
f = getattr(f, x)
except:
raise AttributeError("Get %s attribute according %s error" % (x, path))
else:
f = path
mod = inspect.getmodule(path)
return mod, f | python | def import_mod_attr(path):
"""
Import a module attribute given as a string, e.g. 'uliweb.orm' or 'uliweb:orm',
or pass an object directly; return the module object and the resolved object
"""
import inspect
if isinstance(path, (str, unicode)):
v = path.split(':')
if len(v) == 1:
module, func = path.rsplit('.', 1)
else:
module, func = v
mod = __import__(module, fromlist=['*'])
f = mod
for x in func.split('.'):
try:
f = getattr(f, x)
except:
raise AttributeError("Get %s attribute according %s error" % (x, path))
else:
f = path
mod = inspect.getmodule(path)
return mod, f | ['def', 'import_mod_attr', '(', 'path', ')', ':', 'import', 'inspect', 'if', 'isinstance', '(', 'path', ',', '(', 'str', ',', 'unicode', ')', ')', ':', 'v', '=', 'path', '.', 'split', '(', "':'", ')', 'if', 'len', '(', 'v', ')', '==', '1', ':', 'module', ',', 'func', '=', 'path', '.', 'rsplit', '(', "'.'", ',', '1', ')', 'else', ':', 'module', ',', 'func', '=', 'v', 'mod', '=', '__import__', '(', 'module', ',', 'fromlist', '=', '[', "'*'", ']', ')', 'f', '=', 'mod', 'for', 'x', 'in', 'func', '.', 'split', '(', "'.'", ')', ':', 'try', ':', 'f', '=', 'getattr', '(', 'f', ',', 'x', ')', 'except', ':', 'raise', 'AttributeError', '(', '"Get %s attribute according %s error"', '%', '(', 'x', ',', 'path', ')', ')', 'else', ':', 'f', '=', 'path', 'mod', '=', 'inspect', '.', 'getmodule', '(', 'path', ')', 'return', 'mod', ',', 'f'] | Import string format module, e.g. 'uliweb.orm' or an object
return module object and object | ['Import', 'string', 'format', 'module', 'e', '.', 'g', '.', 'uliweb', '.', 'orm', 'or', 'an', 'object', 'return', 'module', 'object', 'and', 'object'] | train | https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L22-L45 |
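Both path spellings resolve the same way; a quick demonstration against the standard library:

mod, func = import_mod_attr('os.path:join')   # colon form: 'module:attribute'
print(mod.__name__, func('a', 'b'))           # e.g. 'posixpath a/b' on POSIX
mod, func = import_mod_attr('os.path.join')   # dotted form: last dot splits module/attr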
424 | mardix/Mocha | mocha/contrib/auth/__init__.py | UserModel.add_federation | def add_federation(self, provider, federated_id):
"""
Add federated login to the current user
:param provider:
:param federated_id:
:return:
"""
models.AuthUserFederation.new(user=self,
provider=provider,
federated_id=federated_id) | python | def add_federation(self, provider, federated_id):
"""
Add federated login to the current user
:param provider:
:param federated_id:
:return:
"""
models.AuthUserFederation.new(user=self,
provider=provider,
federated_id=federated_id) | ['def', 'add_federation', '(', 'self', ',', 'provider', ',', 'federated_id', ')', ':', 'models', '.', 'AuthUserFederation', '.', 'new', '(', 'user', '=', 'self', ',', 'provider', '=', 'provider', ',', 'federated_id', '=', 'federated_id', ')'] | Add federated login to the current user
:param provider:
:param federated_id:
:return: | ['Add', 'federated', 'login', 'to', 'the', 'current', 'user', ':', 'param', 'provider', ':', ':', 'param', 'federated_id', ':', ':', 'return', ':'] | train | https://github.com/mardix/Mocha/blob/bce481cb31a0972061dd99bc548701411dcb9de3/mocha/contrib/auth/__init__.py#L636-L645 |
425 | inspirehep/refextract | refextract/references/tag.py | get_replacement_types | def get_replacement_types(titles, reportnumbers, publishers):
"""Given the indices of the titles and reportnumbers that have been
recognised within a reference line, create a dictionary keyed by
the replacement position in the line, where the value for each
key is a string describing the type of item replaced at that
position in the line.
The description strings are:
'journal' - indicating that the replacement is a
periodical title
'reportnumber' - indicating that the replacement is a
preprint report number.
'publisher' - indicating that the replacement is a
publisher name.
@param titles: (list) of locations in the string at which
periodical titles were found.
@param reportnumbers: (list) of locations in the string at which
reportnumbers were found.
@return: (dictionary) of replacement types at various locations
within the string.
"""
rep_types = {}
for item_idx in titles:
rep_types[item_idx] = "journal"
for item_idx in reportnumbers:
rep_types[item_idx] = "reportnumber"
for item_idx in publishers:
rep_types[item_idx] = "publisher"
return rep_types | python | def get_replacement_types(titles, reportnumbers, publishers):
"""Given the indices of the titles and reportnumbers that have been
recognised within a reference line, create a dictionary keyed by
the replacement position in the line, where the value for each
key is a string describing the type of item replaced at that
position in the line.
The description strings are:
'journal' - indicating that the replacement is a
periodical title
'reportnumber' - indicating that the replacement is a
preprint report number.
'publisher' - indicating that the replacement is a
publisher name.
@param titles: (list) of locations in the string at which
periodical titles were found.
@param reportnumbers: (list) of locations in the string at which
reportnumbers were found.
@return: (dictionary) of replacement types at various locations
within the string.
"""
rep_types = {}
for item_idx in titles:
rep_types[item_idx] = "journal"
for item_idx in reportnumbers:
rep_types[item_idx] = "reportnumber"
for item_idx in publishers:
rep_types[item_idx] = "publisher"
return rep_types | ['def', 'get_replacement_types', '(', 'titles', ',', 'reportnumbers', ',', 'publishers', ')', ':', 'rep_types', '=', '{', '}', 'for', 'item_idx', 'in', 'titles', ':', 'rep_types', '[', 'item_idx', ']', '=', '"journal"', 'for', 'item_idx', 'in', 'reportnumbers', ':', 'rep_types', '[', 'item_idx', ']', '=', '"reportnumber"', 'for', 'item_idx', 'in', 'publishers', ':', 'rep_types', '[', 'item_idx', ']', '=', '"publisher"', 'return', 'rep_types'] | Given the indices of the titles and reportnumbers that have been
recognised within a reference line, create a dictionary keyed by
the replacement position in the line, where the value for each
key is a string describing the type of item replaced at that
position in the line.
The description strings are:
'journal' - indicating that the replacement is a
periodical title
'reportnumber' - indicating that the replacement is a
preprint report number.
'publisher' - indicating that the replacement is a
publisher name.
@param titles: (list) of locations in the string at which
periodical titles were found.
@param reportnumbers: (list) of locations in the string at which
reportnumbers were found.
@return: (dictionary) of replacement types at various locations
within the string. | ['Given', 'the', 'indices', 'of', 'the', 'titles', 'and', 'reportnumbers', 'that', 'have', 'been', 'recognised', 'within', 'a', 'reference', 'line', 'create', 'a', 'dictionary', 'keyed', 'by', 'the', 'replacement', 'position', 'in', 'the', 'line', 'where', 'the', 'value', 'for', 'each', 'key', 'is', 'a', 'string', 'describing', 'the', 'type', 'of', 'item', 'replaced', 'at', 'that', 'position', 'in', 'the', 'line', '.', 'The', 'description', 'strings', 'are', ':', 'title', '-', 'indicating', 'that', 'the', 'replacement', 'is', 'a', 'periodical', 'title', 'reportnumber', '-', 'indicating', 'that', 'the', 'replacement', 'is', 'a', 'preprint', 'report', 'number', '.'] | train | https://github.com/inspirehep/refextract/blob/d70e3787be3c495a3a07d1517b53f81d51c788c7/refextract/references/tag.py#L736-L761 |
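A worked example of the mapping this builds; the positions are arbitrary character offsets, and the function is module-internal so it is called directly here:

rep_types = get_replacement_types(titles=[10, 42], reportnumbers=[57], publishers=[80])
# -> {10: 'journal', 42: 'journal', 57: 'reportnumber', 80: 'publisher'}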
426 | bitesofcode/projexui | projexui/widgets/xorbquerywidget/xorbqueryplugin.py | XOrbQueryPlugin.operators | def operators(self, ignore=0):
"""
Returns a list of operators for this plugin.
:return <str>
"""
return [k for k, v in self._operatorMap.items() if not v.flags & ignore] | python | def operators(self, ignore=0):
"""
Returns a list of operators for this plugin.
:return <str>
"""
return [k for k, v in self._operatorMap.items() if not v.flags & ignore] | ['def', 'operators', '(', 'self', ',', 'ignore', '=', '0', ')', ':', 'return', '[', 'k', 'for', 'k', ',', 'v', 'in', 'self', '.', '_operatorMap', '.', 'items', '(', ')', 'if', 'not', 'v', '.', 'flags', '&', 'ignore', ']'] | Returns a list of operators for this plugin.
:return <str> | ['Returns', 'a', 'list', 'of', 'operators', 'for', 'this', 'plugin', '.', ':', 'return', '<str', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xorbquerywidget/xorbqueryplugin.py#L90-L96 |
427 | opendatateam/udata | udata/core/badges/models.py | BadgeMixin.add_badge | def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__,
kind=kind))
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id
self.update(__raw__={
'$push': {
'badges': {
'$each': [badge.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind) | python | def add_badge(self, kind):
'''Perform an atomic prepend for a new badge'''
badge = self.get_badge(kind)
if badge:
return badge
if kind not in getattr(self, '__badges__', {}):
msg = 'Unknown badge type for {model}: {kind}'
raise db.ValidationError(msg.format(model=self.__class__.__name__,
kind=kind))
badge = Badge(kind=kind)
if current_user.is_authenticated:
badge.created_by = current_user.id
self.update(__raw__={
'$push': {
'badges': {
'$each': [badge.to_mongo()],
'$position': 0
}
}
})
self.reload()
post_save.send(self.__class__, document=self)
on_badge_added.send(self, kind=kind)
return self.get_badge(kind) | ['def', 'add_badge', '(', 'self', ',', 'kind', ')', ':', 'badge', '=', 'self', '.', 'get_badge', '(', 'kind', ')', 'if', 'badge', ':', 'return', 'badge', 'if', 'kind', 'not', 'in', 'getattr', '(', 'self', ',', "'__badges__'", ',', '{', '}', ')', ':', 'msg', '=', "'Unknown badge type for {model}: {kind}'", 'raise', 'db', '.', 'ValidationError', '(', 'msg', '.', 'format', '(', 'model', '=', 'self', '.', '__class__', '.', '__name__', ',', 'kind', '=', 'kind', ')', ')', 'badge', '=', 'Badge', '(', 'kind', '=', 'kind', ')', 'if', 'current_user', '.', 'is_authenticated', ':', 'badge', '.', 'created_by', '=', 'current_user', '.', 'id', 'self', '.', 'update', '(', '__raw__', '=', '{', "'$push'", ':', '{', "'badges'", ':', '{', "'$each'", ':', '[', 'badge', '.', 'to_mongo', '(', ')', ']', ',', "'$position'", ':', '0', '}', '}', '}', ')', 'self', '.', 'reload', '(', ')', 'post_save', '.', 'send', '(', 'self', '.', '__class__', ',', 'document', '=', 'self', ')', 'on_badge_added', '.', 'send', '(', 'self', ',', 'kind', '=', 'kind', ')', 'return', 'self', '.', 'get_badge', '(', 'kind', ')'] | Perform an atomic prepend for a new badge | ['Perform', 'an', 'atomic', 'prepend', 'for', 'a', 'new', 'badge'] | train | https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/core/badges/models.py#L59-L83 |
428 | sentinel-hub/sentinelhub-py | sentinelhub/geo_utils.py | utm_to_pixel | def utm_to_pixel(east, north, transform, truncate=True):
""" Convert UTM coordinate to image coordinate given a transform
:param east: east coordinate of point
:type east: float
:param north: north coordinate of point
:type north: float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:param truncate: Whether to truncate pixel coordinates. Default is ``True``
:type truncate: bool
:return: row and column pixel image coordinates
:rtype: float, float or int, int
"""
column = (east - transform[0]) / transform[1]
row = (north - transform[3]) / transform[5]
if truncate:
return int(row + ERR), int(column + ERR)
return row, column | python | def utm_to_pixel(east, north, transform, truncate=True):
""" Convert UTM coordinate to image coordinate given a transform
:param east: east coordinate of point
:type east: float
:param north: north coordinate of point
:type north: float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:param truncate: Whether to truncate pixel coordinates. Default is ``True``
:type truncate: bool
:return: row and column pixel image coordinates
:rtype: float, float or int, int
"""
column = (east - transform[0]) / transform[1]
row = (north - transform[3]) / transform[5]
if truncate:
return int(row + ERR), int(column + ERR)
return row, column | ['def', 'utm_to_pixel', '(', 'east', ',', 'north', ',', 'transform', ',', 'truncate', '=', 'True', ')', ':', 'column', '=', '(', 'east', '-', 'transform', '[', '0', ']', ')', '/', 'transform', '[', '1', ']', 'row', '=', '(', 'north', '-', 'transform', '[', '3', ']', ')', '/', 'transform', '[', '5', ']', 'if', 'truncate', ':', 'return', 'int', '(', 'row', '+', 'ERR', ')', ',', 'int', '(', 'column', '+', 'ERR', ')', 'return', 'row', ',', 'column'] | Convert UTM coordinate to image coordinate given a transform
:param east: east coordinate of point
:type east: float
:param north: north coordinate of point
:type north: float
:param transform: georeferencing transform of the image, e.g. `(x_upper_left, res_x, 0, y_upper_left, 0, -res_y)`
:type transform: tuple or list
:param truncate: Whether to truncate pixel coordinates. Default is ``True``
:type truncate: bool
:return: row and column pixel image coordinates
:rtype: float, float or int, int | ['Convert', 'UTM', 'coordinate', 'to', 'image', 'coordinate', 'given', 'a', 'transform'] | train | https://github.com/sentinel-hub/sentinelhub-py/blob/08a83b7f1e289187159a643336995d8369860fea/sentinelhub/geo_utils.py#L140-L158 |
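A worked example with a made-up 10 m north-up transform, assuming the function is importable from sentinelhub.geo_utils (per the source path above):

from sentinelhub.geo_utils import utm_to_pixel

transform = (500000.0, 10.0, 0, 4650000.0, 0, -10.0)  # (x_ul, res_x, 0, y_ul, 0, -res_y), hypothetical
row, col = utm_to_pixel(500123.0, 4649500.0, transform)
# col = (500123 - 500000) / 10   = 12.3 -> 12 after truncation
# row = (4649500 - 4650000) / -10 = 50.0 -> 50
print(row, col)  # 50 12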
429 | developersociety/django-glitter | glitter/page.py | GlitterColumn.add_block_widget | def add_block_widget(self, top=False):
"""
Return a select widget for blocks which can be added to this column.
"""
widget = AddBlockSelect(attrs={
'class': 'glitter-add-block-select',
}, choices=self.add_block_options(top=top))
return widget.render(name='', value=None) | python | def add_block_widget(self, top=False):
"""
Return a select widget for blocks which can be added to this column.
"""
widget = AddBlockSelect(attrs={
'class': 'glitter-add-block-select',
}, choices=self.add_block_options(top=top))
return widget.render(name='', value=None) | ['def', 'add_block_widget', '(', 'self', ',', 'top', '=', 'False', ')', ':', 'widget', '=', 'AddBlockSelect', '(', 'attrs', '=', '{', "'class'", ':', "'glitter-add-block-select'", ',', '}', ',', 'choices', '=', 'self', '.', 'add_block_options', '(', 'top', '=', 'top', ')', ')', 'return', 'widget', '.', 'render', '(', 'name', '=', "''", ',', 'value', '=', 'None', ')'] | Return a select widget for blocks which can be added to this column. | ['Return', 'a', 'select', 'widget', 'for', 'blocks', 'which', 'can', 'be', 'added', 'to', 'this', 'column', '.'] | train | https://github.com/developersociety/django-glitter/blob/2c0280ec83afee80deee94ee3934fc54239c2e87/glitter/page.py#L176-L184 |
430 | uralbash/pyramid_pages | pyramid_pages/resources.py | resources_of_config | def resources_of_config(config):
""" Returns all resources and models from config.
"""
return set( # unique values
sum([ # join lists to flat list
list(value) # if value is iter (ex: list of resources)
if hasattr(value, '__iter__')
else [value, ] # if value is not iter (ex: model or resource)
for value in config.values()
], [])
) | python | def resources_of_config(config):
""" Returns all resources and models from config.
"""
return set( # unique values
sum([ # join lists to flat list
list(value) # if value is iter (ex: list of resources)
if hasattr(value, '__iter__')
else [value, ] # if value is not iter (ex: model or resource)
for value in config.values()
], [])
) | ['def', 'resources_of_config', '(', 'config', ')', ':', 'return', 'set', '(', '# unique values', 'sum', '(', '[', '# join lists to flat list', 'list', '(', 'value', ')', '# if value is iter (ex: list of resources)', 'if', 'hasattr', '(', 'value', ',', "'__iter__'", ')', 'else', '[', 'value', ',', ']', '# if value is not iter (ex: model or resource)', 'for', 'value', 'in', 'config', '.', 'values', '(', ')', ']', ',', '[', ']', ')', ')'] | Returns all resources and models from config. | ['Returns', 'all', 'resources', 'and', 'models', 'from', 'config', '.'] | train | https://github.com/uralbash/pyramid_pages/blob/545b1ecb2e5dee5742135ba2a689b9635dd4efa1/pyramid_pages/resources.py#L143-L153 |
431 | pyviz/holoviews | holoviews/util/__init__.py | opts._element_keywords | def _element_keywords(cls, backend, elements=None):
"Returns a dictionary of element names to allowed keywords"
if backend not in Store.loaded_backends():
return {}
mapping = {}
backend_options = Store.options(backend)
elements = elements if elements is not None else backend_options.keys()
for element in elements:
if '.' in element: continue
element = element if isinstance(element, tuple) else (element,)
element_keywords = []
options = backend_options['.'.join(element)]
for group in Options._option_groups:
element_keywords.extend(options[group].allowed_keywords)
mapping[element[0]] = element_keywords
return mapping | python | def _element_keywords(cls, backend, elements=None):
"Returns a dictionary of element names to allowed keywords"
if backend not in Store.loaded_backends():
return {}
mapping = {}
backend_options = Store.options(backend)
elements = elements if elements is not None else backend_options.keys()
for element in elements:
if '.' in element: continue
element = element if isinstance(element, tuple) else (element,)
element_keywords = []
options = backend_options['.'.join(element)]
for group in Options._option_groups:
element_keywords.extend(options[group].allowed_keywords)
mapping[element[0]] = element_keywords
return mapping | ['def', '_element_keywords', '(', 'cls', ',', 'backend', ',', 'elements', '=', 'None', ')', ':', 'if', 'backend', 'not', 'in', 'Store', '.', 'loaded_backends', '(', ')', ':', 'return', '{', '}', 'mapping', '=', '{', '}', 'backend_options', '=', 'Store', '.', 'options', '(', 'backend', ')', 'elements', '=', 'elements', 'if', 'elements', 'is', 'not', 'None', 'else', 'backend_options', '.', 'keys', '(', ')', 'for', 'element', 'in', 'elements', ':', 'if', "'.'", 'in', 'element', ':', 'continue', 'element', '=', 'element', 'if', 'isinstance', '(', 'element', ',', 'tuple', ')', 'else', '(', 'element', ',', ')', 'element_keywords', '=', '[', ']', 'options', '=', 'backend_options', '[', "'.'", '.', 'join', '(', 'element', ')', ']', 'for', 'group', 'in', 'Options', '.', '_option_groups', ':', 'element_keywords', '.', 'extend', '(', 'options', '[', 'group', ']', '.', 'allowed_keywords', ')', 'mapping', '[', 'element', '[', '0', ']', ']', '=', 'element_keywords', 'return', 'mapping'] | Returns a dictionary of element names to allowed keywords | ['Returns', 'a', 'dictionary', 'of', 'element', 'names', 'to', 'allowed', 'keywords'] | train | https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/util/__init__.py#L494-L511 |
432 | JdeRobot/base | src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py | mavfile.set_mode_px4 | def set_mode_px4(self, mode, custom_mode, custom_sub_mode):
'''enter arbitrary mode'''
if isinstance(mode, str):
mode_map = self.mode_mapping()
if mode_map is None or mode not in mode_map:
print("Unknown mode '%s'" % mode)
return
# PX4 uses two fields to define modes
mode, custom_mode, custom_sub_mode = px4_map[mode]
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0, mode, custom_mode, custom_sub_mode, 0, 0, 0, 0) | python | def set_mode_px4(self, mode, custom_mode, custom_sub_mode):
'''enter arbitrary mode'''
if isinstance(mode, str):
mode_map = self.mode_mapping()
if mode_map is None or mode not in mode_map:
print("Unknown mode '%s'" % mode)
return
# PX4 uses two fields to define modes
mode, custom_mode, custom_sub_mode = px4_map[mode]
self.mav.command_long_send(self.target_system, self.target_component,
mavlink.MAV_CMD_DO_SET_MODE, 0, mode, custom_mode, custom_sub_mode, 0, 0, 0, 0) | ['def', 'set_mode_px4', '(', 'self', ',', 'mode', ',', 'custom_mode', ',', 'custom_sub_mode', ')', ':', 'if', 'isinstance', '(', 'mode', ',', 'str', ')', ':', 'mode_map', '=', 'self', '.', 'mode_mapping', '(', ')', 'if', 'mode_map', 'is', 'None', 'or', 'mode', 'not', 'in', 'mode_map', ':', 'print', '(', '"Unknown mode \'%s\'"', '%', 'mode', ')', 'return', '# PX4 uses two fields to define modes', 'mode', ',', 'custom_mode', ',', 'custom_sub_mode', '=', 'px4_map', '[', 'mode', ']', 'self', '.', 'mav', '.', 'command_long_send', '(', 'self', '.', 'target_system', ',', 'self', '.', 'target_component', ',', 'mavlink', '.', 'MAV_CMD_DO_SET_MODE', ',', '0', ',', 'mode', ',', 'custom_mode', ',', 'custom_sub_mode', ',', '0', ',', '0', ',', '0', ',', '0', ')'] | enter arbitrary mode | ['enter', 'arbitrary', 'mode'] | train | https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/mavutil.py#L531-L541 |
433 | nitmir/django-cas-server | cas_server/utils.py | crypt_salt_is_valid | def crypt_salt_is_valid(salt):
"""
Validate a salt as a crypt salt
:param str salt: a password salt
:return: ``True`` if ``salt`` is a valid crypt salt on this system, ``False`` otherwise
:rtype: bool
"""
if len(salt) < 2:
return False
else:
if salt[0] == '$':
if salt[1] == '$':
return False
else:
if '$' not in salt[1:]:
return False
else:
hashed = crypt.crypt("", salt)
if not hashed or '$' not in hashed[1:]:
return False
else:
return True
else:
return True | python | def crypt_salt_is_valid(salt):
"""
Validate a salt as a crypt salt
:param str salt: a password salt
:return: ``True`` if ``salt`` is a valid crypt salt on this system, ``False`` otherwise
:rtype: bool
"""
if len(salt) < 2:
return False
else:
if salt[0] == '$':
if salt[1] == '$':
return False
else:
if '$' not in salt[1:]:
return False
else:
hashed = crypt.crypt("", salt)
if not hashed or '$' not in hashed[1:]:
return False
else:
return True
else:
return True | ['def', 'crypt_salt_is_valid', '(', 'salt', ')', ':', 'if', 'len', '(', 'salt', ')', '<', '2', ':', 'return', 'False', 'else', ':', 'if', 'salt', '[', '0', ']', '==', "'$'", ':', 'if', 'salt', '[', '1', ']', '==', "'$'", ':', 'return', 'False', 'else', ':', 'if', "'$'", 'not', 'in', 'salt', '[', '1', ':', ']', ':', 'return', 'False', 'else', ':', 'hashed', '=', 'crypt', '.', 'crypt', '(', '""', ',', 'salt', ')', 'if', 'not', 'hashed', 'or', "'$'", 'not', 'in', 'hashed', '[', '1', ':', ']', ':', 'return', 'False', 'else', ':', 'return', 'True', 'else', ':', 'return', 'True'] | Validate a salt as crypt salt
:param str salt: a password salt
:return: ``True`` if ``salt`` is a valid crypt salt on this system, ``False`` otherwise
:rtype: bool | ['Validate', 'a', 'salt', 'as', 'crypt', 'salt'] | train | https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/utils.py#L393-L417 |
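A few illustrative calls; the '$'-prefixed results also depend on which salt schemes crypt.crypt accepts on the host system:

from cas_server.utils import crypt_salt_is_valid

crypt_salt_is_valid('ab')            # True: classic two-character DES salt
crypt_salt_is_valid('$')             # False: shorter than two characters
crypt_salt_is_valid('$$1$foo')       # False: second character is '$'
crypt_salt_is_valid('$1$abcdefgh$')  # True on glibc, where crypt() knows MD5 salts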
434 | KrishnaswamyLab/PHATE | Python/phate/phate.py | PHATE.von_neumann_entropy | def von_neumann_entropy(self, t_max=100):
"""Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
"""
t = np.arange(t_max)
return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max) | python | def von_neumann_entropy(self, t_max=100):
"""Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t`
"""
t = np.arange(t_max)
return t, vne.compute_von_neumann_entropy(self.diff_op, t_max=t_max) | ['def', 'von_neumann_entropy', '(', 'self', ',', 't_max', '=', '100', ')', ':', 't', '=', 'np', '.', 'arange', '(', 't_max', ')', 'return', 't', ',', 'vne', '.', 'compute_von_neumann_entropy', '(', 'self', '.', 'diff_op', ',', 't_max', '=', 't_max', ')'] | Calculate Von Neumann Entropy
Determines the Von Neumann entropy of the diffusion affinities
at varying levels of `t`. The user should select a value of `t`
around the "knee" of the entropy curve.
We require that 'fit' stores the value of `PHATE.diff_op`
in order to calculate the Von Neumann entropy.
Parameters
----------
t_max : int, default: 100
Maximum value of `t` to test
Returns
-------
entropy : array, shape=[t_max]
The entropy of the diffusion affinities for each value of `t` | ['Calculate', 'Von', 'Neumann', 'Entropy'] | train | https://github.com/KrishnaswamyLab/PHATE/blob/346a4597dcfc523f8bef99bce482e677282b6719/Python/phate/phate.py#L865-L886 |
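A sketch of the intended workflow; the random array below is a synthetic stand-in for a real (n_samples, n_features) dataset:

import numpy as np
import phate

data = np.random.randn(200, 50)               # synthetic stand-in for real data
ph = phate.PHATE().fit(data)
t, entropy = ph.von_neumann_entropy(t_max=100)
# choose t near the "knee" of the entropy curve, e.g. from a plot of entropy against t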
435 | saltstack/salt | salt/modules/bluecoat_sslv.py | add_ip_address | def add_ip_address(list_name, item_name):
'''
Add an IP address to an IP address list.
list_name(str): The name of the specific policy IP address list to append to.
item_name(str): The IP address to append to the list.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_ip_addresses",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) | python | def add_ip_address(list_name, item_name):
'''
Add an IP address to an IP address list.
list_name(str): The name of the specific policy IP address list to append to.
item_name(str): The IP address to append to the list.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_ip_addresses",
"params": [list_name, {"item_name": item_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) | ['def', 'add_ip_address', '(', 'list_name', ',', 'item_name', ')', ':', 'payload', '=', '{', '"jsonrpc"', ':', '"2.0"', ',', '"id"', ':', '"ID0"', ',', '"method"', ':', '"add_policy_ip_addresses"', ',', '"params"', ':', '[', 'list_name', ',', '{', '"item_name"', ':', 'item_name', '}', ']', '}', 'response', '=', '__proxy__', '[', "'bluecoat_sslv.call'", ']', '(', 'payload', ',', 'True', ')', 'return', '_validate_change_result', '(', 'response', ')'] | Add an IP address to an IP address list.
list_name(str): The name of the specific policy IP address list to append to.
item_name(str): The IP address to append to the list.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_ip_address MyIPAddressList 10.0.0.0/24 | ['Add', 'an', 'IP', 'address', 'to', 'an', 'IP', 'address', 'list', '.'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bluecoat_sslv.py#L172-L194 |
436 | senaite/senaite.core | bika/lims/idserver.py | get_alpha_or_number | def get_alpha_or_number(number, template):
"""Returns an Alphanumber that represents the number passed in, expressed
as defined in the template. Otherwise, returns the number
"""
match = re.match(r".*\{alpha:(\d+a\d+d)\}$", template.strip())
if match and match.groups():
format = match.groups()[0]
return to_alpha(number, format)
return number | python | def get_alpha_or_number(number, template):
"""Returns an Alphanumber that represents the number passed in, expressed
as defined in the template. Otherwise, returns the number
"""
match = re.match(r".*\{alpha:(\d+a\d+d)\}$", template.strip())
if match and match.groups():
format = match.groups()[0]
return to_alpha(number, format)
return number | ['def', 'get_alpha_or_number', '(', 'number', ',', 'template', ')', ':', 'match', '=', 're', '.', 'match', '(', 'r".*\\{alpha:(\\d+a\\d+d)\\}$"', ',', 'template', '.', 'strip', '(', ')', ')', 'if', 'match', 'and', 'match', '.', 'groups', '(', ')', ':', 'format', '=', 'match', '.', 'groups', '(', ')', '[', '0', ']', 'return', 'to_alpha', '(', 'number', ',', 'format', ')', 'return', 'number'] | Returns an Alphanumber that represents the number passed in, expressed
as defined in the template. Otherwise, returns the number | ['Returns', 'an', 'Alphanumber', 'that', 'represents', 'the', 'number', 'passed', 'in', 'expressed', 'as', 'defined', 'in', 'the', 'template', '.', 'Otherwise', 'returns', 'the', 'number'] | train | https://github.com/senaite/senaite.core/blob/7602ce2ea2f9e81eb34e20ce17b98a3e70713f85/bika/lims/idserver.py#L387-L395 |
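Illustrative calls, assuming the function is importable from bika.lims.idserver (per the source path above); the exact Alphanumber rendering depends on to_alpha, so the first result is indicative only:

from bika.lims.idserver import get_alpha_or_number

get_alpha_or_number(3, 'WS-{alpha:2a3d}')  # template ends with {alpha:2a3d} -> to_alpha(3, '2a3d'), e.g. 'AA003'
get_alpha_or_number(3, 'WS-{seq:03d}')     # no trailing alpha directive -> returns 3 unchanged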
437 | ibis-project/ibis | ibis/expr/groupby.py | GroupedTableExpr.having | def having(self, expr):
"""
Add a post-aggregation result filter (like the having argument in
`aggregate`), for composability with the group_by API
Parameters
----------
expr : ibis.expr.types.Expr
Returns
-------
grouped : GroupedTableExpr
"""
exprs = util.promote_list(expr)
new_having = self._having + exprs
return GroupedTableExpr(
self.table,
self.by,
having=new_having,
order_by=self._order_by,
window=self._window,
) | python | def having(self, expr):
"""
Add a post-aggregation result filter (like the having argument in
`aggregate`), for composability with the group_by API
Parameters
----------
expr : ibis.expr.types.Expr
Returns
-------
grouped : GroupedTableExpr
"""
exprs = util.promote_list(expr)
new_having = self._having + exprs
return GroupedTableExpr(
self.table,
self.by,
having=new_having,
order_by=self._order_by,
window=self._window,
) | ['def', 'having', '(', 'self', ',', 'expr', ')', ':', 'exprs', '=', 'util', '.', 'promote_list', '(', 'expr', ')', 'new_having', '=', 'self', '.', '_having', '+', 'exprs', 'return', 'GroupedTableExpr', '(', 'self', '.', 'table', ',', 'self', '.', 'by', ',', 'having', '=', 'new_having', ',', 'order_by', '=', 'self', '.', '_order_by', ',', 'window', '=', 'self', '.', '_window', ',', ')'] | Add a post-aggregation result filter (like the having argument in
`aggregate`), for composability with the group_by API
Parameters
----------
expr : ibis.expr.types.Expr
Returns
-------
grouped : GroupedTableExpr | ['Add', 'a', 'post', '-', 'aggregation', 'result', 'filter', '(', 'like', 'the', 'having', 'argument', 'in', 'aggregate', ')', 'for', 'composability', 'with', 'the', 'group_by', 'API'] | train | https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/expr/groupby.py#L98-L119 |
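A hedged sketch of the fluent usage this enables; the table schema and column names are made up:

import ibis

purchases = ibis.table([('city', 'string'), ('price', 'double')], 'purchases')
expr = (purchases.group_by('city')
                 .having(purchases.count() > 100)     # keep only groups with >100 rows
                 .aggregate(avg_price=purchases.price.mean()))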
438 | bwohlberg/sporco | sporco/admm/admm.py | ADMM.rsdl_sn | def rsdl_sn(self, U):
"""Compute dual residual normalisation term.
Overriding this method is required if methods :meth:`cnst_A`,
:meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
overridden.
"""
return self.rho * np.linalg.norm(self.cnst_AT(U)) | python | def rsdl_sn(self, U):
"""Compute dual residual normalisation term.
Overriding this method is required if methods :meth:`cnst_A`,
:meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
overridden.
"""
return self.rho * np.linalg.norm(self.cnst_AT(U)) | ['def', 'rsdl_sn', '(', 'self', ',', 'U', ')', ':', 'return', 'self', '.', 'rho', '*', 'np', '.', 'linalg', '.', 'norm', '(', 'self', '.', 'cnst_AT', '(', 'U', ')', ')'] | Compute dual residual normalisation term.
Overriding this method is required if methods :meth:`cnst_A`,
:meth:`cnst_AT`, :meth:`cnst_B`, and :meth:`cnst_c` are not
overridden. | ['Compute', 'dual', 'residual', 'normalisation', 'term', '.'] | train | https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/admm.py#L766-L774 |
439 | bitesofcode/projexui | projexui/widgets/xnodewidget/xnode.py | XNode.setVisible | def setVisible( self, state ):
"""
Sets whether or not this node is visible in the scene.
:param state | <bool>
"""
self._visible = state
super(XNode, self).setVisible(self.isVisible())
self.dispatch.visibilityChanged.emit(state)
self.setDirty() | python | def setVisible( self, state ):
"""
Sets whether or not this node is visible in the scene.
:param state | <bool>
"""
self._visible = state
super(XNode, self).setVisible(self.isVisible())
self.dispatch.visibilityChanged.emit(state)
self.setDirty() | ['def', 'setVisible', '(', 'self', ',', 'state', ')', ':', 'self', '.', '_visible', '=', 'state', 'super', '(', 'XNode', ',', 'self', ')', '.', 'setVisible', '(', 'self', '.', 'isVisible', '(', ')', ')', 'self', '.', 'dispatch', '.', 'visibilityChanged', '.', 'emit', '(', 'state', ')', 'self', '.', 'setDirty', '(', ')'] | Sets whether or not this node is visible in the scene.
:param state | <bool> | ['Sets', 'whether', 'or', 'not', 'this', 'node', 'is', 'visible', 'in', 'the', 'scene', '.', ':', 'param', 'state', '|', '<bool', '>'] | train | https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xnodewidget/xnode.py#L2032-L2043 |
440 | gbiggs/rtsprofile | rtsprofile/participant.py | Participant.parse_xml_node | def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a participant into this
object.
'''
if node.getElementsByTagNameNS(RTS_NS, 'Participant').length != 1:
raise InvalidParticipantNodeError
self.target_component = TargetComponent().parse_xml_node(\
node.getElementsByTagNameNS(RTS_NS, 'Participant')[0])
return self | python | def parse_xml_node(self, node):
'''Parse an xml.dom Node object representing a participant into this
object.
'''
if node.getElementsByTagNameNS(RTS_NS, 'Participant').length != 1:
raise InvalidParticipantNodeError
self.target_component = TargetComponent().parse_xml_node(\
node.getElementsByTagNameNS(RTS_NS, 'Participant')[0])
return self | ['def', 'parse_xml_node', '(', 'self', ',', 'node', ')', ':', 'if', 'node', '.', 'getElementsByTagNameNS', '(', 'RTS_NS', ',', "'Participant'", ')', '.', 'length', '!=', '1', ':', 'raise', 'InvalidParticipantNodeError', 'self', '.', 'target_component', '=', 'TargetComponent', '(', ')', '.', 'parse_xml_node', '(', 'node', '.', 'getElementsByTagNameNS', '(', 'RTS_NS', ',', "'Participant'", ')', '[', '0', ']', ')', 'return', 'self'] | Parse an xml.dom Node object representing a participant into this
object. | ['Parse', 'an', 'xml', '.', 'dom', 'Node', 'object', 'representing', 'a', 'participant', 'into', 'this', 'object', '.'] | train | https://github.com/gbiggs/rtsprofile/blob/fded6eddcb0b25fe9808b1b12336a4413ea00905/rtsprofile/participant.py#L65-L74 |
441 | seung-lab/cloud-volume | cloudvolume/cloudvolume.py | CloudVolume.init_submodules | def init_submodules(self, cache):
"""cache = path or bool"""
self.cache = CacheService(cache, weakref.proxy(self))
self.mesh = PrecomputedMeshService(weakref.proxy(self))
self.skeleton = PrecomputedSkeletonService(weakref.proxy(self)) | python | def init_submodules(self, cache):
"""cache = path or bool"""
self.cache = CacheService(cache, weakref.proxy(self))
self.mesh = PrecomputedMeshService(weakref.proxy(self))
self.skeleton = PrecomputedSkeletonService(weakref.proxy(self)) | ['def', 'init_submodules', '(', 'self', ',', 'cache', ')', ':', 'self', '.', 'cache', '=', 'CacheService', '(', 'cache', ',', 'weakref', '.', 'proxy', '(', 'self', ')', ')', 'self', '.', 'mesh', '=', 'PrecomputedMeshService', '(', 'weakref', '.', 'proxy', '(', 'self', ')', ')', 'self', '.', 'skeleton', '=', 'PrecomputedSkeletonService', '(', 'weakref', '.', 'proxy', '(', 'self', ')', ')'] | cache = path or bool | ['cache', '=', 'path', 'or', 'bool'] | train | https://github.com/seung-lab/cloud-volume/blob/d2fd4500333f1bc3cd3e3919a8b649cec5d8e214/cloudvolume/cloudvolume.py#L239-L243 |
442 | pyrogram/pyrogram | pyrogram/client/client.py | Client.save_file | def save_file(self,
path: str,
file_id: int = None,
file_part: int = 0,
progress: callable = None,
progress_args: tuple = ()):
"""Use this method to upload a file onto Telegram servers, without actually sending the message to anyone.
This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API
method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an
InputFile type is required.
Args:
path (``str``):
The path of the file you want to upload that exists on your local machine.
file_id (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
file_part (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the uploaded file is returned in form of an InputFile object.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
part_size = 512 * 1024
file_size = os.path.getsize(path)
if file_size == 0:
raise ValueError("File size equals to 0 B")
if file_size > 1500 * 1024 * 1024:
raise ValueError("Telegram doesn't support uploading files bigger than 1500 MiB")
file_total_parts = int(math.ceil(file_size / part_size))
is_big = True if file_size > 10 * 1024 * 1024 else False
is_missing_part = True if file_id is not None else False
file_id = file_id or self.rnd_id()
md5_sum = md5() if not is_big and not is_missing_part else None
session = Session(self, self.dc_id, self.auth_key, is_media=True)
session.start()
try:
with open(path, "rb") as f:
f.seek(part_size * file_part)
while True:
chunk = f.read(part_size)
if not chunk:
if not is_big:
md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()])
break
for _ in range(3):
if is_big:
rpc = functions.upload.SaveBigFilePart(
file_id=file_id,
file_part=file_part,
file_total_parts=file_total_parts,
bytes=chunk
)
else:
rpc = functions.upload.SaveFilePart(
file_id=file_id,
file_part=file_part,
bytes=chunk
)
if session.send(rpc):
break
else:
raise AssertionError("Telegram didn't accept chunk #{} of {}".format(file_part, path))
if is_missing_part:
return
if not is_big:
md5_sum.update(chunk)
file_part += 1
if progress:
progress(self, min(file_part * part_size, file_size), file_size, *progress_args)
except Client.StopTransmission:
raise
except Exception as e:
log.error(e, exc_info=True)
else:
if is_big:
return types.InputFileBig(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
)
else:
return types.InputFile(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
md5_checksum=md5_sum
)
finally:
session.stop() | python | def save_file(self,
path: str,
file_id: int = None,
file_part: int = 0,
progress: callable = None,
progress_args: tuple = ()):
"""Use this method to upload a file onto Telegram servers, without actually sending the message to anyone.
This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API
method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an
InputFile type is required.
Args:
path (``str``):
The path of the file you want to upload that exists on your local machine.
file_id (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
file_part (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the uploaded file is returned in form of an InputFile object.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
"""
part_size = 512 * 1024
file_size = os.path.getsize(path)
if file_size == 0:
raise ValueError("File size equals to 0 B")
if file_size > 1500 * 1024 * 1024:
raise ValueError("Telegram doesn't support uploading files bigger than 1500 MiB")
file_total_parts = int(math.ceil(file_size / part_size))
is_big = True if file_size > 10 * 1024 * 1024 else False
is_missing_part = True if file_id is not None else False
file_id = file_id or self.rnd_id()
md5_sum = md5() if not is_big and not is_missing_part else None
session = Session(self, self.dc_id, self.auth_key, is_media=True)
session.start()
try:
with open(path, "rb") as f:
f.seek(part_size * file_part)
while True:
chunk = f.read(part_size)
if not chunk:
if not is_big:
md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()])
break
for _ in range(3):
if is_big:
rpc = functions.upload.SaveBigFilePart(
file_id=file_id,
file_part=file_part,
file_total_parts=file_total_parts,
bytes=chunk
)
else:
rpc = functions.upload.SaveFilePart(
file_id=file_id,
file_part=file_part,
bytes=chunk
)
if session.send(rpc):
break
else:
raise AssertionError("Telegram didn't accept chunk #{} of {}".format(file_part, path))
if is_missing_part:
return
if not is_big:
md5_sum.update(chunk)
file_part += 1
if progress:
progress(self, min(file_part * part_size, file_size), file_size, *progress_args)
except Client.StopTransmission:
raise
except Exception as e:
log.error(e, exc_info=True)
else:
if is_big:
return types.InputFileBig(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
)
else:
return types.InputFile(
id=file_id,
parts=file_total_parts,
name=os.path.basename(path),
md5_checksum=md5_sum
)
finally:
session.stop() | ['def', 'save_file', '(', 'self', ',', 'path', ':', 'str', ',', 'file_id', ':', 'int', '=', 'None', ',', 'file_part', ':', 'int', '=', '0', ',', 'progress', ':', 'callable', '=', 'None', ',', 'progress_args', ':', 'tuple', '=', '(', ')', ')', ':', 'part_size', '=', '512', '*', '1024', 'file_size', '=', 'os', '.', 'path', '.', 'getsize', '(', 'path', ')', 'if', 'file_size', '==', '0', ':', 'raise', 'ValueError', '(', '"File size equals to 0 B"', ')', 'if', 'file_size', '>', '1500', '*', '1024', '*', '1024', ':', 'raise', 'ValueError', '(', '"Telegram doesn\'t support uploading files bigger than 1500 MiB"', ')', 'file_total_parts', '=', 'int', '(', 'math', '.', 'ceil', '(', 'file_size', '/', 'part_size', ')', ')', 'is_big', '=', 'True', 'if', 'file_size', '>', '10', '*', '1024', '*', '1024', 'else', 'False', 'is_missing_part', '=', 'True', 'if', 'file_id', 'is', 'not', 'None', 'else', 'False', 'file_id', '=', 'file_id', 'or', 'self', '.', 'rnd_id', '(', ')', 'md5_sum', '=', 'md5', '(', ')', 'if', 'not', 'is_big', 'and', 'not', 'is_missing_part', 'else', 'None', 'session', '=', 'Session', '(', 'self', ',', 'self', '.', 'dc_id', ',', 'self', '.', 'auth_key', ',', 'is_media', '=', 'True', ')', 'session', '.', 'start', '(', ')', 'try', ':', 'with', 'open', '(', 'path', ',', '"rb"', ')', 'as', 'f', ':', 'f', '.', 'seek', '(', 'part_size', '*', 'file_part', ')', 'while', 'True', ':', 'chunk', '=', 'f', '.', 'read', '(', 'part_size', ')', 'if', 'not', 'chunk', ':', 'if', 'not', 'is_big', ':', 'md5_sum', '=', '""', '.', 'join', '(', '[', 'hex', '(', 'i', ')', '[', '2', ':', ']', '.', 'zfill', '(', '2', ')', 'for', 'i', 'in', 'md5_sum', '.', 'digest', '(', ')', ']', ')', 'break', 'for', '_', 'in', 'range', '(', '3', ')', ':', 'if', 'is_big', ':', 'rpc', '=', 'functions', '.', 'upload', '.', 'SaveBigFilePart', '(', 'file_id', '=', 'file_id', ',', 'file_part', '=', 'file_part', ',', 'file_total_parts', '=', 'file_total_parts', ',', 'bytes', '=', 'chunk', ')', 'else', ':', 'rpc', '=', 'functions', '.', 'upload', '.', 'SaveFilePart', '(', 'file_id', '=', 'file_id', ',', 'file_part', '=', 'file_part', ',', 'bytes', '=', 'chunk', ')', 'if', 'session', '.', 'send', '(', 'rpc', ')', ':', 'break', 'else', ':', 'raise', 'AssertionError', '(', '"Telegram didn\'t accept chunk #{} of {}"', '.', 'format', '(', 'file_part', ',', 'path', ')', ')', 'if', 'is_missing_part', ':', 'return', 'if', 'not', 'is_big', ':', 'md5_sum', '.', 'update', '(', 'chunk', ')', 'file_part', '+=', '1', 'if', 'progress', ':', 'progress', '(', 'self', ',', 'min', '(', 'file_part', '*', 'part_size', ',', 'file_size', ')', ',', 'file_size', ',', '*', 'progress_args', ')', 'except', 'Client', '.', 'StopTransmission', ':', 'raise', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'error', '(', 'e', ',', 'exc_info', '=', 'True', ')', 'else', ':', 'if', 'is_big', ':', 'return', 'types', '.', 'InputFileBig', '(', 'id', '=', 'file_id', ',', 'parts', '=', 'file_total_parts', ',', 'name', '=', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ',', ')', 'else', ':', 'return', 'types', '.', 'InputFile', '(', 'id', '=', 'file_id', ',', 'parts', '=', 'file_total_parts', ',', 'name', '=', 'os', '.', 'path', '.', 'basename', '(', 'path', ')', ',', 'md5_checksum', '=', 'md5_sum', ')', 'finally', ':', 'session', '.', 'stop', '(', ')'] | Use this method to upload a file onto Telegram servers, without actually sending the message to anyone.
This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API
method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an
InputFile type is required.
Args:
path (``str``):
The path of the file you want to upload that exists on your local machine.
file_id (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
file_part (``int``, *optional*):
In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk.
progress (``callable``, *optional*):
Pass a callback function to view the upload progress.
The function must take *(client, current, total, \*args)* as positional arguments (look at the section
below for a detailed description).
progress_args (``tuple``, *optional*):
Extra custom arguments for the progress callback function. Useful, for example, if you want to pass
a chat_id and a message_id in order to edit a message with the updated progress.
Other Parameters:
client (:obj:`Client <pyrogram.Client>`):
The Client itself, useful when you want to call other API methods inside the callback function.
current (``int``):
The amount of bytes uploaded so far.
total (``int``):
The size of the file.
*args (``tuple``, *optional*):
Extra custom arguments as defined in the *progress_args* parameter.
You can either keep *\*args* or add every single extra argument in your function signature.
Returns:
On success, the uploaded file is returned in form of an InputFile object.
Raises:
:class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. | ['Use', 'this', 'method', 'to', 'upload', 'a', 'file', 'onto', 'Telegram', 'servers', 'without', 'actually', 'sending', 'the', 'message', 'to', 'anyone', '.'] | train | https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L1388-L1519 |
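`save_file` only uploads; the returned InputFile/InputFileBig is meant to be passed to a raw function afterwards. A minimal usage sketch, assuming a configured session named "my_account" and a local video.mp4 (both placeholders):

```python
from pyrogram import Client

def on_progress(client, current, total, label):
    # label arrives via progress_args; current/total are byte counts.
    print("{}: {:.1f}%".format(label, 100.0 * current / total))

app = Client("my_account")  # assumes api_id/api_hash are already configured
app.start()
try:
    input_file = app.save_file(
        "video.mp4",
        progress=on_progress,
        progress_args=("upload",),
    )
    # input_file can now be used wherever a raw function expects an InputFile.
finally:
    app.stop()
```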
443 | dwavesystems/dimod | dimod/generators/random.py | uniform | def uniform(graph, vartype, low=0.0, high=1.0, cls=BinaryQuadraticModel,
seed=None):
"""Generate a bqm with random biases and offset.
Biases and offset are drawn from a uniform distribution range (low, high).
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the bqm loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
low (float, optional, default=0.0):
The low end of the range for the random biases.
high (float, optional, default=1.0):
The high end of the range for the random biases.
cls (:class:`.BinaryQuadraticModel`):
Binary quadratic model class to build from.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel`
"""
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
variables, edges = graph
index = {v: idx for idx, v in enumerate(variables)}
if edges:
irow, icol = zip(*((index[u], index[v]) for u, v in edges))
else:
irow = icol = tuple()
ldata = r.uniform(low, high, size=len(variables))
qdata = r.uniform(low, high, size=len(irow))
offset = r.uniform(low, high)
return cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, vartype,
variable_order=variables) | python | def uniform(graph, vartype, low=0.0, high=1.0, cls=BinaryQuadraticModel,
seed=None):
"""Generate a bqm with random biases and offset.
Biases and offset are drawn from a uniform distribution range (low, high).
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the bqm loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
low (float, optional, default=0.0):
The low end of the range for the random biases.
high (float, optional, default=1.0):
The high end of the range for the random biases.
cls (:class:`.BinaryQuadraticModel`):
Binary quadratic model class to build from.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel`
"""
if seed is None:
seed = numpy.random.randint(2**32, dtype=np.uint32)
r = numpy.random.RandomState(seed)
variables, edges = graph
index = {v: idx for idx, v in enumerate(variables)}
if edges:
irow, icol = zip(*((index[u], index[v]) for u, v in edges))
else:
irow = icol = tuple()
ldata = r.uniform(low, high, size=len(variables))
qdata = r.uniform(low, high, size=len(irow))
offset = r.uniform(low, high)
return cls.from_numpy_vectors(ldata, (irow, icol, qdata), offset, vartype,
variable_order=variables) | ['def', 'uniform', '(', 'graph', ',', 'vartype', ',', 'low', '=', '0.0', ',', 'high', '=', '1.0', ',', 'cls', '=', 'BinaryQuadraticModel', ',', 'seed', '=', 'None', ')', ':', 'if', 'seed', 'is', 'None', ':', 'seed', '=', 'numpy', '.', 'random', '.', 'randint', '(', '2', '**', '32', ',', 'dtype', '=', 'np', '.', 'uint32', ')', 'r', '=', 'numpy', '.', 'random', '.', 'RandomState', '(', 'seed', ')', 'variables', ',', 'edges', '=', 'graph', 'index', '=', '{', 'v', ':', 'idx', 'for', 'idx', ',', 'v', 'in', 'enumerate', '(', 'variables', ')', '}', 'if', 'edges', ':', 'irow', ',', 'icol', '=', 'zip', '(', '*', '(', '(', 'index', '[', 'u', ']', ',', 'index', '[', 'v', ']', ')', 'for', 'u', ',', 'v', 'in', 'edges', ')', ')', 'else', ':', 'irow', '=', 'icol', '=', 'tuple', '(', ')', 'ldata', '=', 'r', '.', 'uniform', '(', 'low', ',', 'high', ',', 'size', '=', 'len', '(', 'variables', ')', ')', 'qdata', '=', 'r', '.', 'uniform', '(', 'low', ',', 'high', ',', 'size', '=', 'len', '(', 'irow', ')', ')', 'offset', '=', 'r', '.', 'uniform', '(', 'low', ',', 'high', ')', 'return', 'cls', '.', 'from_numpy_vectors', '(', 'ldata', ',', '(', 'irow', ',', 'icol', ',', 'qdata', ')', ',', 'offset', ',', 'vartype', ',', 'variable_order', '=', 'variables', ')'] | Generate a bqm with random biases and offset.
Biases and offset are drawn from a uniform distribution range (low, high).
Args:
graph (int/tuple[nodes, edges]/:obj:`~networkx.Graph`):
The graph to build the bqm loops on. Either an integer n, interpreted as a
complete graph of size n, or a nodes/edges pair, or a NetworkX graph.
vartype (:class:`.Vartype`/str/set):
Variable type for the binary quadratic model. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
low (float, optional, default=0.0):
The low end of the range for the random biases.
high (float, optional, default=1.0):
The high end of the range for the random biases.
cls (:class:`.BinaryQuadraticModel`):
Binary quadratic model class to build from.
seed (int, optional, default=None):
Random seed.
Returns:
:obj:`.BinaryQuadraticModel` | ['Generate', 'a', 'bqm', 'with', 'random', 'biases', 'and', 'offset', '.'] | train | https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/generators/random.py#L28-L79 |
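Although the docstring lists several graph forms, the body shown here unpacks `variables, edges = graph`, so a (nodes, edges) pair is the form handled directly. A small sketch with a fixed seed for reproducibility:

```python
import dimod

nodes = ['a', 'b', 'c']
edges = [('a', 'b'), ('b', 'c')]  # a 3-node path as a (nodes, edges) pair

bqm = dimod.generators.uniform((nodes, edges), 'SPIN',
                               low=-1.0, high=1.0, seed=42)
print(bqm.linear)     # three linear biases drawn uniformly from [-1, 1)
print(bqm.quadratic)  # one quadratic bias per edge
print(bqm.offset)     # the offset is drawn from the same range
```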
444 | apache/incubator-mxnet | example/bayesian-methods/bdk_demo.py | synthetic_grad | def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
"""Get synthetic gradient value"""
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
grad[:] = grad_npy
return grad | python | def synthetic_grad(X, theta, sigma1, sigma2, sigmax, rescale_grad=1.0, grad=None):
"""Get synthetic gradient value"""
if grad is None:
grad = nd.empty(theta.shape, theta.context)
theta1 = theta.asnumpy()[0]
theta2 = theta.asnumpy()[1]
v1 = sigma1 ** 2
v2 = sigma2 ** 2
vx = sigmax ** 2
denominator = numpy.exp(-(X - theta1) ** 2 / (2 * vx)) + numpy.exp(
-(X - theta1 - theta2) ** 2 / (2 * vx))
grad_npy = numpy.zeros(theta.shape)
grad_npy[0] = -rescale_grad * ((numpy.exp(-(X - theta1) ** 2 / (2 * vx)) * (X - theta1) / vx
+ numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta1 / v1
grad_npy[1] = -rescale_grad * ((numpy.exp(-(X - theta1 - theta2) ** 2 / (2 * vx)) *
(X - theta1 - theta2) / vx) / denominator).sum() + theta2 / v2
grad[:] = grad_npy
return grad | ['def', 'synthetic_grad', '(', 'X', ',', 'theta', ',', 'sigma1', ',', 'sigma2', ',', 'sigmax', ',', 'rescale_grad', '=', '1.0', ',', 'grad', '=', 'None', ')', ':', 'if', 'grad', 'is', 'None', ':', 'grad', '=', 'nd', '.', 'empty', '(', 'theta', '.', 'shape', ',', 'theta', '.', 'context', ')', 'theta1', '=', 'theta', '.', 'asnumpy', '(', ')', '[', '0', ']', 'theta2', '=', 'theta', '.', 'asnumpy', '(', ')', '[', '1', ']', 'v1', '=', 'sigma1', '**', '2', 'v2', '=', 'sigma2', '**', '2', 'vx', '=', 'sigmax', '**', '2', 'denominator', '=', 'numpy', '.', 'exp', '(', '-', '(', 'X', '-', 'theta1', ')', '**', '2', '/', '(', '2', '*', 'vx', ')', ')', '+', 'numpy', '.', 'exp', '(', '-', '(', 'X', '-', 'theta1', '-', 'theta2', ')', '**', '2', '/', '(', '2', '*', 'vx', ')', ')', 'grad_npy', '=', 'numpy', '.', 'zeros', '(', 'theta', '.', 'shape', ')', 'grad_npy', '[', '0', ']', '=', '-', 'rescale_grad', '*', '(', '(', 'numpy', '.', 'exp', '(', '-', '(', 'X', '-', 'theta1', ')', '**', '2', '/', '(', '2', '*', 'vx', ')', ')', '*', '(', 'X', '-', 'theta1', ')', '/', 'vx', '+', 'numpy', '.', 'exp', '(', '-', '(', 'X', '-', 'theta1', '-', 'theta2', ')', '**', '2', '/', '(', '2', '*', 'vx', ')', ')', '*', '(', 'X', '-', 'theta1', '-', 'theta2', ')', '/', 'vx', ')', '/', 'denominator', ')', '.', 'sum', '(', ')', '+', 'theta1', '/', 'v1', 'grad_npy', '[', '1', ']', '=', '-', 'rescale_grad', '*', '(', '(', 'numpy', '.', 'exp', '(', '-', '(', 'X', '-', 'theta1', '-', 'theta2', ')', '**', '2', '/', '(', '2', '*', 'vx', ')', ')', '*', '(', 'X', '-', 'theta1', '-', 'theta2', ')', '/', 'vx', ')', '/', 'denominator', ')', '.', 'sum', '(', ')', '+', 'theta2', '/', 'v2', 'grad', '[', ':', ']', '=', 'grad_npy', 'return', 'grad'] | Get synthetic gradient value | ['Get', 'synthetic', 'gradient', 'value'] | train | https://github.com/apache/incubator-mxnet/blob/1af29e9c060a4c7d60eeaacba32afdb9a7775ba7/example/bayesian-methods/bdk_demo.py#L121-L139 |
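This gradient belongs to the two-component Gaussian-mixture posterior used in the SGLD demo. A hedged sketch of one evaluation, with `synthetic_grad` assumed in scope as defined above; the sigma values follow the classic Welling & Teh mixture setup:

```python
import numpy
import mxnet.ndarray as nd

# Toy observations standing in for data drawn from the target mixture.
X = numpy.random.normal(loc=0.0, scale=numpy.sqrt(2), size=(100,))
theta = nd.array([0.0, 1.0])  # current guess for (theta1, theta2)

grad = synthetic_grad(X, theta,
                      sigma1=numpy.sqrt(10), sigma2=1.0, sigmax=numpy.sqrt(2))
print(grad.asnumpy())  # 2-vector: gradient of the negative log posterior
```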
445 | codelv/enaml-native-cli | enamlnativecli/main.py | Link.find_packages | def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches | python | def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches | ['def', 'find_packages', '(', 'path', ')', ':', 'matches', '=', '[', ']', 'root', '=', 'join', '(', 'path', ',', "'src'", ',', "'main'", ',', "'java'", ')', 'for', 'folder', ',', 'dirnames', ',', 'filenames', 'in', 'os', '.', 'walk', '(', 'root', ')', ':', 'for', 'filename', 'in', 'fnmatch', '.', 'filter', '(', 'filenames', ',', "'*Package.java'", ')', ':', "#: Open and make sure it's an EnamlPackage somewhere", 'with', 'open', '(', 'join', '(', 'folder', ',', 'filename', ')', ')', 'as', 'f', ':', 'if', '"implements EnamlPackage"', 'in', 'f', '.', 'read', '(', ')', ':', 'package', '=', 'os', '.', 'path', '.', 'relpath', '(', 'folder', ',', 'root', ')', 'matches', '.', 'append', '(', 'os', '.', 'path', '.', 'join', '(', 'package', ',', 'filename', ')', ')', 'return', 'matches'] | Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path. | ['Find', 'all', 'java', 'files', 'matching', 'the', '*', 'Package', '.', 'java', 'pattern', 'within', 'the', 'given', 'enaml', 'package', 'directory', 'relative', 'to', 'the', 'java', 'source', 'path', '.'] | train | https://github.com/codelv/enaml-native-cli/blob/81d6faa7e3dd437956f661c512031e49c0d44b63/enamlnativecli/main.py#L868-L881 |
446 | rbuffat/pyepw | pyepw/epw.py | Location.export | def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with the value top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.city))
out.append(self._to_str(self.state_province_region))
out.append(self._to_str(self.country))
out.append(self._to_str(self.source))
out.append(self._to_str(self.wmo))
out.append(self._to_str(self.latitude))
out.append(self._to_str(self.longitude))
out.append(self._to_str(self.timezone))
out.append(self._to_str(self.elevation))
return ",".join(out) | python | def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with the value top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.city))
out.append(self._to_str(self.state_province_region))
out.append(self._to_str(self.country))
out.append(self._to_str(self.source))
out.append(self._to_str(self.wmo))
out.append(self._to_str(self.latitude))
out.append(self._to_str(self.longitude))
out.append(self._to_str(self.timezone))
out.append(self._to_str(self.elevation))
return ",".join(out) | ['def', 'export', '(', 'self', ',', 'top', '=', 'True', ')', ':', 'out', '=', '[', ']', 'if', 'top', ':', 'out', '.', 'append', '(', 'self', '.', '_internal_name', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'city', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'state_province_region', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'country', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'source', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'wmo', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'latitude', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'longitude', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'timezone', ')', ')', 'out', '.', 'append', '(', 'self', '.', '_to_str', '(', 'self', '.', 'elevation', ')', ')', 'return', '","', '.', 'join', '(', 'out', ')'] | Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with the value top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation | ['Exports', 'object', 'to', 'its', 'string', 'representation', '.'] | train | https://github.com/rbuffat/pyepw/blob/373d4d3c8386c8d35789f086ac5f6018c2711745/pyepw/epw.py#L451-L476 |
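A hedged serialization sketch; attribute names follow the fields concatenated in the body above, and the leading LOCATION token assumes that is the object's `_internal_name`:

```python
from pyepw.epw import Location

loc = Location()
loc.city = 'Denver'
loc.state_province_region = 'CO'
loc.country = 'USA'
loc.source = 'TMY3'
loc.wmo = '724666'
loc.latitude = 39.74
loc.longitude = -105.18
loc.timezone = -7.0
loc.elevation = 1829.0

print(loc.export(top=True))
# -> LOCATION,Denver,CO,USA,TMY3,724666,39.74,-105.18,-7.0,1829.0
```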
447 | crocs-muni/roca | roca/detect.py | RocaFingerprinter.dump | def dump(self, ret):
"""
Dumps the return value
:param ret:
:return:
"""
if self.args.flatten:
ret = drop_none(flatten(ret))
logger.info('Dump: \n' + json.dumps(ret, cls=AutoJSONEncoder, indent=2 if self.args.indent else None)) | python | def dump(self, ret):
"""
Dumps the return value
:param ret:
:return:
"""
if self.args.flatten:
ret = drop_none(flatten(ret))
logger.info('Dump: \n' + json.dumps(ret, cls=AutoJSONEncoder, indent=2 if self.args.indent else None)) | ['def', 'dump', '(', 'self', ',', 'ret', ')', ':', 'if', 'self', '.', 'args', '.', 'flatten', ':', 'ret', '=', 'drop_none', '(', 'flatten', '(', 'ret', ')', ')', 'logger', '.', 'info', '(', "'Dump: \\n'", '+', 'json', '.', 'dumps', '(', 'ret', ',', 'cls', '=', 'AutoJSONEncoder', ',', 'indent', '=', '2', 'if', 'self', '.', 'args', '.', 'indent', 'else', 'None', ')', ')'] | Dumps the return value
:param ret:
:return: | ['Dumps', 'the', 'return', 'value', ':', 'param', 'ret', ':', ':', 'return', ':'] | train | https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L2123-L2132 |
448 | xtream1101/cutil | cutil/__init__.py | remove_html_tag | def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents removed from the input string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result | python | def remove_html_tag(input_str='', tag=None):
"""
Returns a string with the html tag and all its contents removed from the input string
"""
result = input_str
if tag is not None:
pattern = re.compile('<{tag}[\s\S]+?/{tag}>'.format(tag=tag))
result = re.sub(pattern, '', str(input_str))
return result | ['def', 'remove_html_tag', '(', 'input_str', '=', "''", ',', 'tag', '=', 'None', ')', ':', 'result', '=', 'input_str', 'if', 'tag', 'is', 'not', 'None', ':', 'pattern', '=', 're', '.', 'compile', '(', "'<{tag}[\\s\\S]+?/{tag}>'", '.', 'format', '(', 'tag', '=', 'tag', ')', ')', 'result', '=', 're', '.', 'sub', '(', 'pattern', ',', "''", ',', 'str', '(', 'input_str', ')', ')', 'return', 'result'] | Returns a string with the html tag and all its contents from a string | ['Returns', 'a', 'string', 'with', 'the', 'html', 'tag', 'and', 'all', 'its', 'contents', 'from', 'a', 'string'] | train | https://github.com/xtream1101/cutil/blob/2e4d1f00e66154b44d4ccffb9b1db3f37e87f2e8/cutil/__init__.py#L527-L536 |
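A quick usage sketch; the non-greedy pattern removes the opening tag, its contents, and the matching closing tag, while omitting `tag` returns the input unchanged:

```python
from cutil import remove_html_tag

html = "keep <script>alert('x');</script> this"
print(remove_html_tag(html, tag='script'))  # -> "keep  this"
print(remove_html_tag(html))                # no tag given: returned unchanged
```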
449 | MoseleyBioinformaticsLab/ctfile | ctfile/ctfile.py | Ctab._to_ctfile_counts_line | def _to_ctfile_counts_line(self, key):
"""Create counts line in ``CTfile`` format.
:param str key: Counts line key.
:return: Counts line string.
:rtype: :py:class:`str`
"""
counter = OrderedCounter(self.counts_line_format)
self[key]['number_of_atoms'] = str(len(self.atoms))
self[key]['number_of_bonds'] = str(len(self.bonds))
counts_line = ''.join([str(value).rjust(spacing) for value, spacing
in zip(self[key].values(), counter.values())])
return '{}\n'.format(counts_line) | python | def _to_ctfile_counts_line(self, key):
"""Create counts line in ``CTfile`` format.
:param str key: Counts line key.
:return: Counts line string.
:rtype: :py:class:`str`
"""
counter = OrderedCounter(self.counts_line_format)
self[key]['number_of_atoms'] = str(len(self.atoms))
self[key]['number_of_bonds'] = str(len(self.bonds))
counts_line = ''.join([str(value).rjust(spacing) for value, spacing
in zip(self[key].values(), counter.values())])
return '{}\n'.format(counts_line) | ['def', '_to_ctfile_counts_line', '(', 'self', ',', 'key', ')', ':', 'counter', '=', 'OrderedCounter', '(', 'self', '.', 'counts_line_format', ')', 'self', '[', 'key', ']', '[', "'number_of_atoms'", ']', '=', 'str', '(', 'len', '(', 'self', '.', 'atoms', ')', ')', 'self', '[', 'key', ']', '[', "'number_of_bonds'", ']', '=', 'str', '(', 'len', '(', 'self', '.', 'bonds', ')', ')', 'counts_line', '=', "''", '.', 'join', '(', '[', 'str', '(', 'value', ')', '.', 'rjust', '(', 'spacing', ')', 'for', 'value', ',', 'spacing', 'in', 'zip', '(', 'self', '[', 'key', ']', '.', 'values', '(', ')', ',', 'counter', '.', 'values', '(', ')', ')', ']', ')', 'return', "'{}\\n'", '.', 'format', '(', 'counts_line', ')'] | Create counts line in ``CTfile`` format.
:param str key: Counts line key.
:return: Counts line string.
:rtype: :py:class:`str` | ['Create', 'counts', 'line', 'in', 'CTfile', 'format', '.'] | train | https://github.com/MoseleyBioinformaticsLab/ctfile/blob/eae864126cd9102207df5d363a3222256a0f1396/ctfile/ctfile.py#L346-L358 |
450 | wilfilho/BingTranslator | BingTranslator/__init__.py | Translator._make_request | def _make_request(self, params, translation_url, headers):
"""
This is the final step, where the request is made, the data is
retrieved and returned.
"""
resp = requests.get(translation_url, params=params, headers=headers)
resp.encoding = "UTF-8-sig"
result = resp.json()
return result | python | def _make_request(self, params, translation_url, headers):
"""
This is the final step, where the request is made, the data is
retrieved and returned.
"""
resp = requests.get(translation_url, params=params, headers=headers)
resp.encoding = "UTF-8-sig"
result = resp.json()
return result | ['def', '_make_request', '(', 'self', ',', 'params', ',', 'translation_url', ',', 'headers', ')', ':', 'resp', '=', 'requests', '.', 'get', '(', 'translation_url', ',', 'params', '=', 'params', ',', 'headers', '=', 'headers', ')', 'resp', '.', 'encoding', '=', '"UTF-8-sig"', 'result', '=', 'resp', '.', 'json', '(', ')', 'return', 'result'] | This is the final step, where the request is made, the data is
retrieved and returned. | ['This', 'is', 'the', 'final', 'step', 'where', 'the', 'request', 'is', 'made', 'the', 'data', 'is', 'retrieved', 'and', 'returned', '.'] | train | https://github.com/wilfilho/BingTranslator/blob/6bada6fe1ac4177cc7dc62ff16dab561ba714534/BingTranslator/__init__.py#L64-L72 |
451 | Microsoft/azure-devops-python-api | azure-devops/azure/devops/v5_0/member_entitlement_management/member_entitlement_management_client.py | MemberEntitlementManagementClient.update_user_entitlement | def update_user_entitlement(self, document, user_id):
"""UpdateUserEntitlement.
[Preview API] Edit the entitlements (License, Extensions, Projects, Teams etc) for a user.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform on the user.
:param str user_id: ID of the user.
:rtype: :class:`<UserEntitlementsPatchResponse> <azure.devops.v5_0.member_entitlement_management.models.UserEntitlementsPatchResponse>`
"""
route_values = {}
if user_id is not None:
route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',
version='5.0-preview.2',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
return self._deserialize('UserEntitlementsPatchResponse', response) | python | def update_user_entitlement(self, document, user_id):
"""UpdateUserEntitlement.
[Preview API] Edit the entitlements (License, Extensions, Projects, Teams etc) for a user.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform on the user.
:param str user_id: ID of the user.
:rtype: :class:`<UserEntitlementsPatchResponse> <azure.devops.v5_0.member_entitlement_management.models.UserEntitlementsPatchResponse>`
"""
route_values = {}
if user_id is not None:
route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
content = self._serialize.body(document, '[JsonPatchOperation]')
response = self._send(http_method='PATCH',
location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',
version='5.0-preview.2',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
return self._deserialize('UserEntitlementsPatchResponse', response) | ['def', 'update_user_entitlement', '(', 'self', ',', 'document', ',', 'user_id', ')', ':', 'route_values', '=', '{', '}', 'if', 'user_id', 'is', 'not', 'None', ':', 'route_values', '[', "'userId'", ']', '=', 'self', '.', '_serialize', '.', 'url', '(', "'user_id'", ',', 'user_id', ',', "'str'", ')', 'content', '=', 'self', '.', '_serialize', '.', 'body', '(', 'document', ',', "'[JsonPatchOperation]'", ')', 'response', '=', 'self', '.', '_send', '(', 'http_method', '=', "'PATCH'", ',', 'location_id', '=', "'8480c6eb-ce60-47e9-88df-eca3c801638b'", ',', 'version', '=', "'5.0-preview.2'", ',', 'route_values', '=', 'route_values', ',', 'content', '=', 'content', ',', 'media_type', '=', "'application/json-patch+json'", ')', 'return', 'self', '.', '_deserialize', '(', "'UserEntitlementsPatchResponse'", ',', 'response', ')'] | UpdateUserEntitlement.
[Preview API] Edit the entitlements (License, Extensions, Projects, Teams etc) for a user.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.member_entitlement_management.models.[JsonPatchOperation]>` document: JsonPatchDocument containing the operations to perform on the user.
:param str user_id: ID of the user.
:rtype: :class:`<UserEntitlementsPatchResponse> <azure.devops.v5_0.member_entitlement_management.models.UserEntitlementsPatchResponse>` | ['UpdateUserEntitlement', '.', '[', 'Preview', 'API', ']', 'Edit', 'the', 'entitlements', '(', 'License', 'Extensions', 'Projects', 'Teams', 'etc', ')', 'for', 'a', 'user', '.', ':', 'param', ':', 'class', ':', '<', '[', 'JsonPatchOperation', ']', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'member_entitlement_management', '.', 'models', '.', '[', 'JsonPatchOperation', ']', '>', 'document', ':', 'JsonPatchDocument', 'containing', 'the', 'operations', 'to', 'perform', 'on', 'the', 'user', '.', ':', 'param', 'str', 'user_id', ':', 'ID', 'of', 'the', 'user', '.', ':', 'rtype', ':', ':', 'class', ':', '<UserEntitlementsPatchResponse', '>', '<azure', '.', 'devops', '.', 'v5_0', '.', 'member_entitlement_management', '.', 'models', '.', 'UserEntitlementsPatchResponse', '>'] | train | https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/member_entitlement_management/member_entitlement_management_client.py#L257-L274 |
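The `document` parameter is a JSON Patch array. A sketch of a license change, with the patch path, license value, and client setup all illustrative assumptions rather than values from this row:

```python
from azure.devops.v5_0.member_entitlement_management.models import JsonPatchOperation

# Assumes `client` is an already-authenticated
# MemberEntitlementManagementClient instance.
document = [
    JsonPatchOperation(
        op="replace",
        path="/accessLevel",                      # assumed patch target
        value={"accountLicenseType": "express"},  # assumed license payload
    )
]
resp = client.update_user_entitlement(
    document, user_id="00000000-0000-0000-0000-000000000000")
print(resp)  # UserEntitlementsPatchResponse
```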
452 | Turbo87/aerofiles | aerofiles/igc/writer.py | Writer.write_task_metadata | def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 2),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record) | python | def write_task_metadata(
self, declaration_datetime=None, flight_date=None,
task_number=None, turnpoints=None, text=None):
"""
Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 2),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record
"""
if declaration_datetime is None:
declaration_datetime = datetime.datetime.utcnow()
if isinstance(declaration_datetime, datetime.datetime):
declaration_datetime = (
self.format_date(declaration_datetime) +
self.format_time(declaration_datetime)
)
elif not patterns.DATETIME.match(declaration_datetime):
raise ValueError('Invalid declaration datetime: %s' %
declaration_datetime)
if flight_date is None:
flight_date = '000000'
else:
flight_date = self.format_date(flight_date)
if task_number is None:
task_number = 1
elif not isinstance(task_number, int):
raise ValueError('Invalid task number: %s' % task_number)
if not isinstance(turnpoints, int):
raise ValueError('Invalid turnpoints: %s' % turnpoints)
record = '{0}{1}{2:04d}{3:02d}'.format(
declaration_datetime,
flight_date,
task_number,
turnpoints,
)
if text:
record += text
self.write_record('C', record) | ['def', 'write_task_metadata', '(', 'self', ',', 'declaration_datetime', '=', 'None', ',', 'flight_date', '=', 'None', ',', 'task_number', '=', 'None', ',', 'turnpoints', '=', 'None', ',', 'text', '=', 'None', ')', ':', 'if', 'declaration_datetime', 'is', 'None', ':', 'declaration_datetime', '=', 'datetime', '.', 'datetime', '.', 'utcnow', '(', ')', 'if', 'isinstance', '(', 'declaration_datetime', ',', 'datetime', '.', 'datetime', ')', ':', 'declaration_datetime', '=', '(', 'self', '.', 'format_date', '(', 'declaration_datetime', ')', '+', 'self', '.', 'format_time', '(', 'declaration_datetime', ')', ')', 'elif', 'not', 'patterns', '.', 'DATETIME', '.', 'match', '(', 'declaration_datetime', ')', ':', 'raise', 'ValueError', '(', "'Invalid declaration datetime: %s'", '%', 'declaration_datetime', ')', 'if', 'flight_date', 'is', 'None', ':', 'flight_date', '=', "'000000'", 'else', ':', 'flight_date', '=', 'self', '.', 'format_date', '(', 'flight_date', ')', 'if', 'task_number', 'is', 'None', ':', 'task_number', '=', '1', 'elif', 'not', 'isinstance', '(', 'task_number', ',', 'int', ')', ':', 'raise', 'ValueError', '(', "'Invalid task number: %s'", '%', 'task_number', ')', 'if', 'not', 'isinstance', '(', 'turnpoints', ',', 'int', ')', ':', 'raise', 'ValueError', '(', "'Invalid turnpoints: %s'", '%', 'turnpoints', ')', 'record', '=', "'{0}{1}{2:04d}{3:02d}'", '.', 'format', '(', 'declaration_datetime', ',', 'flight_date', ',', 'task_number', ',', 'turnpoints', ',', ')', 'if', 'text', ':', 'record', '+=', 'text', 'self', '.', 'write_record', '(', "'C'", ',', 'record', ')'] | Write the task declaration metadata record::
writer.write_task_metadata(
datetime.datetime(2014, 4, 13, 12, 53, 2),
task_number=42,
turnpoints=3,
)
# -> C140413125302000000004203
There are sensible defaults in place for all parameters except for the
``turnpoints`` parameter. If you don't pass that parameter the method
will raise a :class:`ValueError`. The other parameter defaults are
mentioned in the list below.
:param declaration_datetime: a :class:`datetime.datetime` instance of
the UTC date and time at the time of declaration (default: current
date and time)
:param flight_date: a :class:`datetime.date` instance of the intended
date of the flight (default: ``000000``, which means "use
declaration date")
:param task_number: task number for the flight date or an integer-based
identifier (default: ``0001``)
:param turnpoints: the number of turnpoints in the task (not counting
start and finish points!)
:param text: optional text to append to the metadata record | ['Write', 'the', 'task', 'declaration', 'metadata', 'record', '::'] | train | https://github.com/Turbo87/aerofiles/blob/d8b7b04a1fcea5c98f89500de1164619a4ec7ef4/aerofiles/igc/writer.py#L484-L550 |
453 | portfoliome/postpy | postpy/dml.py | insert | def insert(conn, qualified_name: str, column_names, records):
"""Insert a collection of namedtuple records."""
query = create_insert_statement(qualified_name, column_names)
with conn:
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
for record in records:
cursor.execute(query, record) | python | def insert(conn, qualified_name: str, column_names, records):
"""Insert a collection of namedtuple records."""
query = create_insert_statement(qualified_name, column_names)
with conn:
with conn.cursor(cursor_factory=NamedTupleCursor) as cursor:
for record in records:
cursor.execute(query, record) | ['def', 'insert', '(', 'conn', ',', 'qualified_name', ':', 'str', ',', 'column_names', ',', 'records', ')', ':', 'query', '=', 'create_insert_statement', '(', 'qualified_name', ',', 'column_names', ')', 'with', 'conn', ':', 'with', 'conn', '.', 'cursor', '(', 'cursor_factory', '=', 'NamedTupleCursor', ')', 'as', 'cursor', ':', 'for', 'record', 'in', 'records', ':', 'cursor', '.', 'execute', '(', 'query', ',', 'record', ')'] | Insert a collection of namedtuple records. | ['Insert', 'a', 'collection', 'of', 'namedtuple', 'records', '.'] | train | https://github.com/portfoliome/postpy/blob/fe26199131b15295fc5f669a0ad2a7f47bf490ee/postpy/dml.py#L30-L38 |
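A usage sketch with namedtuple records; the connection parameters are placeholders:

```python
from collections import namedtuple
import psycopg2
from postpy.dml import insert

User = namedtuple('User', ['user_id', 'name'])
records = [User(1, 'ada'), User(2, 'grace')]

conn = psycopg2.connect(dbname='mydb')  # placeholder connection parameters
insert(conn, 'public.users', ['user_id', 'name'], records)
conn.close()
```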
454 | cytoscape/py2cytoscape | py2cytoscape/cyrest/command.py | command.quit | def quit(self,verbose=False):
"""
This command causes Cytoscape to exit. It is typically used at the end
of a script file.
:param verbose: print more
"""
response=api(url=self.__url+"/quit", verbose=verbose)
return response | python | def quit(self,verbose=False):
"""
This command causes Cytoscape to exit. It is typically used at the end
of a script file.
:param verbose: print more
"""
response=api(url=self.__url+"/quit", verbose=verbose)
return response | ['def', 'quit', '(', 'self', ',', 'verbose', '=', 'False', ')', ':', 'response', '=', 'api', '(', 'url', '=', 'self', '.', '__url', '+', '"/quit"', ',', 'verbose', '=', 'verbose', ')', 'return', 'response'] | This command causes Cytoscape to exit. It is typically used at the end
of a script file.
:param verbose: print more | ['This', 'command', 'causes', 'Cytoscape', 'to', 'exit', '.', 'It', 'is', 'typically', 'used', 'at', 'the', 'end', 'of', 'a', 'script', 'file', '.'] | train | https://github.com/cytoscape/py2cytoscape/blob/dd34de8d028f512314d0057168df7fef7c5d5195/py2cytoscape/cyrest/command.py#L51-L59 |
455 | philipbergen/popen | popen/__init__.py | Sh.env | def env(self, **kw):
'''
Allows adding/overriding env vars in the execution context.
:param kw: Key-value pairs
:return: self
'''
self._original_env = kw
if self._env is None:
self._env = dict(os.environ)
self._env.update({k: unicode(v) for k, v in kw.iteritems()})
return self | python | def env(self, **kw):
'''
Allows adding/overriding env vars in the execution context.
:param kw: Key-value pairs
:return: self
'''
self._original_env = kw
if self._env is None:
self._env = dict(os.environ)
self._env.update({k: unicode(v) for k, v in kw.iteritems()})
return self | ['def', 'env', '(', 'self', ',', '*', '*', 'kw', ')', ':', 'self', '.', '_original_env', '=', 'kw', 'if', 'self', '.', '_env', 'is', 'None', ':', 'self', '.', '_env', '=', 'dict', '(', 'os', '.', 'environ', ')', 'self', '.', '_env', '.', 'update', '(', '{', 'k', ':', 'unicode', '(', 'v', ')', 'for', 'k', ',', 'v', 'in', 'kw', '.', 'iteritems', '(', ')', '}', ')', 'return', 'self'] | Allows adding/overriding env vars in the execution context.
:param kw: Key-value pairs
:return: self | ['Allows', 'adding', '/', 'overriding', 'env', 'vars', 'in', 'the', 'execution', 'context', '.', ':', 'param', 'kw', ':', 'Key', '-', 'value', 'pairs', ':', 'return', ':', 'self'] | train | https://github.com/philipbergen/popen/blob/85a6897475dd865403ec3b704fb5fb0e93f0ff64/popen/__init__.py#L113-L123 |
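Because `env()` returns self it chains fluently. A hedged sketch of the merge semantics; how `Sh` is constructed and run is assumed from the surrounding module:

```python
sh = Sh('printenv')                    # Sh construction assumed from this module
sh.env(API_TOKEN='secret', RETRIES=3)  # keyword values are coerced to strings

assert sh._env['RETRIES'] == '3'       # added/overridden keys become strings
assert 'PATH' in sh._env               # the rest is inherited from os.environ
```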
456 | TeamHG-Memex/eli5 | eli5/sklearn/utils.py | has_intercept | def has_intercept(estimator):
# type: (Any) -> bool
""" Return True if an estimator has intercept fit. """
if hasattr(estimator, 'fit_intercept'):
return estimator.fit_intercept
if hasattr(estimator, 'intercept_'):
if estimator.intercept_ is None:
return False
# scikit-learn sets intercept to zero vector if it is not fit
return np.any(estimator.intercept_)
return False | python | def has_intercept(estimator):
# type: (Any) -> bool
""" Return True if an estimator has intercept fit. """
if hasattr(estimator, 'fit_intercept'):
return estimator.fit_intercept
if hasattr(estimator, 'intercept_'):
if estimator.intercept_ is None:
return False
# scikit-learn sets intercept to zero vector if it is not fit
return np.any(estimator.intercept_)
return False | ['def', 'has_intercept', '(', 'estimator', ')', ':', '# type: (Any) -> bool', 'if', 'hasattr', '(', 'estimator', ',', "'fit_intercept'", ')', ':', 'return', 'estimator', '.', 'fit_intercept', 'if', 'hasattr', '(', 'estimator', ',', "'intercept_'", ')', ':', 'if', 'estimator', '.', 'intercept_', 'is', 'None', ':', 'return', 'False', '# scikit-learn sets intercept to zero vector if it is not fit', 'return', 'np', '.', 'any', '(', 'estimator', '.', 'intercept_', ')', 'return', 'False'] | Return True if an estimator has intercept fit. | ['Return', 'True', 'if', 'an', 'estimator', 'has', 'intercept', 'fit', '.'] | train | https://github.com/TeamHG-Memex/eli5/blob/371b402a0676295c05e582a2dd591f7af476b86b/eli5/sklearn/utils.py#L57-L67 |
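A quick check against scikit-learn's LinearRegression; the `fit_intercept` attribute short-circuits the test before `intercept_` is inspected:

```python
import numpy as np
from sklearn.linear_model import LinearRegression
from eli5.sklearn.utils import has_intercept

X = np.array([[0.0], [1.0], [2.0]])
y = np.array([1.0, 3.0, 5.0])

print(has_intercept(LinearRegression(fit_intercept=True).fit(X, y)))   # True
print(has_intercept(LinearRegression(fit_intercept=False).fit(X, y)))  # False
```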
457 | RiotGames/cloud-inquisitor | backend/cloud_inquisitor/plugins/views/roles.py | RoleGet.get | def get(self, roleId):
"""Get a specific role information"""
role = db.Role.find_one(Role.role_id == roleId)
if not role:
return self.make_response('No such role found', HTTP.NOT_FOUND)
return self.make_response({'role': role}) | python | def get(self, roleId):
"""Get a specific role information"""
role = db.Role.find_one(Role.role_id == roleId)
if not role:
return self.make_response('No such role found', HTTP.NOT_FOUND)
return self.make_response({'role': role}) | ['def', 'get', '(', 'self', ',', 'roleId', ')', ':', 'role', '=', 'db', '.', 'Role', '.', 'find_one', '(', 'Role', '.', 'role_id', '==', 'roleId', ')', 'if', 'not', 'role', ':', 'return', 'self', '.', 'make_response', '(', "'No such role found'", ',', 'HTTP', '.', 'NOT_FOUND', ')', 'return', 'self', '.', 'make_response', '(', '{', "'role'", ':', 'role', '}', ')'] | Get a specific role information | ['Get', 'a', 'specific', 'role', 'information'] | train | https://github.com/RiotGames/cloud-inquisitor/blob/181dc2566ca59fc855f695b7fcc2c3b934e6ee9f/backend/cloud_inquisitor/plugins/views/roles.py#L62-L69 |
458 | boriel/zxbasic | arch/zx48k/backend/__init__.py | _paramf | def _paramf(ins):
""" Pushes 40bit (float) param into the stack
"""
output = _float_oper(ins.quad[1])
output.extend(_fpush())
return output | python | def _paramf(ins):
""" Pushes 40bit (float) param into the stack
"""
output = _float_oper(ins.quad[1])
output.extend(_fpush())
return output | ['def', '_paramf', '(', 'ins', ')', ':', 'output', '=', '_float_oper', '(', 'ins', '.', 'quad', '[', '1', ']', ')', 'output', '.', 'extend', '(', '_fpush', '(', ')', ')', 'return', 'output'] | Pushes 40bit (float) param into the stack | ['Pushes', '40bit', '(', 'float', ')', 'param', 'into', 'the', 'stack'] | train | https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/arch/zx48k/backend/__init__.py#L1600-L1605 |
459 | arokem/python-matlab-bridge | pymatbridge/messenger/make.py | which | def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license.
'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '' and is_executable_file(filename):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if pty:
if is_executable_file(ff):
return ff
else:
pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
pathext = pathext.split(os.pathsep) + ['']
for ext in pathext:
if os.access(ff + ext, os.X_OK):
return ff + ext
return None | python | def which(filename):
'''This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license.
'''
# Special case where filename contains an explicit path.
if os.path.dirname(filename) != '' and is_executable_file(filename):
return filename
if 'PATH' not in os.environ or os.environ['PATH'] == '':
p = os.defpath
else:
p = os.environ['PATH']
pathlist = p.split(os.pathsep)
for path in pathlist:
ff = os.path.join(path, filename)
if pty:
if is_executable_file(ff):
return ff
else:
pathext = os.environ.get('Pathext', '.exe;.com;.bat;.cmd')
pathext = pathext.split(os.pathsep) + ['']
for ext in pathext:
if os.access(ff + ext, os.X_OK):
return ff + ext
return None | ['def', 'which', '(', 'filename', ')', ':', '# Special case where filename contains an explicit path.', 'if', 'os', '.', 'path', '.', 'dirname', '(', 'filename', ')', '!=', "''", 'and', 'is_executable_file', '(', 'filename', ')', ':', 'return', 'filename', 'if', "'PATH'", 'not', 'in', 'os', '.', 'environ', 'or', 'os', '.', 'environ', '[', "'PATH'", ']', '==', "''", ':', 'p', '=', 'os', '.', 'defpath', 'else', ':', 'p', '=', 'os', '.', 'environ', '[', "'PATH'", ']', 'pathlist', '=', 'p', '.', 'split', '(', 'os', '.', 'pathsep', ')', 'for', 'path', 'in', 'pathlist', ':', 'ff', '=', 'os', '.', 'path', '.', 'join', '(', 'path', ',', 'filename', ')', 'if', 'pty', ':', 'if', 'is_executable_file', '(', 'ff', ')', ':', 'return', 'ff', 'else', ':', 'pathext', '=', 'os', '.', 'environ', '.', 'get', '(', "'Pathext'", ',', "'.exe;.com;.bat;.cmd'", ')', 'pathext', '=', 'pathext', '.', 'split', '(', 'os', '.', 'pathsep', ')', '+', '[', "''", ']', 'for', 'ext', 'in', 'pathext', ':', 'if', 'os', '.', 'access', '(', 'ff', '+', 'ext', ',', 'os', '.', 'X_OK', ')', ':', 'return', 'ff', '+', 'ext', 'return', 'None'] | This takes a given filename; tries to find it in the environment path;
then checks if it is executable. This returns the full path to the filename
if found and executable. Otherwise this returns None.
Note
----
This function is taken from the pexpect module, see module doc-string for
license. | ['This', 'takes', 'a', 'given', 'filename', ';', 'tries', 'to', 'find', 'it', 'in', 'the', 'environment', 'path', ';', 'then', 'checks', 'if', 'it', 'is', 'executable', '.', 'This', 'returns', 'the', 'full', 'path', 'to', 'the', 'filename', 'if', 'found', 'and', 'executable', '.', 'Otherwise', 'this', 'returns', 'None', '.'] | train | https://github.com/arokem/python-matlab-bridge/blob/9822c7b55435662f4f033c5479cc03fea2255755/pymatbridge/messenger/make.py#L88-L118 |
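A short lookup sketch, assuming the module above imports cleanly; on Windows the Pathext branch also tries extensions such as .exe and .bat:

```python
from pymatbridge.messenger.make import which

print(which('python'))       # e.g. '/usr/bin/python' on POSIX
print(which('no-such-bin'))  # None when nothing on PATH matches
```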
460 | GiulioRossetti/dynetx | dynetx/classes/dyngraph.py | DynGraph.add_path | def add_path(self, nodes, t=None):
"""Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
"""
nlist = list(nodes)
interaction = zip(nlist[:-1], nlist[1:])
self.add_interactions_from(interaction, t) | python | def add_path(self, nodes, t=None):
"""Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0)
"""
nlist = list(nodes)
interaction = zip(nlist[:-1], nlist[1:])
self.add_interactions_from(interaction, t) | ['def', 'add_path', '(', 'self', ',', 'nodes', ',', 't', '=', 'None', ')', ':', 'nlist', '=', 'list', '(', 'nodes', ')', 'interaction', '=', 'zip', '(', 'nlist', '[', ':', '-', '1', ']', ',', 'nlist', '[', '1', ':', ']', ')', 'self', '.', 'add_interactions_from', '(', 'interaction', ',', 't', ')'] | Add a path at time t.
Parameters
----------
nodes : iterable container
A container of nodes.
t : snapshot id (default=None)
See Also
--------
add_path, add_cycle
Examples
--------
>>> G = dn.DynGraph()
>>> G.add_path([0,1,2,3], t=0) | ['Add', 'a', 'path', 'at', 'time', 't', '.'] | train | https://github.com/GiulioRossetti/dynetx/blob/634e2b38f8950885aebfa079dad7d5e8d7563f1d/dynetx/classes/dyngraph.py#L835-L855 |
461 | mbj4668/pyang | pyang/yin_parser.py | YinParser.split_qname | def split_qname(qname):
"""Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method."""
res = qname.split(YinParser.ns_sep)
if len(res) == 1: # no namespace
return None, res[0]
else:
return res | python | def split_qname(qname):
"""Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method."""
res = qname.split(YinParser.ns_sep)
if len(res) == 1: # no namespace
return None, res[0]
else:
return res | ['def', 'split_qname', '(', 'qname', ')', ':', 'res', '=', 'qname', '.', 'split', '(', 'YinParser', '.', 'ns_sep', ')', 'if', 'len', '(', 'res', ')', '==', '1', ':', '# no namespace', 'return', 'None', ',', 'res', '[', '0', ']', 'else', ':', 'return', 'res'] | Split `qname` into namespace URI and local name
Return namespace and local name as a tuple. This is a static
method. | ['Split', 'qname', 'into', 'namespace', 'URI', 'and', 'local', 'name'] | train | https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/yin_parser.py#L55-L64 |
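A standalone sketch of the same split, with a hypothetical separator standing in for YinParser.ns_sep:

NS_SEP = '}'  # hypothetical; the real separator is defined on YinParser

def split_qname_sketch(qname):
    parts = qname.split(NS_SEP)
    if len(parts) == 1:  # no namespace prefix present
        return None, parts[0]
    return parts[0], parts[1]

print(split_qname_sketch('urn:example}leaf'))  # ('urn:example', 'leaf')
print(split_qname_sketch('leaf'))              # (None, 'leaf')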
462 | rstoneback/pysat | pysat/instruments/sw_kp.py | filter_geoquiet | def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None, kp_inst=None):
"""Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place
"""
if kp_inst is not None:
kp_inst.load(date=sat.date, verifyPad=True)
kpData = kp_inst
elif kpData is None:
kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
kp.load(date=sat.date, verifyPad=True)
kpData = kp
if maxKp is None:
maxKp = 3 + 1. / 3.
if filterTime is None:
filterTime = 24
# now the defaults are ensured, let's do some filtering
# date of satellite data
date = sat.date
selData = kpData[date-pds.DateOffset(days=1):date+pds.DateOffset(days=1)]
ind, = np.where(selData['kp'] >= maxKp)
for lind in ind:
sat.data[selData.index[lind]:(selData.index[lind]+pds.DateOffset(hours=filterTime) )] = np.NaN
sat.data = sat.data.dropna(axis=0, how='all')
return | python | def filter_geoquiet(sat, maxKp=None, filterTime=None, kpData=None, kp_inst=None):
"""Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place
"""
if kp_inst is not None:
kp_inst.load(date=sat.date, verifyPad=True)
kpData = kp_inst
elif kpData is None:
kp = pysat.Instrument('sw', 'kp', pad=pds.DateOffset(days=1))
kp.load(date=sat.date, verifyPad=True)
kpData = kp
if maxKp is None:
maxKp = 3 + 1. / 3.
if filterTime is None:
filterTime = 24
# now the defaults are ensured, let's do some filtering
# date of satellite data
date = sat.date
selData = kpData[date-pds.DateOffset(days=1):date+pds.DateOffset(days=1)]
ind, = np.where(selData['kp'] >= maxKp)
for lind in ind:
sat.data[selData.index[lind]:(selData.index[lind]+pds.DateOffset(hours=filterTime) )] = np.NaN
sat.data = sat.data.dropna(axis=0, how='all')
return | ['def', 'filter_geoquiet', '(', 'sat', ',', 'maxKp', '=', 'None', ',', 'filterTime', '=', 'None', ',', 'kpData', '=', 'None', ',', 'kp_inst', '=', 'None', ')', ':', 'if', 'kp_inst', 'is', 'not', 'None', ':', 'kp_inst', '.', 'load', '(', 'date', '=', 'sat', '.', 'date', ',', 'verifyPad', '=', 'True', ')', 'kpData', '=', 'kp_inst', 'elif', 'kpData', 'is', 'None', ':', 'kp', '=', 'pysat', '.', 'Instrument', '(', "'sw'", ',', "'kp'", ',', 'pad', '=', 'pds', '.', 'DateOffset', '(', 'days', '=', '1', ')', ')', 'kp', '.', 'load', '(', 'date', '=', 'sat', '.', 'date', ',', 'verifyPad', '=', 'True', ')', 'kpData', '=', 'kp', 'if', 'maxKp', 'is', 'None', ':', 'maxKp', '=', '3', '+', '1.', '/', '3.', 'if', 'filterTime', 'is', 'None', ':', 'filterTime', '=', '24', "# now the defaults are ensured, let's do some filtering", '# date of satellite data', 'date', '=', 'sat', '.', 'date', 'selData', '=', 'kpData', '[', 'date', '-', 'pds', '.', 'DateOffset', '(', 'days', '=', '1', ')', ':', 'date', '+', 'pds', '.', 'DateOffset', '(', 'days', '=', '1', ')', ']', 'ind', ',', '=', 'np', '.', 'where', '(', 'selData', '[', "'kp'", ']', '>=', 'maxKp', ')', 'for', 'lind', 'in', 'ind', ':', 'sat', '.', 'data', '[', 'selData', '.', 'index', '[', 'lind', ']', ':', '(', 'selData', '.', 'index', '[', 'lind', ']', '+', 'pds', '.', 'DateOffset', '(', 'hours', '=', 'filterTime', ')', ')', ']', '=', 'np', '.', 'NaN', 'sat', '.', 'data', '=', 'sat', '.', 'data', '.', 'dropna', '(', 'axis', '=', '0', ',', 'how', '=', "'all'", ')', 'return'] | Filters pysat.Instrument data for given time after Kp drops below gate.
Loads Kp data for the same timeframe covered by sat and sets sat.data to
NaN for times when Kp > maxKp and for filterTime after Kp drops below maxKp.
Parameters
----------
sat : pysat.Instrument
Instrument to be filtered
maxKp : float
Maximum Kp value allowed. Kp values above this trigger
sat.data filtering.
filterTime : int
Number of hours to filter data after Kp drops below maxKp
kpData : pysat.Instrument (optional)
Kp pysat.Instrument object with data already loaded
kp_inst : pysat.Instrument (optional)
Kp pysat.Instrument object ready to load Kp data. Overrides kpData.
Returns
-------
None : NoneType
sat Instrument object modified in place | ['Filters', 'pysat', '.', 'Instrument', 'data', 'for', 'given', 'time', 'after', 'Kp', 'drops', 'below', 'gate', '.', 'Loads', 'Kp', 'data', 'for', 'the', 'same', 'timeframe', 'covered', 'by', 'sat', 'and', 'sets', 'sat', '.', 'data', 'to', 'NaN', 'for', 'times', 'when', 'Kp', '>', 'maxKp', 'and', 'for', 'filterTime', 'after', 'Kp', 'drops', 'below', 'maxKp', '.', 'Parameters', '----------', 'sat', ':', 'pysat', '.', 'Instrument', 'Instrument', 'to', 'be', 'filtered', 'maxKp', ':', 'float', 'Maximum', 'Kp', 'value', 'allowed', '.', 'Kp', 'values', 'above', 'this', 'trigger', 'sat', '.', 'data', 'filtering', '.', 'filterTime', ':', 'int', 'Number', 'of', 'hours', 'to', 'filter', 'data', 'after', 'Kp', 'drops', 'below', 'maxKp', 'kpData', ':', 'pysat', '.', 'Instrument', '(', 'optional', ')', 'Kp', 'pysat', '.', 'Instrument', 'object', 'with', 'data', 'already', 'loaded', 'kp_inst', ':', 'pysat', '.', 'Instrument', '(', 'optional', ')', 'Kp', 'pysat', '.', 'Instrument', 'object', 'ready', 'to', 'load', 'Kp', 'data', '.', 'Overrides', 'kpData', '.', 'Returns', '-------', 'None', ':', 'NoneType', 'sat', 'Instrument', 'object', 'modified', 'in', 'place'] | train | https://github.com/rstoneback/pysat/blob/4ae1afd80e15e4449397d39dce8c3e969c32c422/pysat/instruments/sw_kp.py#L236-L286 |
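The core of the filter is NaN-masking a fixed window after each threshold crossing; a minimal pandas sketch with synthetic data (not the real Kp feed):

import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=12, freq='H')
data = pd.Series(np.arange(12.0), index=idx)
kp = pd.Series([1, 1, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1], index=idx)

max_kp, filter_hours = 3 + 1. / 3., 3
for t in kp.index[kp >= max_kp]:
    data[t:t + pd.DateOffset(hours=filter_hours)] = np.nan  # mask the storm window
print(data.dropna())  # hours 02:00 through 05:00 are gone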
463 | Vital-Fernandez/dazer | bin/lib/CodeTools/various.py | vitools.ufloatDict_nominal | def ufloatDict_nominal(self, ufloat_dict):
'This gives us a dictionary of nominal values from a dictionary of uncertainties'
return OrderedDict(izip(ufloat_dict.keys(), map(lambda x: x.nominal_value, ufloat_dict.values()))) | python | def ufloatDict_nominal(self, ufloat_dict):
'This gives us a dictionary of nominal values from a dictionary of uncertainties'
return OrderedDict(izip(ufloat_dict.keys(), map(lambda x: x.nominal_value, ufloat_dict.values()))) | ['def', 'ufloatDict_nominal', '(', 'self', ',', 'ufloat_dict', ')', ':', 'return', 'OrderedDict', '(', 'izip', '(', 'ufloat_dict', '.', 'keys', '(', ')', ',', 'map', '(', 'lambda', 'x', ':', 'x', '.', 'nominal_value', ',', 'ufloat_dict', '.', 'values', '(', ')', ')', ')', ')'] | This gives us a dictionary of nominal values from a dictionary of uncertainties | ['This', 'gives', 'us', 'a', 'dictionary', 'of', 'nominal', 'values', 'from', 'a', 'dictionary', 'of', 'uncertainties'] | train | https://github.com/Vital-Fernandez/dazer/blob/3c9ae8ae6d40ea33f22cc20dc11365d6d6e65244/bin/lib/CodeTools/various.py#L75-L77 |
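For example, with the uncertainties package installed, the same extraction looks like this (Python 3 zip in place of izip):

from collections import OrderedDict
from uncertainties import ufloat

udict = OrderedDict([('Te', ufloat(12000, 500)), ('ne', ufloat(100, 10))])
nominal = OrderedDict(zip(udict.keys(), (v.nominal_value for v in udict.values())))
print(nominal)  # OrderedDict([('Te', 12000.0), ('ne', 100.0)])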
464 | etcher-be/elib_config | elib_config/_setup.py | ELIBConfig.setup | def setup(
cls,
app_version: str,
app_name: str,
config_file_path: str,
config_sep_str: str,
root_path: typing.Optional[typing.List[str]] = None,
):
"""
Configures elib_config in one fell swoop
:param app_version: version of the application
:param app_name:name of the application
:param config_file_path: path to the config file to use
:param config_sep_str: separator for config values paths
:param root_path: list of strings that will be pre-pended to *all* config values paths (useful to setup a
prefix for the whole app)
"""
cls.app_version = app_version
cls.app_name = app_name
cls.config_file_path = config_file_path
cls.config_sep_str = config_sep_str
cls.root_path = root_path | python | def setup(
cls,
app_version: str,
app_name: str,
config_file_path: str,
config_sep_str: str,
root_path: typing.Optional[typing.List[str]] = None,
):
"""
Configures elib_config in one fell swoop
:param app_version: version of the application
:param app_name: name of the application
:param config_file_path: path to the config file to use
:param config_sep_str: separator for config values paths
:param root_path: list of strings that will be pre-pended to *all* config values paths (useful to setup a
prefix for the whole app)
"""
cls.app_version = app_version
cls.app_name = app_name
cls.config_file_path = config_file_path
cls.config_sep_str = config_sep_str
cls.root_path = root_path | ['def', 'setup', '(', 'cls', ',', 'app_version', ':', 'str', ',', 'app_name', ':', 'str', ',', 'config_file_path', ':', 'str', ',', 'config_sep_str', ':', 'str', ',', 'root_path', ':', 'typing', '.', 'Optional', '[', 'typing', '.', 'List', '[', 'str', ']', ']', '=', 'None', ',', ')', ':', 'cls', '.', 'app_version', '=', 'app_version', 'cls', '.', 'app_name', '=', 'app_name', 'cls', '.', 'config_file_path', '=', 'config_file_path', 'cls', '.', 'config_sep_str', '=', 'config_sep_str', 'cls', '.', 'root_path', '=', 'root_path'] | Configures elib_config in one fell swoop
:param app_version: version of the application
:param app_name: name of the application
:param config_file_path: path to the config file to use
:param config_sep_str: separator for config values paths
:param root_path: list of strings that will be pre-pended to *all* config values paths (useful to setup a
prefix for the whole app) | ['Configures', 'elib_config', 'in', 'one', 'fell', 'swoop'] | train | https://github.com/etcher-be/elib_config/blob/5d8c839e84d70126620ab0186dc1f717e5868bd0/elib_config/_setup.py#L40-L62 |
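A standalone sketch of the class-level configuration-holder pattern used here:

class ConfigSketch:
    app_name = ''
    config_file_path = ''

    @classmethod
    def setup(cls, app_name, config_file_path):
        # Store settings on the class so any importer sees the same values.
        cls.app_name = app_name
        cls.config_file_path = config_file_path

ConfigSketch.setup('demo', 'demo.toml')
print(ConfigSketch.app_name, ConfigSketch.config_file_path)  # demo demo.toml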
465 | MolSSI-BSE/basis_set_exchange | basis_set_exchange/converters/convert.py | convert_basis | def convert_basis(basis_dict, fmt, header=None):
'''
Returns the basis set data as a string representing
the data in the specified output format
'''
# make converters case insensitive
fmt = fmt.lower()
if fmt not in _converter_map:
raise RuntimeError('Unknown basis set format "{}"'.format(fmt))
converter = _converter_map[fmt]
# Determine if the converter supports all the types in the basis_dict
if converter['valid'] is not None:
ftypes = set(basis_dict['function_types'])
if ftypes > converter['valid']:
raise RuntimeError('Converter {} does not support all function types: {}'.format(fmt, str(ftypes)))
# Actually do the conversion
ret_str = converter['function'](basis_dict)
if header is not None and fmt != 'json':
comment_str = _converter_map[fmt]['comment']
header_str = comment_str + comment_str.join(header.splitlines(True))
ret_str = header_str + '\n\n' + ret_str
# HACK - Psi4 requires the first non-comment line be spherical/cartesian
# so we have to add that before the header
if fmt == 'psi4':
types = basis_dict['function_types']
harm_type = 'spherical' if 'spherical_gto' in types else 'cartesian'
ret_str = harm_type + '\n\n' + ret_str
return ret_str | python | def convert_basis(basis_dict, fmt, header=None):
'''
Returns the basis set data as a string representing
the data in the specified output format
'''
# make converters case insensitive
fmt = fmt.lower()
if fmt not in _converter_map:
raise RuntimeError('Unknown basis set format "{}"'.format(fmt))
converter = _converter_map[fmt]
# Determine if the converter supports all the types in the basis_dict
if converter['valid'] is not None:
ftypes = set(basis_dict['function_types'])
if ftypes > converter['valid']:
raise RuntimeError('Converter {} does not support all function types: {}'.format(fmt, str(ftypes)))
# Actually do the conversion
ret_str = converter['function'](basis_dict)
if header is not None and fmt != 'json':
comment_str = _converter_map[fmt]['comment']
header_str = comment_str + comment_str.join(header.splitlines(True))
ret_str = header_str + '\n\n' + ret_str
# HACK - Psi4 requires the first non-comment line be spherical/cartesian
# so we have to add that before the header
if fmt == 'psi4':
types = basis_dict['function_types']
harm_type = 'spherical' if 'spherical_gto' in types else 'cartesian'
ret_str = harm_type + '\n\n' + ret_str
return ret_str | ['def', 'convert_basis', '(', 'basis_dict', ',', 'fmt', ',', 'header', '=', 'None', ')', ':', '# make converters case insensitive', 'fmt', '=', 'fmt', '.', 'lower', '(', ')', 'if', 'fmt', 'not', 'in', '_converter_map', ':', 'raise', 'RuntimeError', '(', '\'Unknown basis set format "{}"\'', '.', 'format', '(', 'fmt', ')', ')', 'converter', '=', '_converter_map', '[', 'fmt', ']', '# Determine if the converter supports all the types in the basis_dict', 'if', 'converter', '[', "'valid'", ']', 'is', 'not', 'None', ':', 'ftypes', '=', 'set', '(', 'basis_dict', '[', "'function_types'", ']', ')', 'if', 'ftypes', '>', 'converter', '[', "'valid'", ']', ':', 'raise', 'RuntimeError', '(', "'Converter {} does not support all function types: {}'", '.', 'format', '(', 'fmt', ',', 'str', '(', 'ftypes', ')', ')', ')', '# Actually do the conversion', 'ret_str', '=', 'converter', '[', "'function'", ']', '(', 'basis_dict', ')', 'if', 'header', 'is', 'not', 'None', 'and', 'fmt', '!=', "'json'", ':', 'comment_str', '=', '_converter_map', '[', 'fmt', ']', '[', "'comment'", ']', 'header_str', '=', 'comment_str', '+', 'comment_str', '.', 'join', '(', 'header', '.', 'splitlines', '(', 'True', ')', ')', 'ret_str', '=', 'header_str', '+', "'\\n\\n'", '+', 'ret_str', '# HACK - Psi4 requires the first non-comment line be spherical/cartesian', '# so we have to add that before the header', 'if', 'fmt', '==', "'psi4'", ':', 'types', '=', 'basis_dict', '[', "'function_types'", ']', 'harm_type', '=', "'spherical'", 'if', "'spherical_gto'", 'in', 'types', 'else', "'cartesian'", 'ret_str', '=', 'harm_type', '+', "'\\n\\n'", '+', 'ret_str', 'return', 'ret_str'] | Returns the basis set data as a string representing
the data in the specified output format | ['Returns', 'the', 'basis', 'set', 'data', 'as', 'a', 'string', 'representing', 'the', 'data', 'in', 'the', 'specified', 'output', 'format'] | train | https://github.com/MolSSI-BSE/basis_set_exchange/blob/e79110aaeb65f392ed5032420322dee3336948f7/basis_set_exchange/converters/convert.py#L82-L116 |
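A minimal sketch of the dispatch-map pattern, with a hypothetical converter standing in for the real _converter_map entries:

def to_upper(data):
    return data.upper()

_converters = {'upper': {'function': to_upper, 'comment': '# '}}

def convert_sketch(data, fmt, header=None):
    conv = _converters[fmt.lower()]  # case-insensitive lookup
    out = conv['function'](data)
    if header is not None:
        out = conv['comment'] + header + '\n\n' + out
    return out

print(convert_sketch('basis data', 'UPPER', header='demo header'))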
466 | Othernet-Project/conz | conz/console.py | Console.menu | def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label for that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function passed to the ``pvpl()`` method can be customized
using the ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case the user selects an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)] | python | def menu(self, choices, prompt='Please choose from the provided options:',
error='Invalid choice', intro=None, strict=True, default=None,
numerator=lambda x: [i + 1 for i in range(x)],
formatter=lambda x, y: '{0:>3}) {1}'.format(x, y),
clean=utils.safeint):
""" Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function passed to the ``pvpl()`` method can be customized
using the ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case the user selects an invalid
value when strict checking is off.
"""
numbers = list(numerator(len(choices)))
labels = (label for _, label in choices)
values = [value for value, _ in choices]
# Print intro and menu itself
if intro:
self.pstd('\n' + utils.rewrap_long(intro))
for n, label in zip(numbers, labels):
self.pstd(formatter(n, label))
# Define the validator
validator = lambda x: x in numbers
val = self.rvpl(prompt, error=error, validator=validator, clean=clean,
strict=strict, default=default)
if not strict and val == default:
return val
return values[numbers.index(val)] | ['def', 'menu', '(', 'self', ',', 'choices', ',', 'prompt', '=', "'Please choose from the provided options:'", ',', 'error', '=', "'Invalid choice'", ',', 'intro', '=', 'None', ',', 'strict', '=', 'True', ',', 'default', '=', 'None', ',', 'numerator', '=', 'lambda', 'x', ':', '[', 'i', '+', '1', 'for', 'i', 'in', 'range', '(', 'x', ')', ']', ',', 'formatter', '=', 'lambda', 'x', ',', 'y', ':', "'{0:>3}) {1}'", '.', 'format', '(', 'x', ',', 'y', ')', ',', 'clean', '=', 'utils', '.', 'safeint', ')', ':', 'numbers', '=', 'list', '(', 'numerator', '(', 'len', '(', 'choices', ')', ')', ')', 'labels', '=', '(', 'label', 'for', '_', ',', 'label', 'in', 'choices', ')', 'values', '=', '[', 'value', 'for', 'value', ',', '_', 'in', 'choices', ']', '# Print intro and menu itself', 'if', 'intro', ':', 'self', '.', 'pstd', '(', "'\\n'", '+', 'utils', '.', 'rewrap_long', '(', 'intro', ')', ')', 'for', 'n', ',', 'label', 'in', 'zip', '(', 'numbers', ',', 'labels', ')', ':', 'self', '.', 'pstd', '(', 'formatter', '(', 'n', ',', 'label', ')', ')', '# Define the validator', 'validator', '=', 'lambda', 'x', ':', 'x', 'in', 'numbers', 'val', '=', 'self', '.', 'rvpl', '(', 'prompt', ',', 'error', '=', 'error', ',', 'validator', '=', 'validator', ',', 'clean', '=', 'clean', ',', 'strict', '=', 'strict', ',', 'default', '=', 'default', ')', 'if', 'not', 'strict', 'and', 'val', '==', 'default', ':', 'return', 'val', 'return', 'values', '[', 'numbers', '.', 'index', '(', 'val', ')', ']'] | Print a menu
The choices must be an iterable of two-tuples where the first value is
the value of the menu item, and the second is the label that
matches the value.
The menu will be printed with numeric choices. For example::
1) foo
2) bar
Formatting of the number is controlled by the formatter function which
can be overridden by passing the ``formatter`` argument.
The numbers used for the menu are generated using the numerator
function which can be specified using the ``numerator`` function. This
function must take the number of choices and return the same number of
items that will be used as choice characters as a list.
The cleaner function passed to the ``pvpl()`` method can be customized
using the ``clean`` argument. This function should generally be customized
whenever ``numerator`` is customized, as default cleaner converts
input to integers to match the default numerator.
Optional ``intro`` argument can be passed to print a message above the
menu.
The return value of this method is the value user has chosen. The
prompt will keep asking the user for input until a valid choice is
selected. Each time an invalid selection is made, error message is
printed. This message can be customized using ``error`` argument.
If ``strict`` argument is set, then only values in choices are allowed,
otherwise any value will be allowed. The ``default`` argument can be
used to define what value is returned in case the user selects an invalid
value when strict checking is off. | ['Print', 'a', 'menu'] | train | https://github.com/Othernet-Project/conz/blob/051214fa95a837c21595b03426a2c54c522d07a0/conz/console.py#L183-L239 |
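A standalone sketch of how the default numerator and formatter cooperate to render the menu lines:

choices = [('foo_value', 'foo'), ('bar_value', 'bar')]
numerator = lambda n: [i + 1 for i in range(n)]
formatter = lambda num, label: '{0:>3}) {1}'.format(num, label)

for num, (value, label) in zip(numerator(len(choices)), choices):
    print(formatter(num, label))
# prints:
#   1) foo
#   2) bar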
467 | bronto/javasphinx | javasphinx/compiler.py | JavadocRestCompiler.__output_see | def __output_see(self, see):
""" Convert the argument to a @see tag to rest """
if see.startswith('<a href'):
# HTML link -- <a href="...">...</a>
return self.__html_to_rst(see)
elif '"' in see:
# Plain text
return see
else:
# Type reference (default)
return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),) | python | def __output_see(self, see):
""" Convert the argument to a @see tag to rest """
if see.startswith('<a href'):
# HTML link -- <a href="...">...</a>
return self.__html_to_rst(see)
elif '"' in see:
# Plain text
return see
else:
# Type reference (default)
return ':java:ref:`%s`' % (see.replace('#', '.').replace(' ', ''),) | ['def', '__output_see', '(', 'self', ',', 'see', ')', ':', 'if', 'see', '.', 'startswith', '(', "'<a href'", ')', ':', '# HTML link -- <a href="...">...</a>', 'return', 'self', '.', '__html_to_rst', '(', 'see', ')', 'elif', '\'"\'', 'in', 'see', ':', '# Plain text', 'return', 'see', 'else', ':', '# Type reference (default)', 'return', "':java:ref:`%s`'", '%', '(', 'see', '.', 'replace', '(', "'#'", ',', "'.'", ')', '.', 'replace', '(', "' '", ',', "''", ')', ',', ')'] | Convert the argument to a @see tag to rest | ['Convert', 'the', 'argument', 'to', 'a'] | train | https://github.com/bronto/javasphinx/blob/cd1df27f1d70efaae079b74573efdd8e069ff02d/javasphinx/compiler.py#L95-L106 |
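A standalone sketch of the three-way dispatch on the @see argument (the HTML branch is stubbed out here):

def output_see_sketch(see):
    if see.startswith('<a href'):
        return '(converted HTML link)'  # stand-in for the HTML-to-rst conversion
    elif '"' in see:
        return see                      # plain text passes through untouched
    return ':java:ref:`%s`' % see.replace('#', '.').replace(' ', '')

print(output_see_sketch('java.util.List#size()'))
# :java:ref:`java.util.List.size()`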
468 | roclark/sportsreference | sportsreference/ncaab/teams.py | Teams._retrieve_all_teams | def _retrieve_all_teams(self, year):
"""
Find and create Team instances for all teams in the given season.
For a given season, parses the specified NCAAB stats table and finds
all requested stats. Each team then has a Team instance created which
includes all requested stats and a few identifiers, such as the team's
name and abbreviation. All of the individual Team instances are added
to a list.
Note that this method is called directly once Teams is invoked and does
not need to be called manually.
Parameters
----------
year : string
The requested year to pull stats from.
"""
team_data_dict = {}
if not year:
year = utils._find_year_for_season('ncaab')
doc = pq(BASIC_STATS_URL % year)
teams_list = utils._get_stats_table(doc, 'table#basic_school_stats')
doc = pq(BASIC_OPPONENT_STATS_URL % year)
opp_list = utils._get_stats_table(doc, 'table#basic_opp_stats')
doc = pq(ADVANCED_STATS_URL % year)
adv_teams_list = utils._get_stats_table(doc, 'table#adv_school_stats')
doc = pq(ADVANCED_OPPONENT_STATS_URL % year)
adv_opp_list = utils._get_stats_table(doc, 'table#adv_opp_stats')
for stats_list in [teams_list, opp_list, adv_teams_list, adv_opp_list]:
team_data_dict = self._add_stats_data(stats_list, team_data_dict)
for team_name, team_data in team_data_dict.items():
team = Team(team_data['data'],
self._conferences_dict[team_name.lower()],
year)
self._teams.append(team) | python | def _retrieve_all_teams(self, year):
"""
Find and create Team instances for all teams in the given season.
For a given season, parses the specified NCAAB stats table and finds
all requested stats. Each team then has a Team instance created which
includes all requested stats and a few identifiers, such as the team's
name and abbreviation. All of the individual Team instances are added
to a list.
Note that this method is called directly once Teams is invoked and does
not need to be called manually.
Parameters
----------
year : string
The requested year to pull stats from.
"""
team_data_dict = {}
if not year:
year = utils._find_year_for_season('ncaab')
doc = pq(BASIC_STATS_URL % year)
teams_list = utils._get_stats_table(doc, 'table#basic_school_stats')
doc = pq(BASIC_OPPONENT_STATS_URL % year)
opp_list = utils._get_stats_table(doc, 'table#basic_opp_stats')
doc = pq(ADVANCED_STATS_URL % year)
adv_teams_list = utils._get_stats_table(doc, 'table#adv_school_stats')
doc = pq(ADVANCED_OPPONENT_STATS_URL % year)
adv_opp_list = utils._get_stats_table(doc, 'table#adv_opp_stats')
for stats_list in [teams_list, opp_list, adv_teams_list, adv_opp_list]:
team_data_dict = self._add_stats_data(stats_list, team_data_dict)
for team_name, team_data in team_data_dict.items():
team = Team(team_data['data'],
self._conferences_dict[team_name.lower()],
year)
self._teams.append(team) | ['def', '_retrieve_all_teams', '(', 'self', ',', 'year', ')', ':', 'team_data_dict', '=', '{', '}', 'if', 'not', 'year', ':', 'year', '=', 'utils', '.', '_find_year_for_season', '(', "'ncaab'", ')', 'doc', '=', 'pq', '(', 'BASIC_STATS_URL', '%', 'year', ')', 'teams_list', '=', 'utils', '.', '_get_stats_table', '(', 'doc', ',', "'table#basic_school_stats'", ')', 'doc', '=', 'pq', '(', 'BASIC_OPPONENT_STATS_URL', '%', 'year', ')', 'opp_list', '=', 'utils', '.', '_get_stats_table', '(', 'doc', ',', "'table#basic_opp_stats'", ')', 'doc', '=', 'pq', '(', 'ADVANCED_STATS_URL', '%', 'year', ')', 'adv_teams_list', '=', 'utils', '.', '_get_stats_table', '(', 'doc', ',', "'table#adv_school_stats'", ')', 'doc', '=', 'pq', '(', 'ADVANCED_OPPONENT_STATS_URL', '%', 'year', ')', 'adv_opp_list', '=', 'utils', '.', '_get_stats_table', '(', 'doc', ',', "'table#adv_opp_stats'", ')', 'for', 'stats_list', 'in', '[', 'teams_list', ',', 'opp_list', ',', 'adv_teams_list', ',', 'adv_opp_list', ']', ':', 'team_data_dict', '=', 'self', '.', '_add_stats_data', '(', 'stats_list', ',', 'team_data_dict', ')', 'for', 'team_name', ',', 'team_data', 'in', 'team_data_dict', '.', 'items', '(', ')', ':', 'team', '=', 'Team', '(', 'team_data', '[', "'data'", ']', ',', 'self', '.', '_conferences_dict', '[', 'team_name', '.', 'lower', '(', ')', ']', ',', 'year', ')', 'self', '.', '_teams', '.', 'append', '(', 'team', ')'] | Find and create Team instances for all teams in the given season.
For a given season, parses the specified NCAAB stats table and finds
all requested stats. Each team then has a Team instance created which
includes all requested stats and a few identifiers, such as the team's
name and abbreviation. All of the individual Team instances are added
to a list.
Note that this method is called directly once Teams is invoked and does
not need to be called manually.
Parameters
----------
year : string
The requested year to pull stats from. | ['Find', 'and', 'create', 'Team', 'instances', 'for', 'all', 'teams', 'in', 'the', 'given', 'season', '.'] | train | https://github.com/roclark/sportsreference/blob/ea0bae432be76450e137671d2998eb38f962dffd/sportsreference/ncaab/teams.py#L1096-L1134 |
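A minimal sketch of folding several stats tables into one per-team dictionary, in the spirit of _add_stats_data (synthetic rows, hypothetical helper name):

def add_stats_data_sketch(rows, team_data):
    for name, stats in rows:
        team_data.setdefault(name, {}).update(stats)  # merge this table's columns
    return team_data

tables = [[('Purdue', {'wins': 25})],
          [('Purdue', {'opp_points': 61.0})]]
team_data = {}
for rows in tables:
    team_data = add_stats_data_sketch(rows, team_data)
print(team_data)  # {'Purdue': {'wins': 25, 'opp_points': 61.0}}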
469 | payu-org/payu | payu/envmod.py | module | def module(command, *args):
"""Run the modulecmd tool and use its Python-formatted output to set the
environment variables."""
if 'MODULESHOME' not in os.environ:
print('payu: warning: No Environment Modules found; skipping {0} call.'
''.format(command))
return
modulecmd = ('{0}/bin/modulecmd'.format(os.environ['MODULESHOME']))
cmd = '{0} python {1} {2}'.format(modulecmd, command, ' '.join(args))
envs, _ = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE).communicate()
exec(envs) | python | def module(command, *args):
"""Run the modulecmd tool and use its Python-formatted output to set the
environment variables."""
if 'MODULESHOME' not in os.environ:
print('payu: warning: No Environment Modules found; skipping {0} call.'
''.format(command))
return
modulecmd = ('{0}/bin/modulecmd'.format(os.environ['MODULESHOME']))
cmd = '{0} python {1} {2}'.format(modulecmd, command, ' '.join(args))
envs, _ = subprocess.Popen(shlex.split(cmd),
stdout=subprocess.PIPE).communicate()
exec(envs) | ['def', 'module', '(', 'command', ',', '*', 'args', ')', ':', 'if', "'MODULESHOME'", 'not', 'in', 'os', '.', 'environ', ':', 'print', '(', "'payu: warning: No Environment Modules found; skipping {0} call.'", "''", '.', 'format', '(', 'command', ')', ')', 'return', 'modulecmd', '=', '(', "'{0}/bin/modulecmd'", '.', 'format', '(', 'os', '.', 'environ', '[', "'MODULESHOME'", ']', ')', ')', 'cmd', '=', "'{0} python {1} {2}'", '.', 'format', '(', 'modulecmd', ',', 'command', ',', "' '", '.', 'join', '(', 'args', ')', ')', 'envs', ',', '_', '=', 'subprocess', '.', 'Popen', '(', 'shlex', '.', 'split', '(', 'cmd', ')', ',', 'stdout', '=', 'subprocess', '.', 'PIPE', ')', '.', 'communicate', '(', ')', 'exec', '(', 'envs', ')'] | Run the modulecmd tool and use its Python-formatted output to set the
environment variables. | ['Run', 'the', 'modulecmd', 'tool', 'and', 'use', 'its', 'Python', '-', 'formatted', 'output', 'to', 'set', 'the', 'environment', 'variables', '.'] | train | https://github.com/payu-org/payu/blob/1442a9a226012eff248b8097cc1eaabc3e224867/payu/envmod.py#L68-L83 |
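The key trick is exec-ing Python source printed by the subprocess; a self-contained sketch that substitutes the Python interpreter for modulecmd:

import shlex
import subprocess
import sys

cmd = '{0} -c "print(\'x = 1 + 1\')"'.format(sys.executable)
envs, _ = subprocess.Popen(shlex.split(cmd),
                           stdout=subprocess.PIPE).communicate()
exec(envs)   # the printed code runs here, defining x = 2
print(x)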
470 | softlayer/softlayer-python | SoftLayer/managers/network.py | NetworkManager.edit_securitygroup_rule | def edit_securitygroup_rule(self, group_id, rule_id, remote_ip=None,
remote_group=None, direction=None,
ethertype=None, port_max=None,
port_min=None, protocol=None):
"""Edit a security group rule.
:param int group_id: The ID of the security group the rule belongs to
:param int rule_id: The ID of the rule to edit
:param str remote_ip: The remote IP or CIDR to enforce the rule on
:param int remote_group: The remote security group ID to enforce
the rule on
:param str direction: The direction to enforce (egress or ingress)
:param str ethertype: The ethertype to enforce (IPv4 or IPv6)
:param str port_max: The upper port bound to enforce
:param str port_min: The lower port bound to enforce
:param str protocol: The protocol to enforce (icmp, udp, tcp)
"""
successful = False
obj = {}
if remote_ip is not None:
obj['remoteIp'] = remote_ip
if remote_group is not None:
obj['remoteGroupId'] = remote_group
if direction is not None:
obj['direction'] = direction
if ethertype is not None:
obj['ethertype'] = ethertype
if port_max is not None:
obj['portRangeMax'] = port_max
if port_min is not None:
obj['portRangeMin'] = port_min
if protocol is not None:
obj['protocol'] = protocol
if obj:
obj['id'] = rule_id
successful = self.security_group.editRules([obj], id=group_id)
return successful | python | def edit_securitygroup_rule(self, group_id, rule_id, remote_ip=None,
remote_group=None, direction=None,
ethertype=None, port_max=None,
port_min=None, protocol=None):
"""Edit a security group rule.
:param int group_id: The ID of the security group the rule belongs to
:param int rule_id: The ID of the rule to edit
:param str remote_ip: The remote IP or CIDR to enforce the rule on
:param int remote_group: The remote security group ID to enforce
the rule on
:param str direction: The direction to enforce (egress or ingress)
:param str ethertype: The ethertype to enforce (IPv4 or IPv6)
:param str port_max: The upper port bound to enforce
:param str port_min: The lower port bound to enforce
:param str protocol: The protocol to enforce (icmp, udp, tcp)
"""
successful = False
obj = {}
if remote_ip is not None:
obj['remoteIp'] = remote_ip
if remote_group is not None:
obj['remoteGroupId'] = remote_group
if direction is not None:
obj['direction'] = direction
if ethertype is not None:
obj['ethertype'] = ethertype
if port_max is not None:
obj['portRangeMax'] = port_max
if port_min is not None:
obj['portRangeMin'] = port_min
if protocol is not None:
obj['protocol'] = protocol
if obj:
obj['id'] = rule_id
successful = self.security_group.editRules([obj], id=group_id)
return successful | ['def', 'edit_securitygroup_rule', '(', 'self', ',', 'group_id', ',', 'rule_id', ',', 'remote_ip', '=', 'None', ',', 'remote_group', '=', 'None', ',', 'direction', '=', 'None', ',', 'ethertype', '=', 'None', ',', 'port_max', '=', 'None', ',', 'port_min', '=', 'None', ',', 'protocol', '=', 'None', ')', ':', 'successful', '=', 'False', 'obj', '=', '{', '}', 'if', 'remote_ip', 'is', 'not', 'None', ':', 'obj', '[', "'remoteIp'", ']', '=', 'remote_ip', 'if', 'remote_group', 'is', 'not', 'None', ':', 'obj', '[', "'remoteGroupId'", ']', '=', 'remote_group', 'if', 'direction', 'is', 'not', 'None', ':', 'obj', '[', "'direction'", ']', '=', 'direction', 'if', 'ethertype', 'is', 'not', 'None', ':', 'obj', '[', "'ethertype'", ']', '=', 'ethertype', 'if', 'port_max', 'is', 'not', 'None', ':', 'obj', '[', "'portRangeMax'", ']', '=', 'port_max', 'if', 'port_min', 'is', 'not', 'None', ':', 'obj', '[', "'portRangeMin'", ']', '=', 'port_min', 'if', 'protocol', 'is', 'not', 'None', ':', 'obj', '[', "'protocol'", ']', '=', 'protocol', 'if', 'obj', ':', 'obj', '[', "'id'", ']', '=', 'rule_id', 'successful', '=', 'self', '.', 'security_group', '.', 'editRules', '(', '[', 'obj', ']', ',', 'id', '=', 'group_id', ')', 'return', 'successful'] | Edit a security group rule.
:param int group_id: The ID of the security group the rule belongs to
:param int rule_id: The ID of the rule to edit
:param str remote_ip: The remote IP or CIDR to enforce the rule on
:param int remote_group: The remote security group ID to enforce
the rule on
:param str direction: The direction to enforce (egress or ingress)
:param str ethertype: The ethertype to enforce (IPv4 or IPv6)
:param str port_max: The upper port bound to enforce
:param str port_min: The lower port bound to enforce
:param str protocol: The protocol to enforce (icmp, udp, tcp) | ['Edit', 'a', 'security', 'group', 'rule', '.'] | train | https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/network.py#L304-L342 |
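A standalone sketch of the build-only-what-was-passed pattern used for the edit payload:

def build_edit_payload(rule_id, **fields):
    # Keep only the properties the caller actually supplied.
    payload = {key: value for key, value in fields.items() if value is not None}
    if payload:
        payload['id'] = rule_id
    return payload

print(build_edit_payload(42, remoteIp='10.0.0.0/24', protocol=None))
# {'remoteIp': '10.0.0.0/24', 'id': 42}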
471 | ministryofjustice/django-form-error-reporting | form_error_reporting.py | GARequestErrorReportingMixin.get_ga_query_dict | def get_ga_query_dict(self):
"""
Adds user agent and IP to the default hit parameters
"""
query_dict = super(GARequestErrorReportingMixin, self).get_ga_query_dict()
request = self.get_ga_request()
if not request:
return query_dict
user_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', ''))
user_ip = user_ip.split(',')[0].strip()
user_agent = request.META.get('HTTP_USER_AGENT')
user_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
if user_ip:
query_dict['uip'] = user_ip
if user_agent:
query_dict['ua'] = user_agent
if user_language:
query_dict['ul'] = user_language
return query_dict | python | def get_ga_query_dict(self):
"""
Adds user agent and IP to the default hit parameters
"""
query_dict = super(GARequestErrorReportingMixin, self).get_ga_query_dict()
request = self.get_ga_request()
if not request:
return query_dict
user_ip = request.META.get('HTTP_X_FORWARDED_FOR', request.META.get('REMOTE_ADDR', ''))
user_ip = user_ip.split(',')[0].strip()
user_agent = request.META.get('HTTP_USER_AGENT')
user_language = request.META.get('HTTP_ACCEPT_LANGUAGE')
if user_ip:
query_dict['uip'] = user_ip
if user_agent:
query_dict['ua'] = user_agent
if user_language:
query_dict['ul'] = user_language
return query_dict | ['def', 'get_ga_query_dict', '(', 'self', ')', ':', 'query_dict', '=', 'super', '(', 'GARequestErrorReportingMixin', ',', 'self', ')', '.', 'get_ga_query_dict', '(', ')', 'request', '=', 'self', '.', 'get_ga_request', '(', ')', 'if', 'not', 'request', ':', 'return', 'query_dict', 'user_ip', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_X_FORWARDED_FOR'", ',', 'request', '.', 'META', '.', 'get', '(', "'REMOTE_ADDR'", ',', "''", ')', ')', 'user_ip', '=', 'user_ip', '.', 'split', '(', "','", ')', '[', '0', ']', '.', 'strip', '(', ')', 'user_agent', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_USER_AGENT'", ')', 'user_language', '=', 'request', '.', 'META', '.', 'get', '(', "'HTTP_ACCEPT_LANGUAGE'", ')', 'if', 'user_ip', ':', 'query_dict', '[', "'uip'", ']', '=', 'user_ip', 'if', 'user_agent', ':', 'query_dict', '[', "'ua'", ']', '=', 'user_agent', 'if', 'user_language', ':', 'query_dict', '[', "'ul'", ']', '=', 'user_language', 'return', 'query_dict'] | Adds user agent and IP to the default hit parameters | ['Adds', 'user', 'agent', 'and', 'IP', 'to', 'the', 'default', 'hit', 'parameters'] | train | https://github.com/ministryofjustice/django-form-error-reporting/blob/2d08dd5cc4321e1abf49241c515ccd7050d9f828/form_error_reporting.py#L179-L197 |
472 | relekang/python-semantic-release | semantic_release/ci_checks.py | checker | def checker(func: Callable) -> Callable:
"""
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
"""
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except AssertionError:
raise CiVerificationError(
'The verification check for the environment did not pass.'
)
return func_wrapper | python | def checker(func: Callable) -> Callable:
"""
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
"""
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except AssertionError:
raise CiVerificationError(
'The verification check for the environment did not pass.'
)
return func_wrapper | ['def', 'checker', '(', 'func', ':', 'Callable', ')', '->', 'Callable', ':', 'def', 'func_wrapper', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', ':', 'try', ':', 'func', '(', '*', 'args', ',', '*', '*', 'kwargs', ')', 'return', 'True', 'except', 'AssertionError', ':', 'raise', 'CiVerificationError', '(', "'The verification check for the environment did not pass.'", ')', 'return', 'func_wrapper'] | A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError | ['A', 'decorator', 'that', 'will', 'convert', 'AssertionErrors', 'into', 'CiVerificationError', '.'] | train | https://github.com/relekang/python-semantic-release/blob/76123f410180599a19e7c48da413880185bbea20/semantic_release/ci_checks.py#L9-L27 |
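A runnable sketch of the assertion-to-exception wrapping, with a stand-in exception class:

class CiVerificationError(Exception):
    pass

def checker_sketch(func):
    def func_wrapper(*args, **kwargs):
        try:
            func(*args, **kwargs)
            return True
        except AssertionError:
            raise CiVerificationError('The verification check did not pass.')
    return func_wrapper

@checker_sketch
def check_branch(branch):
    assert branch == 'master'

print(check_branch('master'))  # True
# check_branch('dev') would raise CiVerificationError instead of AssertionError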
473 | arne-cl/discoursegraphs | src/discoursegraphs/readwrite/conano.py | ConanoDocumentGraph.is_valid | def is_valid(self, tree):
"""
returns true, iff the order of the tokens in the graph is the
same as in the Conano file (converted to plain text).
"""
conano_plaintext = etree.tostring(tree, encoding='utf8', method='text')
token_str_list = conano_plaintext.split()
for i, plain_token in enumerate(token_str_list):
graph_token = self.node[self.tokens[i]][self.ns+':token']
if ensure_unicode(plain_token) != graph_token:
sys.stderr.write(
"Conano tokenizations don't match: {0} vs. {1} "
"({2})".format(plain_token, graph_token))
return False
return True | python | def is_valid(self, tree):
"""
returns true, iff the order of the tokens in the graph is the
same as in the Conano file (converted to plain text).
"""
conano_plaintext = etree.tostring(tree, encoding='utf8', method='text')
token_str_list = conano_plaintext.split()
for i, plain_token in enumerate(token_str_list):
graph_token = self.node[self.tokens[i]][self.ns+':token']
if ensure_unicode(plain_token) != graph_token:
sys.stderr.write(
"Conano tokenizations don't match: {0} vs. {1} "
"({2})".format(plain_token, graph_token))
return False
return True | ['def', 'is_valid', '(', 'self', ',', 'tree', ')', ':', 'conano_plaintext', '=', 'etree', '.', 'tostring', '(', 'tree', ',', 'encoding', '=', "'utf8'", ',', 'method', '=', "'text'", ')', 'token_str_list', '=', 'conano_plaintext', '.', 'split', '(', ')', 'for', 'i', ',', 'plain_token', 'in', 'enumerate', '(', 'token_str_list', ')', ':', 'graph_token', '=', 'self', '.', 'node', '[', 'self', '.', 'tokens', '[', 'i', ']', ']', '[', 'self', '.', 'ns', '+', "':token'", ']', 'if', 'ensure_unicode', '(', 'plain_token', ')', '!=', 'graph_token', ':', 'sys', '.', 'stderr', '.', 'write', '(', '"Conano tokenizations don\'t match: {0} vs. {1} "', '"({2})"', '.', 'format', '(', 'plain_token', ',', 'graph_token', ')', ')', 'return', 'False', 'return', 'True'] | returns true, iff the order of the tokens in the graph are the
same as in the Conano file (converted to plain text). | ['returns', 'true', 'iff', 'the', 'order', 'of', 'the', 'tokens', 'in', 'the', 'graph', 'are', 'the', 'same', 'as', 'in', 'the', 'Conano', 'file', '(', 'converted', 'to', 'plain', 'text', ')', '.'] | train | https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/conano.py#L148-L162 |
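The validity check is a position-by-position token comparison; a standalone sketch:

plain_tokens = ['Das', 'ist', 'ein', 'Test']
graph_tokens = ['Das', 'ist', 'kein', 'Test']

for i, (plain, graph) in enumerate(zip(plain_tokens, graph_tokens)):
    if plain != graph:
        print("Tokenizations don't match: {0} vs. {1} ({2})".format(plain, graph, i))
        break
else:
    print('valid')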
474 | tamasgal/km3pipe | km3modules/k40.py | correct_means | def correct_means(means, opt_t0s, combs):
"""Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs
"""
corrected_means = np.array([(opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
for mean, comb in zip(means, combs)])
return corrected_means | python | def correct_means(means, opt_t0s, combs):
"""Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs
"""
corrected_means = np.array([(opt_t0s[comb[1]] - opt_t0s[comb[0]]) - mean
for mean, comb in zip(means, combs)])
return corrected_means | ['def', 'correct_means', '(', 'means', ',', 'opt_t0s', ',', 'combs', ')', ':', 'corrected_means', '=', 'np', '.', 'array', '(', '[', '(', 'opt_t0s', '[', 'comb', '[', '1', ']', ']', '-', 'opt_t0s', '[', 'comb', '[', '0', ']', ']', ')', '-', 'mean', 'for', 'mean', ',', 'comb', 'in', 'zip', '(', 'means', ',', 'combs', ')', ']', ')', 'return', 'corrected_means'] | Applies optimal t0s to gaussians means.
Should be around zero afterwards.
Parameters
----------
means: numpy array of means of gaussians of all PMT combinations
opt_t0s: numpy array of optimal t0 values for all PMTs
combs: pmt combinations used to correct
Returns
-------
corrected_means: numpy array of corrected gaussian means for all PMT combs | ['Applies', 'optimal', 't0s', 'to', 'gaussians', 'means', '.'] | train | https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3modules/k40.py#L798-L816 |
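A tiny numpy check: when the t0s fully explain the measured offsets, the corrected means come out at zero:

import numpy as np

opt_t0s = np.array([0.0, 1.5, -0.5])
combs = [(0, 1), (0, 2), (1, 2)]
means = np.array([opt_t0s[j] - opt_t0s[i] for i, j in combs])  # perfectly explained offsets

corrected = np.array([(opt_t0s[b] - opt_t0s[a]) - m
                      for m, (a, b) in zip(means, combs)])
print(corrected)  # [0. 0. 0.]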
475 | yymao/generic-catalog-reader | GCR/base.py | BaseGenericCatalog.has_quantity | def has_quantity(self, quantity, include_native=True):
"""
Check if *quantity* is available in this catalog
Parameters
----------
quantity : str
a quantity name to check
include_native : bool, optional
whether or not to include native quantity names when checking
Returns
-------
has_quantity : bool
True if the quantities are all available; otherwise False
"""
if include_native:
return all(q in self._native_quantities for q in self._translate_quantities({quantity}))
return quantity in self._quantity_modifiers | python | def has_quantity(self, quantity, include_native=True):
"""
Check if *quantity* is available in this catalog
Parameters
----------
quantity : str
a quantity name to check
include_native : bool, optional
whether or not to include native quantity names when checking
Returns
-------
has_quantity : bool
True if the quantities are all available; otherwise False
"""
if include_native:
return all(q in self._native_quantities for q in self._translate_quantities({quantity}))
return quantity in self._quantity_modifiers | ['def', 'has_quantity', '(', 'self', ',', 'quantity', ',', 'include_native', '=', 'True', ')', ':', 'if', 'include_native', ':', 'return', 'all', '(', 'q', 'in', 'self', '.', '_native_quantities', 'for', 'q', 'in', 'self', '.', '_translate_quantities', '(', '{', 'quantity', '}', ')', ')', 'return', 'quantity', 'in', 'self', '.', '_quantity_modifiers'] | Check if *quantity* is available in this catalog
Parameters
----------
quantity : str
a quantity name to check
include_native : bool, optional
whether or not to include native quantity names when checking
Returns
-------
has_quantity : bool
True if the quantities are all available; otherwise False | ['Check', 'if', '*', 'quantity', '*', 'is', 'available', 'in', 'this', 'catalog'] | train | https://github.com/yymao/generic-catalog-reader/blob/bc6267ac41b9f68106ed6065184469ac13fdc0b6/GCR/base.py#L79-L100 |
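A standalone sketch of the native-vs-derived check, with a hypothetical one-to-one translation step in place of _translate_quantities:

native = {'ra_true', 'dec_true'}
modifiers = {'ra': 'ra_true', 'dec': 'dec_true', 'broken': 'missing_col'}

def has_quantity_sketch(quantity, include_native=True):
    if include_native:
        translated = {modifiers.get(quantity, quantity)}
        return all(q in native for q in translated)
    return quantity in modifiers

print(has_quantity_sketch('ra'))      # True
print(has_quantity_sketch('broken'))  # False -- its native column is missing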
476 | bcbio/bcbio-nextgen | bcbio/variation/mutect2.py | _af_filter | def _af_filter(data, in_file, out_file):
"""Soft-filter variants with AF below min_allele_fraction (appends "MinAF" to FILTER)
"""
min_freq = float(utils.get_in(data["config"], ("algorithm", "min_allele_fraction"), 10)) / 100.0
logger.debug("Filtering MuTect2 calls with allele fraction threshold of %s" % min_freq)
ungz_out_file = "%s.vcf" % utils.splitext_plus(out_file)[0]
if not utils.file_exists(ungz_out_file) and not utils.file_exists(ungz_out_file + ".gz"):
with file_transaction(data, ungz_out_file) as tx_out_file:
vcf = cyvcf2.VCF(in_file)
vcf.add_filter_to_header({
'ID': 'MinAF',
'Description': 'Allele frequency is lower than %s%% ' % (min_freq*100) + (
'(configured in bcbio as min_allele_fraction)'
if utils.get_in(data["config"], ("algorithm", "min_allele_fraction"))
else '(default threshold in bcbio; override with min_allele_fraction in the algorithm section)')})
w = cyvcf2.Writer(tx_out_file, vcf)
# GATK 3.x can produce VCFs without sample names for empty VCFs
try:
tumor_index = vcf.samples.index(dd.get_sample_name(data))
except ValueError:
tumor_index = None
for rec in vcf:
if tumor_index is not None and np.all(rec.format('AF')[tumor_index] < min_freq):
vcfutils.cyvcf_add_filter(rec, 'MinAF')
w.write_record(rec)
w.close()
return vcfutils.bgzip_and_index(ungz_out_file, data["config"]) | python | def _af_filter(data, in_file, out_file):
"""Soft-filter variants with AF below min_allele_fraction (appends "MinAF" to FILTER)
"""
min_freq = float(utils.get_in(data["config"], ("algorithm", "min_allele_fraction"), 10)) / 100.0
logger.debug("Filtering MuTect2 calls with allele fraction threshold of %s" % min_freq)
ungz_out_file = "%s.vcf" % utils.splitext_plus(out_file)[0]
if not utils.file_exists(ungz_out_file) and not utils.file_exists(ungz_out_file + ".gz"):
with file_transaction(data, ungz_out_file) as tx_out_file:
vcf = cyvcf2.VCF(in_file)
vcf.add_filter_to_header({
'ID': 'MinAF',
'Description': 'Allele frequency is lower than %s%% ' % (min_freq*100) + (
'(configured in bcbio as min_allele_fraction)'
if utils.get_in(data["config"], ("algorithm", "min_allele_fraction"))
else '(default threshold in bcbio; override with min_allele_fraction in the algorithm section)')})
w = cyvcf2.Writer(tx_out_file, vcf)
# GATK 3.x can produce VCFs without sample names for empty VCFs
try:
tumor_index = vcf.samples.index(dd.get_sample_name(data))
except ValueError:
tumor_index = None
for rec in vcf:
if tumor_index is not None and np.all(rec.format('AF')[tumor_index] < min_freq):
vcfutils.cyvcf_add_filter(rec, 'MinAF')
w.write_record(rec)
w.close()
return vcfutils.bgzip_and_index(ungz_out_file, data["config"]) | ['def', '_af_filter', '(', 'data', ',', 'in_file', ',', 'out_file', ')', ':', 'min_freq', '=', 'float', '(', 'utils', '.', 'get_in', '(', 'data', '[', '"config"', ']', ',', '(', '"algorithm"', ',', '"min_allele_fraction"', ')', ',', '10', ')', ')', '/', '100.0', 'logger', '.', 'debug', '(', '"Filtering MuTect2 calls with allele fraction threshold of %s"', '%', 'min_freq', ')', 'ungz_out_file', '=', '"%s.vcf"', '%', 'utils', '.', 'splitext_plus', '(', 'out_file', ')', '[', '0', ']', 'if', 'not', 'utils', '.', 'file_exists', '(', 'ungz_out_file', ')', 'and', 'not', 'utils', '.', 'file_exists', '(', 'ungz_out_file', '+', '".gz"', ')', ':', 'with', 'file_transaction', '(', 'data', ',', 'ungz_out_file', ')', 'as', 'tx_out_file', ':', 'vcf', '=', 'cyvcf2', '.', 'VCF', '(', 'in_file', ')', 'vcf', '.', 'add_filter_to_header', '(', '{', "'ID'", ':', "'MinAF'", ',', "'Description'", ':', "'Allele frequency is lower than %s%% '", '%', '(', 'min_freq', '*', '100', ')', '+', '(', "'(configured in bcbio as min_allele_fraction)'", 'if', 'utils', '.', 'get_in', '(', 'data', '[', '"config"', ']', ',', '(', '"algorithm"', ',', '"min_allele_fraction"', ')', ')', 'else', "'(default threshold in bcbio; override with min_allele_fraction in the algorithm section)'", ')', '}', ')', 'w', '=', 'cyvcf2', '.', 'Writer', '(', 'tx_out_file', ',', 'vcf', ')', '# GATK 3.x can produce VCFs without sample names for empty VCFs', 'try', ':', 'tumor_index', '=', 'vcf', '.', 'samples', '.', 'index', '(', 'dd', '.', 'get_sample_name', '(', 'data', ')', ')', 'except', 'ValueError', ':', 'tumor_index', '=', 'None', 'for', 'rec', 'in', 'vcf', ':', 'if', 'tumor_index', 'is', 'not', 'None', 'and', 'np', '.', 'all', '(', 'rec', '.', 'format', '(', "'AF'", ')', '[', 'tumor_index', ']', '<', 'min_freq', ')', ':', 'vcfutils', '.', 'cyvcf_add_filter', '(', 'rec', ',', "'MinAF'", ')', 'w', '.', 'write_record', '(', 'rec', ')', 'w', '.', 'close', '(', ')', 'return', 'vcfutils', '.', 'bgzip_and_index', '(', 'ungz_out_file', ',', 'data', '[', '"config"', ']', ')'] | Soft-filter variants with AF below min_allele_fraction (appends "MinAF" to FILTER) | ['Soft', '-', 'filter', 'variants', 'with', 'AF', 'below', 'min_allele_fraction', '(', 'appends', 'MinAF', 'to', 'FILTER', ')'] | train | https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/mutect2.py#L134-L160 |
477 | tensorflow/tensorboard | tensorboard/plugins/hparams/backend_context.py | _protobuf_value_type | def _protobuf_value_type(value):
"""Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
"""
if value.HasField("number_value"):
return api_pb2.DATA_TYPE_FLOAT64
if value.HasField("string_value"):
return api_pb2.DATA_TYPE_STRING
if value.HasField("bool_value"):
return api_pb2.DATA_TYPE_BOOL
return None | python | def _protobuf_value_type(value):
"""Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message.
"""
if value.HasField("number_value"):
return api_pb2.DATA_TYPE_FLOAT64
if value.HasField("string_value"):
return api_pb2.DATA_TYPE_STRING
if value.HasField("bool_value"):
return api_pb2.DATA_TYPE_BOOL
return None | ['def', '_protobuf_value_type', '(', 'value', ')', ':', 'if', 'value', '.', 'HasField', '(', '"number_value"', ')', ':', 'return', 'api_pb2', '.', 'DATA_TYPE_FLOAT64', 'if', 'value', '.', 'HasField', '(', '"string_value"', ')', ':', 'return', 'api_pb2', '.', 'DATA_TYPE_STRING', 'if', 'value', '.', 'HasField', '(', '"bool_value"', ')', ':', 'return', 'api_pb2', '.', 'DATA_TYPE_BOOL', 'return', 'None'] | Returns the type of the google.protobuf.Value message as an api.DataType.
Returns None if the type of 'value' is not one of the types supported in
api_pb2.DataType.
Args:
value: google.protobuf.Value message. | ['Returns', 'the', 'type', 'of', 'the', 'google', '.', 'protobuf', '.', 'Value', 'message', 'as', 'an', 'api', '.', 'DataType', '.'] | train | https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/backend_context.py#L280-L295 |
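For example, with the protobuf runtime installed, a struct_pb2.Value behaves as the helper expects (the value fields live in a oneof, so HasField works):

from google.protobuf import struct_pb2

v = struct_pb2.Value(number_value=3.0)
print(v.HasField('number_value'))  # True
print(v.HasField('string_value'))  # False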
478 | wglass/lighthouse | lighthouse/service.py | Service.run_checks | def run_checks(self):
"""
Iterates over the configured ports and runs the checks on each one.
Returns a two-element tuple: the first is the set of ports that
transitioned from down to up, the second is the set of ports that
transitioned from up to down.
Also handles the case where a check for a since-removed port is run,
marking the port as down regardless of the check's result and removing
the check(s) for the port.
"""
came_up = set()
went_down = set()
for port in self.ports:
checks = self.checks[port].values()
if not checks:
logger.warn("No checks defined for self: %s", self.name)
for check in checks:
check.run()
checks_pass = all([check.passing for check in checks])
if self.is_up[port] in (False, None) and checks_pass:
came_up.add(port)
self.is_up[port] = True
elif self.is_up[port] in (True, None) and not checks_pass:
went_down.add(port)
self.is_up[port] = False
for unused_port in set(self.checks.keys()) - self.ports:
went_down.add(unused_port)
del self.checks[unused_port]
return came_up, went_down | python | def run_checks(self):
"""
Iterates over the configured ports and runs the checks on each one.
Returns a two-element tuple: the first is the set of ports that
transitioned from down to up, the second is the set of ports that
transitioned from up to down.
Also handles the case where a check for a since-removed port is run,
marking the port as down regardless of the check's result and removing
the check(s) for the port.
"""
came_up = set()
went_down = set()
for port in self.ports:
checks = self.checks[port].values()
if not checks:
logger.warn("No checks defined for self: %s", self.name)
for check in checks:
check.run()
checks_pass = all([check.passing for check in checks])
if self.is_up[port] in (False, None) and checks_pass:
came_up.add(port)
self.is_up[port] = True
elif self.is_up[port] in (True, None) and not checks_pass:
went_down.add(port)
self.is_up[port] = False
for unused_port in set(self.checks.keys()) - self.ports:
went_down.add(unused_port)
del self.checks[unused_port]
return came_up, went_down | ['def', 'run_checks', '(', 'self', ')', ':', 'came_up', '=', 'set', '(', ')', 'went_down', '=', 'set', '(', ')', 'for', 'port', 'in', 'self', '.', 'ports', ':', 'checks', '=', 'self', '.', 'checks', '[', 'port', ']', '.', 'values', '(', ')', 'if', 'not', 'checks', ':', 'logger', '.', 'warn', '(', '"No checks defined for self: %s"', ',', 'self', '.', 'name', ')', 'for', 'check', 'in', 'checks', ':', 'check', '.', 'run', '(', ')', 'checks_pass', '=', 'all', '(', '[', 'check', '.', 'passing', 'for', 'check', 'in', 'checks', ']', ')', 'if', 'self', '.', 'is_up', '[', 'port', ']', 'in', '(', 'False', ',', 'None', ')', 'and', 'checks_pass', ':', 'came_up', '.', 'add', '(', 'port', ')', 'self', '.', 'is_up', '[', 'port', ']', '=', 'True', 'elif', 'self', '.', 'is_up', '[', 'port', ']', 'in', '(', 'True', ',', 'None', ')', 'and', 'not', 'checks_pass', ':', 'went_down', '.', 'add', '(', 'port', ')', 'self', '.', 'is_up', '[', 'port', ']', '=', 'False', 'for', 'unused_port', 'in', 'set', '(', 'self', '.', 'checks', '.', 'keys', '(', ')', ')', '-', 'self', '.', 'ports', ':', 'went_down', '.', 'add', '(', 'unused_port', ')', 'del', 'self', '.', 'checks', '[', 'unused_port', ']', 'return', 'came_up', ',', 'went_down'] | Iterates over the configured ports and runs the checks on each one.
Returns a two-element tuple: the first is the set of ports that
transitioned from down to up, the second is the set of ports that
transitioned from up to down.
Also handles the case where a check for a since-removed port is run,
marking the port as down regardless of the check's result and removing
the check(s) for the port. | ['Iterates', 'over', 'the', 'configured', 'ports', 'and', 'runs', 'the', 'checks', 'on', 'each', 'one', '.'] | train | https://github.com/wglass/lighthouse/blob/f4ce6550895acc31e433ede0c05d366718a3ffe5/lighthouse/service.py#L148-L185 |
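A minimal driver sketch for the method above; `service` stands in for an already-configured lighthouse Service instance and the reporting actions are illustrative:

came_up, went_down = service.run_checks()
for port in came_up:
    print("port %d is now passing its checks" % port)   # e.g. re-register with discovery
for port in went_down:
    print("port %d stopped passing its checks" % port)  # e.g. deregister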
479 | DLR-RM/RAFCON | source/rafcon/core/states/state.py | State.input_data_ports | def input_data_ports(self, input_data_ports):
"""Property for the _input_data_ports field
See Property.
:param dict input_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.InputDataPort`
:raises exceptions.TypeError: if the input_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the input dictionary and the id of the data port do not match
"""
if not isinstance(input_data_ports, dict):
raise TypeError("input_data_ports must be of type dict")
if [port_id for port_id, port in input_data_ports.items() if not port_id == port.data_port_id]:
raise AttributeError("The key of the input dictionary and the id of the data port do not match")
# This is a fix for older state machines, which didn't distinguish between input and output ports
for port_id, port in input_data_ports.items():
if not isinstance(port, InputDataPort):
if isinstance(port, DataPort):
port = InputDataPort(port.name, port.data_type, port.default_value, port.data_port_id)
input_data_ports[port_id] = port
else:
raise TypeError("Elements of input_data_ports must be of type InputDataPort, given: {0}".format(
type(port).__name__))
old_input_data_ports = self._input_data_ports
self._input_data_ports = input_data_ports
for port_id, port in input_data_ports.items():
try:
port.parent = self
except ValueError:
self._input_data_ports = old_input_data_ports
raise
# check that all old_input_data_ports no longer reference self as their parent
for old_input_data_port in old_input_data_ports.values():
if old_input_data_port not in self._input_data_ports.values() and old_input_data_port.parent is self:
old_input_data_port.parent = None | python | def input_data_ports(self, input_data_ports):
"""Property for the _input_data_ports field
See Property.
:param dict input_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.InputDataPort`
:raises exceptions.TypeError: if the input_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the input dictionary and the id of the data port do not match
"""
if not isinstance(input_data_ports, dict):
raise TypeError("input_data_ports must be of type dict")
if [port_id for port_id, port in input_data_ports.items() if not port_id == port.data_port_id]:
raise AttributeError("The key of the input dictionary and the id of the data port do not match")
# This is a fix for older state machines, which didn't distinguish between input and output ports
for port_id, port in input_data_ports.items():
if not isinstance(port, InputDataPort):
if isinstance(port, DataPort):
port = InputDataPort(port.name, port.data_type, port.default_value, port.data_port_id)
input_data_ports[port_id] = port
else:
raise TypeError("Elements of input_data_ports must be of type InputDataPort, given: {0}".format(
type(port).__name__))
old_input_data_ports = self._input_data_ports
self._input_data_ports = input_data_ports
for port_id, port in input_data_ports.items():
try:
port.parent = self
except ValueError:
self._input_data_ports = old_input_data_ports
raise
# check that all old_input_data_ports no longer reference self as their parent
for old_input_data_port in old_input_data_ports.values():
if old_input_data_port not in self._input_data_ports.values() and old_input_data_port.parent is self:
old_input_data_port.parent = None | ['def', 'input_data_ports', '(', 'self', ',', 'input_data_ports', ')', ':', 'if', 'not', 'isinstance', '(', 'input_data_ports', ',', 'dict', ')', ':', 'raise', 'TypeError', '(', '"input_data_ports must be of type dict"', ')', 'if', '[', 'port_id', 'for', 'port_id', ',', 'port', 'in', 'input_data_ports', '.', 'items', '(', ')', 'if', 'not', 'port_id', '==', 'port', '.', 'data_port_id', ']', ':', 'raise', 'AttributeError', '(', '"The key of the input dictionary and the id of the data port do not match"', ')', "# This is a fix for older state machines, which didn't distinguish between input and output ports", 'for', 'port_id', ',', 'port', 'in', 'input_data_ports', '.', 'items', '(', ')', ':', 'if', 'not', 'isinstance', '(', 'port', ',', 'InputDataPort', ')', ':', 'if', 'isinstance', '(', 'port', ',', 'DataPort', ')', ':', 'port', '=', 'InputDataPort', '(', 'port', '.', 'name', ',', 'port', '.', 'data_type', ',', 'port', '.', 'default_value', ',', 'port', '.', 'data_port_id', ')', 'input_data_ports', '[', 'port_id', ']', '=', 'port', 'else', ':', 'raise', 'TypeError', '(', '"Elements of input_data_ports must be of type InputDataPort, given: {0}"', '.', 'format', '(', 'type', '(', 'port', ')', '.', '__name__', ')', ')', 'old_input_data_ports', '=', 'self', '.', '_input_data_ports', 'self', '.', '_input_data_ports', '=', 'input_data_ports', 'for', 'port_id', ',', 'port', 'in', 'input_data_ports', '.', 'items', '(', ')', ':', 'try', ':', 'port', '.', 'parent', '=', 'self', 'except', 'ValueError', ':', 'self', '.', '_input_data_ports', '=', 'old_input_data_ports', 'raise', '# check that all old_input_data_ports are no more referencing self as there parent', 'for', 'old_input_data_port', 'in', 'old_input_data_ports', '.', 'values', '(', ')', ':', 'if', 'old_input_data_port', 'not', 'in', 'self', '.', '_input_data_ports', '.', 'values', '(', ')', 'and', 'old_input_data_port', '.', 'parent', 'is', 'self', ':', 'old_input_data_port', '.', 'parent', '=', 'None'] | Property for the _input_data_ports field
See Property.
:param dict input_data_ports: Dictionary that maps :class:`int` data_port_ids onto values of type
:class:`rafcon.core.state_elements.data_port.InputDataPort`
:raises exceptions.TypeError: if the input_data_ports parameter has the wrong type
:raises exceptions.AttributeError: if the key of the input dictionary and the id of the data port do not match | ['Property', 'for', 'the', '_input_data_ports', 'field'] | train | https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L1052-L1089 |
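A short sketch of the key/id rule the setter enforces; the imports and constructor arguments follow the attribute names used above but are assumptions about the wider RAFCON API:

from rafcon.core.state_elements.data_port import InputDataPort
from rafcon.core.states.execution_state import ExecutionState

state = ExecutionState('demo')
port = InputDataPort('value', int, 0)
state.input_data_ports = {port.data_port_id: port}      # key equals data_port_id: accepted
state.input_data_ports = {port.data_port_id + 1: port}  # mismatched key: raises AttributeError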
480 | pycontribs/pyrax | pyrax/cloudloadbalancers.py | CloudLoadBalancerManager.delete_node | def delete_node(self, loadbalancer, node):
"""Removes the node from its load balancer."""
lb = node.parent
if not lb:
raise exc.UnattachedNode("No parent Load Balancer for this node "
"could be determined.")
resp, body = self.api.method_delete("/loadbalancers/%s/nodes/%s" %
(lb.id, node.id))
return resp, body | python | def delete_node(self, loadbalancer, node):
"""Removes the node from its load balancer."""
lb = node.parent
if not lb:
raise exc.UnattachedNode("No parent Load Balancer for this node "
"could be determined.")
resp, body = self.api.method_delete("/loadbalancers/%s/nodes/%s" %
(lb.id, node.id))
return resp, body | ['def', 'delete_node', '(', 'self', ',', 'loadbalancer', ',', 'node', ')', ':', 'lb', '=', 'node', '.', 'parent', 'if', 'not', 'lb', ':', 'raise', 'exc', '.', 'UnattachedNode', '(', '"No parent Load Balancer for this node "', '"could be determined."', ')', 'resp', ',', 'body', '=', 'self', '.', 'api', '.', 'method_delete', '(', '"/loadbalancers/%s/nodes/%s"', '%', '(', 'lb', '.', 'id', ',', 'node', '.', 'id', ')', ')', 'return', 'resp', ',', 'body'] | Removes the node from its load balancer. | ['Removes', 'the', 'node', 'from', 'its', 'load', 'balancer', '.'] | train | https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/cloudloadbalancers.py#L532-L540 |
481 | dfm/george | george/modeling.py | Model.parameter_vector | def parameter_vector(self):
"""An array of all parameters (including frozen parameters)"""
return np.array([getattr(self, k) for k in self.parameter_names]) | python | def parameter_vector(self):
"""An array of all parameters (including frozen parameters)"""
return np.array([getattr(self, k) for k in self.parameter_names]) | ['def', 'parameter_vector', '(', 'self', ')', ':', 'return', 'np', '.', 'array', '(', '[', 'getattr', '(', 'self', ',', 'k', ')', 'for', 'k', 'in', 'self', '.', 'parameter_names', ']', ')'] | An array of all parameters (including frozen parameters) | ['An', 'array', 'of', 'all', 'parameters', '(', 'including', 'frozen', 'parameters', ')'] | train | https://github.com/dfm/george/blob/44819680036387625ee89f81c55104f3c1600759/george/modeling.py#L164-L166 |
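A small example of the parameter machinery this property belongs to, following the subclassing pattern from the george modeling documentation:

from george.modeling import Model

class LinearModel(Model):
    parameter_names = ("m", "b")

model = LinearModel(m=2.0, b=-0.5)
print(model.parameter_vector)   # array([ 2. , -0.5])
model.freeze_parameter("b")
print(model.parameter_vector)   # frozen parameters are still included: array([ 2. , -0.5])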
482 | ga4gh/ga4gh-server | ga4gh/server/gff3.py | Gff3Parser._open | def _open(self):
"""
open input file, optionally with decompression
"""
if self.fileName.endswith(".gz"):
return gzip.open(self.fileName)
elif self.fileName.endswith(".bz2"):
return bz2.BZ2File(self.fileName)
else:
return open(self.fileName) | python | def _open(self):
"""
open input file, optionally with decompression
"""
if self.fileName.endswith(".gz"):
return gzip.open(self.fileName)
elif self.fileName.endswith(".bz2"):
return bz2.BZ2File(self.fileName)
else:
return open(self.fileName) | ['def', '_open', '(', 'self', ')', ':', 'if', 'self', '.', 'fileName', '.', 'endswith', '(', '".gz"', ')', ':', 'return', 'gzip', '.', 'open', '(', 'self', '.', 'fileName', ')', 'elif', 'self', '.', 'fileName', '.', 'endswith', '(', '".bz2"', ')', ':', 'return', 'bz2', '.', 'BZ2File', '(', 'self', '.', 'fileName', ')', 'else', ':', 'return', 'open', '(', 'self', '.', 'fileName', ')'] | open input file, optionally with decompression | ['open', 'input', 'file', 'optionally', 'with', 'decompression'] | train | https://github.com/ga4gh/ga4gh-server/blob/1aa18922ef136db8604f6f098cb1732cba6f2a76/ga4gh/server/gff3.py#L240-L249 |
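The same extension-based dispatch as a standalone helper — a sketch, not part of the ga4gh API; the file name is hypothetical:

import bz2
import gzip

def open_maybe_compressed(file_name):
    # Mirror Gff3Parser._open: choose a reader based on the file suffix.
    if file_name.endswith(".gz"):
        return gzip.open(file_name)
    elif file_name.endswith(".bz2"):
        return bz2.BZ2File(file_name)
    return open(file_name)

with open_maybe_compressed("annotations.gff3.gz") as fh:
    for line in fh:
        pass  # parse GFF3 records here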
483 | oauthlib/oauthlib | oauthlib/common.py | extract_params | def extract_params(raw):
"""Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
"""
if isinstance(raw, (bytes, unicode_type)):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params | python | def extract_params(raw):
"""Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None.
"""
if isinstance(raw, (bytes, unicode_type)):
try:
params = urldecode(raw)
except ValueError:
params = None
elif hasattr(raw, '__iter__'):
try:
dict(raw)
except ValueError:
params = None
except TypeError:
params = None
else:
params = list(raw.items() if isinstance(raw, dict) else raw)
params = decode_params_utf8(params)
else:
params = None
return params | ['def', 'extract_params', '(', 'raw', ')', ':', 'if', 'isinstance', '(', 'raw', ',', '(', 'bytes', ',', 'unicode_type', ')', ')', ':', 'try', ':', 'params', '=', 'urldecode', '(', 'raw', ')', 'except', 'ValueError', ':', 'params', '=', 'None', 'elif', 'hasattr', '(', 'raw', ',', "'__iter__'", ')', ':', 'try', ':', 'dict', '(', 'raw', ')', 'except', 'ValueError', ':', 'params', '=', 'None', 'except', 'TypeError', ':', 'params', '=', 'None', 'else', ':', 'params', '=', 'list', '(', 'raw', '.', 'items', '(', ')', 'if', 'isinstance', '(', 'raw', ',', 'dict', ')', 'else', 'raw', ')', 'params', '=', 'decode_params_utf8', '(', 'params', ')', 'else', ':', 'params', '=', 'None', 'return', 'params'] | Extract parameters and return them as a list of 2-tuples.
Will successfully extract parameters from urlencoded query strings,
dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
empty list of parameters. Any other input will result in a return
value of None. | ['Extract', 'parameters', 'and', 'return', 'them', 'as', 'a', 'list', 'of', '2', '-', 'tuples', '.'] | train | https://github.com/oauthlib/oauthlib/blob/30321dd3c0ca784d3508a1970cf90d9f76835c79/oauthlib/common.py#L168-L194 |
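Expected behaviour per the docstring above, with return values shown as comments:

from oauthlib.common import extract_params

extract_params('a=1&b=2')     # [('a', '1'), ('b', '2')]
extract_params({'a': '1'})    # [('a', '1')]
extract_params([('a', '1')])  # [('a', '1')]
extract_params('')            # [] -- empty input yields an empty list
extract_params(42)            # None -- any other input is rejected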
484 | saltstack/salt | salt/client/ssh/wrapper/state.py | low | def low(data, **kwargs):
'''
Execute a single low data call
This function is mostly intended for testing the state system
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
for chunk in chunks:
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | python | def low(data, **kwargs):
'''
Execute a single low data call
This function is mostly intended for testing the state system
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}'
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
chunks = [data]
st_ = salt.client.ssh.state.SSHHighState(
__opts__,
__pillar__,
__salt__,
__context__['fileclient'])
for chunk in chunks:
chunk['__id__'] = chunk['name'] if not chunk.get('__id__') else chunk['__id__']
err = st_.state.verify_data(data)
if err:
return err
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
__opts__.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(__opts__, __opts__.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, __opts__['hash_type'])
cmd = 'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'.format(
__opts__['thin_dir'],
trans_tar_sum,
__opts__['hash_type'])
single = salt.client.ssh.Single(
__opts__,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(__opts__['thin_dir']))
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | ['def', 'low', '(', 'data', ',', '*', '*', 'kwargs', ')', ':', 'st_kwargs', '=', '__salt__', '.', 'kwargs', '__opts__', '[', "'grains'", ']', '=', '__grains__', 'chunks', '=', '[', 'data', ']', 'st_', '=', 'salt', '.', 'client', '.', 'ssh', '.', 'state', '.', 'SSHHighState', '(', '__opts__', ',', '__pillar__', ',', '__salt__', ',', '__context__', '[', "'fileclient'", ']', ')', 'for', 'chunk', 'in', 'chunks', ':', 'chunk', '[', "'__id__'", ']', '=', 'chunk', '[', "'name'", ']', 'if', 'not', 'chunk', '.', 'get', '(', "'__id__'", ')', 'else', 'chunk', '[', "'__id__'", ']', 'err', '=', 'st_', '.', 'state', '.', 'verify_data', '(', 'data', ')', 'if', 'err', ':', 'return', 'err', 'file_refs', '=', 'salt', '.', 'client', '.', 'ssh', '.', 'state', '.', 'lowstate_file_refs', '(', 'chunks', ',', '_merge_extra_filerefs', '(', 'kwargs', '.', 'get', '(', "'extra_filerefs'", ',', "''", ')', ',', '__opts__', '.', 'get', '(', "'extra_filerefs'", ',', "''", ')', ')', ')', 'roster', '=', 'salt', '.', 'roster', '.', 'Roster', '(', '__opts__', ',', '__opts__', '.', 'get', '(', "'roster'", ',', "'flat'", ')', ')', 'roster_grains', '=', 'roster', '.', 'opts', '[', "'grains'", ']', '# Create the tar containing the state pkg and relevant files.', 'trans_tar', '=', 'salt', '.', 'client', '.', 'ssh', '.', 'state', '.', 'prep_trans_tar', '(', '__context__', '[', "'fileclient'", ']', ',', 'chunks', ',', 'file_refs', ',', '__pillar__', ',', 'st_kwargs', '[', "'id_'", ']', ',', 'roster_grains', ')', 'trans_tar_sum', '=', 'salt', '.', 'utils', '.', 'hashutils', '.', 'get_hash', '(', 'trans_tar', ',', '__opts__', '[', "'hash_type'", ']', ')', 'cmd', '=', "'state.pkg {0}/salt_state.tgz pkg_sum={1} hash_type={2}'", '.', 'format', '(', '__opts__', '[', "'thin_dir'", ']', ',', 'trans_tar_sum', ',', '__opts__', '[', "'hash_type'", ']', ')', 'single', '=', 'salt', '.', 'client', '.', 'ssh', '.', 'Single', '(', '__opts__', ',', 'cmd', ',', 'fsclient', '=', '__context__', '[', "'fileclient'", ']', ',', 'minion_opts', '=', '__salt__', '.', 'minion_opts', ',', '*', '*', 'st_kwargs', ')', 'single', '.', 'shell', '.', 'send', '(', 'trans_tar', ',', "'{0}/salt_state.tgz'", '.', 'format', '(', '__opts__', '[', "'thin_dir'", ']', ')', ')', 'stdout', ',', 'stderr', ',', '_', '=', 'single', '.', 'cmd_block', '(', ')', '# Clean up our tar', 'try', ':', 'os', '.', 'remove', '(', 'trans_tar', ')', 'except', '(', 'OSError', ',', 'IOError', ')', ':', 'pass', '# Read in the JSON data and return the data structure', 'try', ':', 'return', 'salt', '.', 'utils', '.', 'json', '.', 'loads', '(', 'stdout', ')', 'except', 'Exception', 'as', 'e', ':', 'log', '.', 'error', '(', '"JSON Render failed for: %s\\n%s"', ',', 'stdout', ',', 'stderr', ')', 'log', '.', 'error', '(', 'six', '.', 'text_type', '(', 'e', ')', ')', '# If for some reason the json load fails, return the stdout', 'return', 'stdout'] | Execute a single low data call
This function is mostly intended for testing the state system
CLI Example:
.. code-block:: bash
salt '*' state.low '{"state": "pkg", "fun": "installed", "name": "vi"}' | ['Execute', 'a', 'single', 'low', 'data', 'call', 'This', 'function', 'is', 'mostly', 'intended', 'for', 'testing', 'the', 'state', 'system'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/client/ssh/wrapper/state.py#L338-L410 |
485 | praekeltfoundation/seed-message-sender | message_sender/tasks.py | fire_metric | def fire_metric(metric_name, metric_value):
""" Fires a metric using the MetricsApiClient
"""
metric_value = float(metric_value)
metric = {metric_name: metric_value}
metric_client.fire_metrics(**metric)
return "Fired metric <{}> with value <{}>".format(metric_name, metric_value) | python | def fire_metric(metric_name, metric_value):
""" Fires a metric using the MetricsApiClient
"""
metric_value = float(metric_value)
metric = {metric_name: metric_value}
metric_client.fire_metrics(**metric)
return "Fired metric <{}> with value <{}>".format(metric_name, metric_value) | ['def', 'fire_metric', '(', 'metric_name', ',', 'metric_value', ')', ':', 'metric_value', '=', 'float', '(', 'metric_value', ')', 'metric', '=', '{', 'metric_name', ':', 'metric_value', '}', 'metric_client', '.', 'fire_metrics', '(', '*', '*', 'metric', ')', 'return', '"Fired metric <{}> with value <{}>"', '.', 'format', '(', 'metric_name', ',', 'metric_value', ')'] | Fires a metric using the MetricsApiClient | ['Fires', 'a', 'metric', 'using', 'the', 'MetricsApiClient'] | train | https://github.com/praekeltfoundation/seed-message-sender/blob/257b01635171b9dbe1f5f13baa810c971bb2620e/message_sender/tasks.py#L118-L124 |
486 | bokeh/bokeh | bokeh/core/property/descriptors.py | BasicPropertyDescriptor._get | def _get(self, obj):
''' Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
'''
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name] | python | def _get(self, obj):
''' Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|.
'''
if not hasattr(obj, '_property_values'):
raise RuntimeError("Cannot get a property value '%s' from a %s instance before HasProps.__init__" %
(self.name, obj.__class__.__name__))
if self.name not in obj._property_values:
return self._get_default(obj)
else:
return obj._property_values[self.name] | ['def', '_get', '(', 'self', ',', 'obj', ')', ':', 'if', 'not', 'hasattr', '(', 'obj', ',', "'_property_values'", ')', ':', 'raise', 'RuntimeError', '(', '"Cannot get a property value \'%s\' from a %s instance before HasProps.__init__"', '%', '(', 'self', '.', 'name', ',', 'obj', '.', '__class__', '.', '__name__', ')', ')', 'if', 'self', '.', 'name', 'not', 'in', 'obj', '.', '_property_values', ':', 'return', 'self', '.', '_get_default', '(', 'obj', ')', 'else', ':', 'return', 'obj', '.', '_property_values', '[', 'self', '.', 'name', ']'] | Internal implementation of instance attribute access for the
``BasicPropertyDescriptor`` getter.
If the value has been explicitly set by a user, return that
value. Otherwise, return the default.
Args:
obj (HasProps) : the instance to get a value of this property for
Returns:
object
Raises:
RuntimeError
If the |HasProps| instance has not yet been initialized, or if
this descriptor is on a class that is not a |HasProps|. | ['Internal', 'implementation', 'of', 'instance', 'attribute', 'access', 'for', 'the', 'BasicPropertyDescriptor', 'getter', '.'] | train | https://github.com/bokeh/bokeh/blob/dc8cf49e4e4302fd38537ad089ece81fbcca4737/bokeh/core/property/descriptors.py#L671-L697 |
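The descriptor behaviour seen from the public property interface; `Circle` and its `radius` property are standard Bokeh models, and the printed values assume Bokeh's defaults:

from bokeh.models import Circle

glyph = Circle()
print(glyph.radius)   # default (None): the name is not in _property_values yet
glyph.radius = 0.5
print(glyph.radius)   # 0.5: the explicitly set value is returned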
487 | juju/python-libjuju | juju/controller.py | Controller.get_models | async def get_models(self, all_=False, username=None):
"""
.. deprecated:: 0.7.0
Use :meth:`.list_models` instead.
"""
controller_facade = client.ControllerFacade.from_connection(
self.connection())
for attempt in (1, 2, 3):
try:
return await controller_facade.AllModels()
except errors.JujuAPIError as e:
# retry concurrency error until resolved in Juju
# see: https://bugs.launchpad.net/juju/+bug/1721786
if 'has been removed' not in e.message or attempt == 3:
raise | python | async def get_models(self, all_=False, username=None):
"""
.. deprecated:: 0.7.0
Use :meth:`.list_models` instead.
"""
controller_facade = client.ControllerFacade.from_connection(
self.connection())
for attempt in (1, 2, 3):
try:
return await controller_facade.AllModels()
except errors.JujuAPIError as e:
# retry concurrency error until resolved in Juju
# see: https://bugs.launchpad.net/juju/+bug/1721786
if 'has been removed' not in e.message or attempt == 3:
raise | ['async', 'def', 'get_models', '(', 'self', ',', 'all_', '=', 'False', ',', 'username', '=', 'None', ')', ':', 'controller_facade', '=', 'client', '.', 'ControllerFacade', '.', 'from_connection', '(', 'self', '.', 'connection', '(', ')', ')', 'for', 'attempt', 'in', '(', '1', ',', '2', ',', '3', ')', ':', 'try', ':', 'return', 'await', 'controller_facade', '.', 'AllModels', '(', ')', 'except', 'errors', '.', 'JujuAPIError', 'as', 'e', ':', '# retry concurrency error until resolved in Juju', '# see: https://bugs.launchpad.net/juju/+bug/1721786', 'if', "'has been removed'", 'not', 'in', 'e', '.', 'message', 'or', 'attempt', '==', '3', ':', 'raise'] | .. deprecated:: 0.7.0
Use :meth:`.list_models` instead. | ['..', 'deprecated', '::', '0', '.', '7', '.', '0', 'Use', ':', 'meth', ':', '.', 'list_models', 'instead', '.'] | train | https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/controller.py#L435-L449 |
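A usage sketch (the method is deprecated in favour of list_models); connecting to the current controller is standard python-libjuju, the rest is illustrative:

import asyncio
from juju.controller import Controller

async def main():
    controller = Controller()
    await controller.connect()               # current local controller
    models = await controller.get_models()   # deprecated; prefer list_models()
    await controller.disconnect()

asyncio.get_event_loop().run_until_complete(main())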
488 | jasonrbriggs/stomp.py | stomp/transport.py | Transport.receive | def receive(self):
"""
:rtype: bytes
"""
try:
return self.socket.recv(self.__recv_bytes)
except socket.error:
_, e, _ = sys.exc_info()
if get_errno(e) in (errno.EAGAIN, errno.EINTR):
log.debug("socket read interrupted, restarting")
raise exception.InterruptedException()
if self.is_connected():
raise | python | def receive(self):
"""
:rtype: bytes
"""
try:
return self.socket.recv(self.__recv_bytes)
except socket.error:
_, e, _ = sys.exc_info()
if get_errno(e) in (errno.EAGAIN, errno.EINTR):
log.debug("socket read interrupted, restarting")
raise exception.InterruptedException()
if self.is_connected():
raise | ['def', 'receive', '(', 'self', ')', ':', 'try', ':', 'return', 'self', '.', 'socket', '.', 'recv', '(', 'self', '.', '__recv_bytes', ')', 'except', 'socket', '.', 'error', ':', '_', ',', 'e', ',', '_', '=', 'sys', '.', 'exc_info', '(', ')', 'if', 'get_errno', '(', 'e', ')', 'in', '(', 'errno', '.', 'EAGAIN', ',', 'errno', '.', 'EINTR', ')', ':', 'log', '.', 'debug', '(', '"socket read interrupted, restarting"', ')', 'raise', 'exception', '.', 'InterruptedException', '(', ')', 'if', 'self', '.', 'is_connected', '(', ')', ':', 'raise'] | :rtype: bytes | [':', 'rtype', ':', 'bytes'] | train | https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/transport.py#L645-L657 |
489 | eugene-eeo/piggyback | piggyback/loader.py | import_module | def import_module(path):
"""
Import a module given a dotted *path* in the
form of ``.name(.name)*``, and returns the
last module (unlike ``__import__`` which just
returns the first module).
:param path: The dotted path to the module.
"""
mod = __import__(path, locals={}, globals={})
for item in path.split('.')[1:]:
try:
mod = getattr(mod, item)
except AttributeError:
raise ImportError('No module named %s' % path)
return mod | python | def import_module(path):
"""
Import a module given a dotted *path* in the
form of ``.name(.name)*``, and returns the
last module (unlike ``__import__`` which just
returns the first module).
:param path: The dotted path to the module.
"""
mod = __import__(path, locals={}, globals={})
for item in path.split('.')[1:]:
try:
mod = getattr(mod, item)
except AttributeError:
raise ImportError('No module named %s' % path)
return mod | ['def', 'import_module', '(', 'path', ')', ':', 'mod', '=', '__import__', '(', 'path', ',', 'locals', '=', '{', '}', ',', 'globals', '=', '{', '}', ')', 'for', 'item', 'in', 'path', '.', 'split', '(', "'.'", ')', '[', '1', ':', ']', ':', 'try', ':', 'mod', '=', 'getattr', '(', 'mod', ',', 'item', ')', 'except', 'AttributeError', ':', 'raise', 'ImportError', '(', "'No module named %s'", '%', 'path', ')', 'return', 'mod'] | Import a module given a dotted *path* in the
form of ``.name(.name)*``, and returns the
last module (unlike ``__import__`` which just
returns the first module).
:param path: The dotted path to the module. | ['Import', 'a', 'module', 'given', 'a', 'dotted', '*', 'path', '*', 'in', 'the', 'form', 'of', '.', 'name', '(', '.', 'name', ')', '*', 'and', 'returns', 'the', 'last', 'module', '(', 'unlike', '__import__', 'which', 'just', 'returns', 'the', 'first', 'module', ')', '.'] | train | https://github.com/eugene-eeo/piggyback/blob/0e5efe40a37aa0373d860b8dabab1282bf074270/piggyback/loader.py#L34-L49 |
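Why the helper exists — bare __import__ returns the first package rather than the leaf module (output assumes a POSIX system):

from piggyback.loader import import_module

mod = import_module('os.path')
print(mod.join('a', 'b'))                         # 'a/b' -- the leaf module is returned
print(__import__('os.path').path.join('a', 'b'))  # bare __import__ needs the extra hop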
490 | opereto/pyopereto | pyopereto/client.py | OperetoClient.modify_agent | def modify_agent(self, agent_id, **kwargs):
'''
modify_agent(self, agent_id, **kwargs)
| Modifies agent information (like name)
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
:Example:
.. code-block:: python
opereto_client = OperetoClient()
opereto_client.modify_agent('agentId', name='my new name')
'''
request_data = {'id': agent_id}
request_data.update(**kwargs)
return self._call_rest_api('post', '/agents'+'', data=request_data, error='Failed to modify agent [%s]'%agent_id) | python | def modify_agent(self, agent_id, **kwargs):
'''
modify_agent(self, agent_id, **kwargs)
| Modifies agent information (like name)
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
:Example:
.. code-block:: python
opereto_client = OperetoClient()
opereto_client.modify_agent('agentId', name='my new name')
'''
request_data = {'id': agent_id}
request_data.update(**kwargs)
return self._call_rest_api('post', '/agents'+'', data=request_data, error='Failed to modify agent [%s]'%agent_id) | ['def', 'modify_agent', '(', 'self', ',', 'agent_id', ',', '*', '*', 'kwargs', ')', ':', 'request_data', '=', '{', "'id'", ':', 'agent_id', '}', 'request_data', '.', 'update', '(', '*', '*', 'kwargs', ')', 'return', 'self', '.', '_call_rest_api', '(', "'post'", ',', "'/agents'", '+', "''", ',', 'data', '=', 'request_data', ',', 'error', '=', "'Failed to modify agent [%s]'", '%', 'agent_id', ')'] | modify_agent(self, agent_id, **kwargs)
| Modifies agent information (like name)
:Parameters:
* *agent_id* (`string`) -- Identifier of an existing agent
:Example:
.. code-block:: python
opereto_client = OperetoClient()
opereto_client.modify_agent('agentId', name='my new name') | ['modify_agent', '(', 'self', 'agent_id', '**', 'kwargs', ')'] | train | https://github.com/opereto/pyopereto/blob/16ca987738a7e1b82b52b0b099794a74ed557223/pyopereto/client.py#L856-L874 |
491 | inasafe/inasafe | safe/gis/generic_expressions.py | beautify_date | def beautify_date(inasafe_time, feature, parent):
"""Given an InaSAFE analysis time, it will convert it to a date with
year-month-day format.
For instance:
* beautify_date( @start_datetime ) -> will convert datetime provided by
qgis_variable.
"""
_ = feature, parent # NOQA
datetime_object = parse(inasafe_time)
date = datetime_object.strftime('%Y-%m-%d')
return date | python | def beautify_date(inasafe_time, feature, parent):
"""Given an InaSAFE analysis time, it will convert it to a date with
year-month-day format.
For instance:
* beautify_date( @start_datetime ) -> will convert datetime provided by
qgis_variable.
"""
_ = feature, parent # NOQA
datetime_object = parse(inasafe_time)
date = datetime_object.strftime('%Y-%m-%d')
return date | ['def', 'beautify_date', '(', 'inasafe_time', ',', 'feature', ',', 'parent', ')', ':', '_', '=', 'feature', ',', 'parent', '# NOQA', 'datetime_object', '=', 'parse', '(', 'inasafe_time', ')', 'date', '=', 'datetime_object', '.', 'strftime', '(', "'%Y-%m-%d'", ')', 'return', 'date'] | Given an InaSAFE analysis time, it will convert it to a date with
year-month-day format.
For instance:
* beautify_date( @start_datetime ) -> will convert datetime provided by
qgis_variable. | ['Given', 'an', 'InaSAFE', 'analysis', 'time', 'it', 'will', 'convert', 'it', 'to', 'a', 'date', 'with', 'year', '-', 'month', '-', 'day', 'format', '.'] | train | https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gis/generic_expressions.py#L279-L290 |
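The core conversion is dateutil parsing plus strftime (the source's parse is assumed to be dateutil.parser.parse); the timestamp is illustrative:

from dateutil.parser import parse

print(parse('2018-12-09T08:00:00+07:00').strftime('%Y-%m-%d'))  # 2018-12-09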
492 | lehins/django-smartfields | smartfields/models.py | SmartfieldsModelMixin.smartfields_get_field_status | def smartfields_get_field_status(self, field_name):
"""A way to find out a status of a filed."""
manager = self._smartfields_managers.get(field_name, None)
if manager is not None:
return manager.get_status(self)
return {'state': 'ready'} | python | def smartfields_get_field_status(self, field_name):
"""A way to find out a status of a filed."""
manager = self._smartfields_managers.get(field_name, None)
if manager is not None:
return manager.get_status(self)
return {'state': 'ready'} | ['def', 'smartfields_get_field_status', '(', 'self', ',', 'field_name', ')', ':', 'manager', '=', 'self', '.', '_smartfields_managers', '.', 'get', '(', 'field_name', ',', 'None', ')', 'if', 'manager', 'is', 'not', 'None', ':', 'return', 'manager', '.', 'get_status', '(', 'self', ')', 'return', '{', "'state'", ':', "'ready'", '}'] | A way to find out the status of a field. | ['A', 'way', 'to', 'find', 'out', 'the', 'status', 'of', 'a', 'field', '.'] | train | https://github.com/lehins/django-smartfields/blob/23d4b0b18352f4f40ce8c429735e673ba5191502/smartfields/models.py#L56-L61 |
493 | MycroftAI/adapt | adapt/engine.py | DomainIntentDeterminationEngine._regex_strings | def _regex_strings(self):
"""
A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain]._regex_strings | python | def _regex_strings(self):
"""
A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine
"""
domain = 0
if domain not in self.domains:
self.register_domain(domain=domain)
return self.domains[domain]._regex_strings | ['def', '_regex_strings', '(', 'self', ')', ':', 'domain', '=', '0', 'if', 'domain', 'not', 'in', 'self', '.', 'domains', ':', 'self', '.', 'register_domain', '(', 'domain', '=', 'domain', ')', 'return', 'self', '.', 'domains', '[', 'domain', ']', '.', '_regex_strings'] | A property to link into IntentEngine's _regex_strings.
Warning: this is only for backwards compatibility and should not be used if you
intend on using domains.
Returns: the domains _regex_strings from its IntentEngine | ['A', 'property', 'to', 'link', 'into', 'IntentEngine', 's', '_regex_strings', '.'] | train | https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L258-L270 |
494 | tensorflow/cleverhans | scripts/make_confidence_report_bundled.py | main | def main(argv=None):
"""
Make a confidence report and save it to disk.
"""
try:
_name_of_script, filepath = argv
except ValueError:
raise ValueError(argv)
print(filepath)
make_confidence_report_bundled(filepath=filepath,
test_start=FLAGS.test_start,
test_end=FLAGS.test_end,
which_set=FLAGS.which_set,
recipe=FLAGS.recipe,
report_path=FLAGS.report_path, batch_size=FLAGS.batch_size) | python | def main(argv=None):
"""
Make a confidence report and save it to disk.
"""
try:
_name_of_script, filepath = argv
except ValueError:
raise ValueError(argv)
print(filepath)
make_confidence_report_bundled(filepath=filepath,
test_start=FLAGS.test_start,
test_end=FLAGS.test_end,
which_set=FLAGS.which_set,
recipe=FLAGS.recipe,
report_path=FLAGS.report_path, batch_size=FLAGS.batch_size) | ['def', 'main', '(', 'argv', '=', 'None', ')', ':', 'try', ':', '_name_of_script', ',', 'filepath', '=', 'argv', 'except', 'ValueError', ':', 'raise', 'ValueError', '(', 'argv', ')', 'print', '(', 'filepath', ')', 'make_confidence_report_bundled', '(', 'filepath', '=', 'filepath', ',', 'test_start', '=', 'FLAGS', '.', 'test_start', ',', 'test_end', '=', 'FLAGS', '.', 'test_end', ',', 'which_set', '=', 'FLAGS', '.', 'which_set', ',', 'recipe', '=', 'FLAGS', '.', 'recipe', ',', 'report_path', '=', 'FLAGS', '.', 'report_path', ',', 'batch_size', '=', 'FLAGS', '.', 'batch_size', ')'] | Make a confidence report and save it to disk. | ['Make', 'a', 'confidence', 'report', 'and', 'save', 'it', 'to', 'disk', '.'] | train | https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/scripts/make_confidence_report_bundled.py#L42-L56 |
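Invocation sketch — the script expects exactly one positional argument, the path of the saved model (file name hypothetical):

# python make_confidence_report_bundled.py model.joblib
#   -> main() receives argv == [script_name, 'model.joblib']
#   any other argv shape raises ValueError(argv)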
495 | blockstack/blockstack-core | blockstack/lib/client.py | get_atlas_peers | def get_atlas_peers(hostport, timeout=30, my_hostport=None, proxy=None):
"""
Get an atlas peer's neighbors.
Return {'status': True, 'peers': [peers]} on success.
Return {'error': ...} on error
"""
assert hostport or proxy, 'need either hostport or proxy'
peers_schema = {
'type': 'object',
'properties': {
'peers': {
'type': 'array',
'items': {
'type': 'string',
'pattern': '^([^:]+):([1-9][0-9]{1,4})$',
},
},
},
'required': [
'peers'
],
}
schema = json_response_schema( peers_schema )
if proxy is None:
proxy = connect_hostport(hostport)
peers = None
try:
peer_list_resp = proxy.get_atlas_peers()
peer_list_resp = json_validate(schema, peer_list_resp)
if json_is_error(peer_list_resp):
return peer_list_resp
# verify that all strings are host:ports
for peer_hostport in peer_list_resp['peers']:
peer_host, peer_port = url_to_host_port(peer_hostport)
if peer_host is None or peer_port is None:
return {'error': 'Server did not return valid Atlas peers', 'http_status': 503}
peers = peer_list_resp
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node {}. Try again with `--debug`.'.format(hostport), 'http_status': 500}
return resp
return peers | python | def get_atlas_peers(hostport, timeout=30, my_hostport=None, proxy=None):
"""
Get an atlas peer's neighbors.
Return {'status': True, 'peers': [peers]} on success.
Return {'error': ...} on error
"""
assert hostport or proxy, 'need either hostport or proxy'
peers_schema = {
'type': 'object',
'properties': {
'peers': {
'type': 'array',
'items': {
'type': 'string',
'pattern': '^([^:]+):([1-9][0-9]{1,4})$',
},
},
},
'required': [
'peers'
],
}
schema = json_response_schema( peers_schema )
if proxy is None:
proxy = connect_hostport(hostport)
peers = None
try:
peer_list_resp = proxy.get_atlas_peers()
peer_list_resp = json_validate(schema, peer_list_resp)
if json_is_error(peer_list_resp):
return peer_list_resp
# verify that all strings are host:ports
for peer_hostport in peer_list_resp['peers']:
peer_host, peer_port = url_to_host_port(peer_hostport)
if peer_host is None or peer_port is None:
return {'error': 'Server did not return valid Atlas peers', 'http_status': 503}
peers = peer_list_resp
except ValidationError as ve:
if BLOCKSTACK_DEBUG:
log.exception(ve)
resp = {'error': 'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.', 'http_status': 502}
return resp
except socket.timeout:
log.error("Connection timed out")
resp = {'error': 'Connection to remote host timed out.', 'http_status': 503}
return resp
except socket.error as se:
log.error("Connection error {}".format(se.errno))
resp = {'error': 'Connection to remote host failed.', 'http_status': 502}
return resp
except Exception as ee:
if BLOCKSTACK_DEBUG:
log.exception(ee)
log.error("Caught exception while connecting to Blockstack node: {}".format(ee))
resp = {'error': 'Failed to contact Blockstack node {}. Try again with `--debug`.'.format(hostport), 'http_status': 500}
return resp
return peers | ['def', 'get_atlas_peers', '(', 'hostport', ',', 'timeout', '=', '30', ',', 'my_hostport', '=', 'None', ',', 'proxy', '=', 'None', ')', ':', 'assert', 'hostport', 'or', 'proxy', ',', "'need either hostport or proxy'", 'peers_schema', '=', '{', "'type'", ':', "'object'", ',', "'properties'", ':', '{', "'peers'", ':', '{', "'type'", ':', "'array'", ',', "'items'", ':', '{', "'type'", ':', "'string'", ',', "'pattern'", ':', "'^([^:]+):([1-9][0-9]{1,4})$'", ',', '}', ',', '}', ',', '}', ',', "'required'", ':', '[', "'peers'", ']', ',', '}', 'schema', '=', 'json_response_schema', '(', 'peers_schema', ')', 'if', 'proxy', 'is', 'None', ':', 'proxy', '=', 'connect_hostport', '(', 'hostport', ')', 'peers', '=', 'None', 'try', ':', 'peer_list_resp', '=', 'proxy', '.', 'get_atlas_peers', '(', ')', 'peer_list_resp', '=', 'json_validate', '(', 'schema', ',', 'peer_list_resp', ')', 'if', 'json_is_error', '(', 'peer_list_resp', ')', ':', 'return', 'peer_list_resp', '# verify that all strings are host:ports', 'for', 'peer_hostport', 'in', 'peer_list_resp', '[', "'peers'", ']', ':', 'peer_host', ',', 'peer_port', '=', 'url_to_host_port', '(', 'peer_hostport', ')', 'if', 'peer_host', 'is', 'None', 'or', 'peer_port', 'is', 'None', ':', 'return', '{', "'error'", ':', "'Server did not return valid Atlas peers'", ',', "'http_status'", ':', '503', '}', 'peers', '=', 'peer_list_resp', 'except', 'ValidationError', 'as', 've', ':', 'if', 'BLOCKSTACK_DEBUG', ':', 'log', '.', 'exception', '(', 've', ')', 'resp', '=', '{', "'error'", ':', "'Server response did not match expected schema. You are likely communicating with an out-of-date Blockstack node.'", ',', "'http_status'", ':', '502', '}', 'return', 'resp', 'except', 'socket', '.', 'timeout', ':', 'log', '.', 'error', '(', '"Connection timed out"', ')', 'resp', '=', '{', "'error'", ':', "'Connection to remote host timed out.'", ',', "'http_status'", ':', '503', '}', 'return', 'resp', 'except', 'socket', '.', 'error', 'as', 'se', ':', 'log', '.', 'error', '(', '"Connection error {}"', '.', 'format', '(', 'se', '.', 'errno', ')', ')', 'resp', '=', '{', "'error'", ':', "'Connection to remote host failed.'", ',', "'http_status'", ':', '502', '}', 'return', 'resp', 'except', 'Exception', 'as', 'ee', ':', 'if', 'BLOCKSTACK_DEBUG', ':', 'log', '.', 'exception', '(', 'ee', ')', 'log', '.', 'error', '(', '"Caught exception while connecting to Blockstack node: {}"', '.', 'format', '(', 'ee', ')', ')', 'resp', '=', '{', "'error'", ':', "'Failed to contact Blockstack node {}. Try again with `--debug`.'", '.', 'format', '(', 'hostport', ')', ',', "'http_status'", ':', '500', '}', 'return', 'resp', 'return', 'peers'] | Get an atlas peer's neighbors.
Return {'status': True, 'peers': [peers]} on success.
Return {'error': ...} on error | ['Get', 'an', 'atlas', 'peer', 's', 'neighbors', '.', 'Return', '{', 'status', ':', 'True', 'peers', ':', '[', 'peers', ']', '}', 'on', 'success', '.', 'Return', '{', 'error', ':', '...', '}', 'on', 'error'] | train | https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L648-L717 |
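A usage sketch following the documented return shapes; the hostport is illustrative:

from blockstack.lib.client import get_atlas_peers

resp = get_atlas_peers('node.blockstack.org:6264')
if 'error' in resp:
    print('lookup failed:', resp['error'])
else:
    for peer_hostport in resp['peers']:  # each entry is validated as host:port
        print(peer_hostport)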
496 | QUANTAXIS/QUANTAXIS | QUANTAXIS/QAFetch/QATdx.py | QA_fetch_get_hkindex_list | def QA_fetch_get_hkindex_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong stocks HKMARKET
27 5 Hong Kong indices FH
31 2 Hong Kong Main Board KH
48 2 Hong Kong GEM KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==27') | python | def QA_fetch_get_hkindex_list(ip=None, port=None):
"""[summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong stocks HKMARKET
27 5 Hong Kong indices FH
31 2 Hong Kong Main Board KH
48 2 Hong Kong GEM KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB
"""
global extension_market_list
extension_market_list = QA_fetch_get_extensionmarket_list(
) if extension_market_list is None else extension_market_list
return extension_market_list.query('market==27') | ['def', 'QA_fetch_get_hkindex_list', '(', 'ip', '=', 'None', ',', 'port', '=', 'None', ')', ':', 'global', 'extension_market_list', 'extension_market_list', '=', 'QA_fetch_get_extensionmarket_list', '(', ')', 'if', 'extension_market_list', 'is', 'None', 'else', 'extension_market_list', 'return', 'extension_market_list', '.', 'query', '(', "'market==27'", ')'] | [summary]
Keyword Arguments:
ip {[type]} -- [description] (default: {None})
port {[type]} -- [description] (default: {None})
# Hong Kong stocks HKMARKET
27 5 Hong Kong indices FH
31 2 Hong Kong Main Board KH
48 2 Hong Kong GEM KG
49 2 Hong Kong funds KT
43 1 B-share to H-share HB | ['[', 'summary', ']'] | train | https://github.com/QUANTAXIS/QUANTAXIS/blob/bb1fe424e4108b62a1f712b81a05cf829297a5c0/QUANTAXIS/QAFetch/QATdx.py#L1308-L1328 |
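Calling the function returns a pandas DataFrame filtered to market 27 (Hong Kong indices); a sketch that assumes a reachable TDX server:

from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_hkindex_list

hk_indices = QA_fetch_get_hkindex_list()  # extension-market rows with market == 27
print(hk_indices.head())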
497 | saltstack/salt | salt/modules/netscaler.py | _servicegroup_get_servers | def _servicegroup_get_servers(sg_name, **connection_args):
'''
Returns a list of members of a servicegroup or None
'''
nitro = _connect(**connection_args)
if nitro is None:
return None
sg = NSServiceGroup()
sg.set_servicegroupname(sg_name)
try:
sg = NSServiceGroup.get_servers(nitro, sg)
except NSNitroError as error:
log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error)
sg = None
_disconnect(nitro)
return sg | python | def _servicegroup_get_servers(sg_name, **connection_args):
'''
Returns a list of members of a servicegroup or None
'''
nitro = _connect(**connection_args)
if nitro is None:
return None
sg = NSServiceGroup()
sg.set_servicegroupname(sg_name)
try:
sg = NSServiceGroup.get_servers(nitro, sg)
except NSNitroError as error:
log.debug('netscaler module error - NSServiceGroup.get_servers failed(): %s', error)
sg = None
_disconnect(nitro)
return sg | ['def', '_servicegroup_get_servers', '(', 'sg_name', ',', '*', '*', 'connection_args', ')', ':', 'nitro', '=', '_connect', '(', '*', '*', 'connection_args', ')', 'if', 'nitro', 'is', 'None', ':', 'return', 'None', 'sg', '=', 'NSServiceGroup', '(', ')', 'sg', '.', 'set_servicegroupname', '(', 'sg_name', ')', 'try', ':', 'sg', '=', 'NSServiceGroup', '.', 'get_servers', '(', 'nitro', ',', 'sg', ')', 'except', 'NSNitroError', 'as', 'error', ':', 'log', '.', 'debug', '(', "'netscaler module error - NSServiceGroup.get_servers failed(): %s'", ',', 'error', ')', 'sg', '=', 'None', '_disconnect', '(', 'nitro', ')', 'return', 'sg'] | Returns a list of members of a servicegroup or None | ['Returns', 'a', 'list', 'of', 'members', 'of', 'a', 'servicegroup', 'or', 'None'] | train | https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/netscaler.py#L162-L177 |
498 | dw/mitogen | mitogen/core.py | Router._setup_logging | def _setup_logging(self):
"""
This is done in the :class:`Router` constructor for historical reasons.
It must be called before ExternalContext logs its first messages, but
after logging has been setup. It must also be called when any router is
constructed for a consumer app.
"""
# Here seems as good a place as any.
global _v, _vv
_v = logging.getLogger().level <= logging.DEBUG
_vv = IOLOG.level <= logging.DEBUG | python | def _setup_logging(self):
"""
This is done in the :class:`Router` constructor for historical reasons.
It must be called before ExternalContext logs its first messages, but
after logging has been setup. It must also be called when any router is
constructed for a consumer app.
"""
# Here seems as good a place as any.
global _v, _vv
_v = logging.getLogger().level <= logging.DEBUG
_vv = IOLOG.level <= logging.DEBUG | ['def', '_setup_logging', '(', 'self', ')', ':', '# Here seems as good a place as any.', 'global', '_v', ',', '_vv', '_v', '=', 'logging', '.', 'getLogger', '(', ')', '.', 'level', '<=', 'logging', '.', 'DEBUG', '_vv', '=', 'IOLOG', '.', 'level', '<=', 'logging', '.', 'DEBUG'] | This is done in the :class:`Router` constructor for historical reasons.
It must be called before ExternalContext logs its first messages, but
after logging has been setup. It must also be called when any router is
constructed for a consumer app. | ['This', 'is', 'done', 'in', 'the', ':', 'class', ':', 'Router', 'constructor', 'for', 'historical', 'reasons', '.', 'It', 'must', 'be', 'called', 'before', 'ExternalContext', 'logs', 'its', 'first', 'messages', 'but', 'after', 'logging', 'has', 'been', 'setup', '.', 'It', 'must', 'also', 'be', 'called', 'when', 'any', 'router', 'is', 'constructed', 'for', 'a', 'consumer', 'app', '.'] | train | https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/mitogen/core.py#L2499-L2509 |
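Because _setup_logging snapshots logger levels into module globals, verbosity must be configured before the router is constructed — a sketch:

import logging
import mitogen.master

logging.getLogger().setLevel(logging.DEBUG)  # must happen first
broker = mitogen.master.Broker()
router = mitogen.master.Router(broker)       # Router construction runs _setup_logging()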
499 | gitpython-developers/GitPython | git/objects/tree.py | Tree.join | def join(self, file):
"""Find the named object in this tree's contents
:return: ``git.Blob`` or ``git.Tree`` or ``git.Submodule``
:raise KeyError: if given file or tree does not exist in tree"""
msg = "Blob or Tree named %r not found"
if '/' in file:
tree = self
item = self
tokens = file.split('/')
for i, token in enumerate(tokens):
item = tree[token]
if item.type == 'tree':
tree = item
else:
# safety assertion - blobs are at the end of the path
if i != len(tokens) - 1:
raise KeyError(msg % file)
return item
# END handle item type
# END for each token of split path
if item == self:
raise KeyError(msg % file)
return item
else:
for info in self._cache:
if info[2] == file: # [2] == name
return self._map_id_to_type[info[1] >> 12](self.repo, info[0], info[1],
join_path(self.path, info[2]))
# END for each obj
raise KeyError(msg % file) | python | def join(self, file):
"""Find the named object in this tree's contents
:return: ``git.Blob`` or ``git.Tree`` or ``git.Submodule``
:raise KeyError: if given file or tree does not exist in tree"""
msg = "Blob or Tree named %r not found"
if '/' in file:
tree = self
item = self
tokens = file.split('/')
for i, token in enumerate(tokens):
item = tree[token]
if item.type == 'tree':
tree = item
else:
# safety assertion - blobs are at the end of the path
if i != len(tokens) - 1:
raise KeyError(msg % file)
return item
# END handle item type
# END for each token of split path
if item == self:
raise KeyError(msg % file)
return item
else:
for info in self._cache:
if info[2] == file: # [2] == name
return self._map_id_to_type[info[1] >> 12](self.repo, info[0], info[1],
join_path(self.path, info[2]))
# END for each obj
raise KeyError(msg % file) | ['def', 'join', '(', 'self', ',', 'file', ')', ':', 'msg', '=', '"Blob or Tree named %r not found"', 'if', "'/'", 'in', 'file', ':', 'tree', '=', 'self', 'item', '=', 'self', 'tokens', '=', 'file', '.', 'split', '(', "'/'", ')', 'for', 'i', ',', 'token', 'in', 'enumerate', '(', 'tokens', ')', ':', 'item', '=', 'tree', '[', 'token', ']', 'if', 'item', '.', 'type', '==', "'tree'", ':', 'tree', '=', 'item', 'else', ':', '# safety assertion - blobs are at the end of the path', 'if', 'i', '!=', 'len', '(', 'tokens', ')', '-', '1', ':', 'raise', 'KeyError', '(', 'msg', '%', 'file', ')', 'return', 'item', '# END handle item type', '# END for each token of split path', 'if', 'item', '==', 'self', ':', 'raise', 'KeyError', '(', 'msg', '%', 'file', ')', 'return', 'item', 'else', ':', 'for', 'info', 'in', 'self', '.', '_cache', ':', 'if', 'info', '[', '2', ']', '==', 'file', ':', '# [2] == name', 'return', 'self', '.', '_map_id_to_type', '[', 'info', '[', '1', ']', '>>', '12', ']', '(', 'self', '.', 'repo', ',', 'info', '[', '0', ']', ',', 'info', '[', '1', ']', ',', 'join_path', '(', 'self', '.', 'path', ',', 'info', '[', '2', ']', ')', ')', '# END for each obj', 'raise', 'KeyError', '(', 'msg', '%', 'file', ')'] | Find the named object in this tree's contents
:return: ``git.Blob`` or ``git.Tree`` or ``git.Submodule``
:raise KeyError: if given file or tree does not exist in tree | ['Find', 'the', 'named', 'object', 'in', 'this', 'tree', 's', 'contents', ':', 'return', ':', 'git', '.', 'Blob', 'or', 'git', '.', 'Tree', 'or', 'git', '.', 'Submodule'] | train | https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/objects/tree.py#L214-L244 |
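A usage sketch against a local repository; the file names are hypothetical:

import git

tree = git.Repo('.').head.commit.tree
readme = tree.join('README.md')       # direct child lookup
doc = tree.join('docs/index.rst')     # '/' walks through nested trees
tree.join('missing.txt')              # raises KeyError("Blob or Tree named ...")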