def check_runner(self):
"""Make sure there is a runner."""
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
if self.runner is None:
        self.runner = Runner(self.comp, exit=self.exit_runner, store=self.mypy)
def load_nameserver_credentials(self, working_directory, num_tries=60, interval=1):
"""
loads the nameserver credentials in cases where master and workers share a filesystem
Parameters
----------
working_directory: str
the working directory for the HPB run (see master)
num_tries: int
number of attempts to find the file (default 60)
interval: float
waiting period between the attempts
"""
fn = os.path.join(working_directory, 'HPB_run_%s_pyro.pkl'%self.run_id)
for i in range(num_tries):
try:
with open(fn, 'rb') as fh:
self.nameserver, self.nameserver_port = pickle.load(fh)
return
except FileNotFoundError:
            self.logger.warning('config file %s not found (try %i/%i)'%(fn, i+1, num_tries))
time.sleep(interval)
except:
raise
    raise RuntimeError("Could not find the nameserver information, aborting!")
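# A standalone sketch of the same retry-until-the-file-appears pattern used
# above (hypothetical helper name; standard library only):
import pickle
import time

def wait_for_pickle(fn, num_tries=60, interval=1):
    """Poll for a pickled file shared via a filesystem and return its contents."""
    for _ in range(num_tries):
        try:
            with open(fn, 'rb') as fh:
                return pickle.load(fh)
        except FileNotFoundError:
            time.sleep(interval)
    raise RuntimeError("File %s never appeared" % fn)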
def to_element(self, include_namespaces=False):
"""Return an ElementTree Element representing this instance.
Args:
include_namespaces (bool, optional): If True, include xml
namespace attributes on the root element
Return:
~xml.etree.ElementTree.Element: an Element.
"""
elt_attrib = {}
if include_namespaces:
elt_attrib.update({
'xmlns': "urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/",
'xmlns:dc': "http://purl.org/dc/elements/1.1/",
'xmlns:upnp': "urn:schemas-upnp-org:metadata-1-0/upnp/",
})
elt_attrib.update({
'parentID': self.parent_id,
'restricted': 'true' if self.restricted else 'false',
'id': self.item_id
})
elt = XML.Element(self.tag, elt_attrib)
# Add the title, which should always come first, according to the spec
XML.SubElement(elt, 'dc:title').text = self.title
# Add in any resources
for resource in self.resources:
elt.append(resource.to_element())
# Add the rest of the metadata attributes (i.e all those listed in
# _translation) as sub-elements of the item element.
for key, value in self._translation.items():
if hasattr(self, key):
# Some attributes have a namespace of '', which means they
# are in the default namespace. We need to handle those
# carefully
tag = "%s:%s" % value if value[0] else "%s" % value[1]
XML.SubElement(elt, tag).text = ("%s" % getattr(self, key))
# Now add in the item class
XML.SubElement(elt, 'upnp:class').text = self.item_class
# And the desc element
desc_attrib = {'id': 'cdudn', 'nameSpace':
'urn:schemas-rinconnetworks-com:metadata-1-0/'}
desc_elt = XML.SubElement(elt, 'desc', desc_attrib)
desc_elt.text = self.desc
    return elt
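# A minimal sketch of turning the Element returned by to_element() into a
# DIDL-Lite string (assumes `item` is an instance of the class above and that
# XML is xml.etree.ElementTree, as the code suggests):
# didl = XML.tostring(item.to_element(include_namespaces=True), encoding='unicode')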
def get_utc_timestamp(self, handle):
"""Return the UTC timestamp."""
fpath = self._fpath_from_handle(handle)
datetime_obj = datetime.datetime.utcfromtimestamp(
os.stat(fpath).st_mtime
)
    return timestamp(datetime_obj)
def _find_function(name,
region=None, key=None, keyid=None, profile=None):
'''
Given function name, find and return matching Lambda information.
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
for funcs in __utils__['boto3.paged_call'](conn.list_functions):
for func in funcs['Functions']:
if func['FunctionName'] == name:
return func
    return None
def get_characters(self, *args, **kwargs):
"""
Returns a full CharacterDataWrapper object for this story.
/stories/{storyId}/characters
:returns: CharacterDataWrapper -- A new request to API. Contains full results set.
"""
from .character import Character, CharacterDataWrapper
    return self.get_related_resource(Character, CharacterDataWrapper, args, kwargs)
def check_log_files_and_publish_updates(self):
"""Get any changes to the log files and push updates to Redis.
Returns:
        True if anything was published and False otherwise.
"""
anything_published = False
for file_info in self.open_file_infos:
assert not file_info.file_handle.closed
lines_to_publish = []
max_num_lines_to_read = 100
for _ in range(max_num_lines_to_read):
next_line = file_info.file_handle.readline()
if next_line == "":
break
if next_line[-1] == "\n":
next_line = next_line[:-1]
lines_to_publish.append(next_line)
# Publish the lines if this is a worker process.
filename = file_info.filename.split("/")[-1]
is_worker = (filename.startswith("worker")
and (filename.endswith("out")
or filename.endswith("err")))
if is_worker and file_info.file_position == 0:
if (len(lines_to_publish) > 0 and
lines_to_publish[0].startswith("Ray worker pid: ")):
file_info.worker_pid = int(
lines_to_publish[0].split(" ")[-1])
lines_to_publish = lines_to_publish[1:]
# Record the current position in the file.
file_info.file_position = file_info.file_handle.tell()
if len(lines_to_publish) > 0 and is_worker:
self.redis_client.publish(
ray.gcs_utils.LOG_FILE_CHANNEL,
json.dumps({
"ip": self.ip,
"pid": file_info.worker_pid,
"lines": lines_to_publish
}))
anything_published = True
    return anything_published
def process(meta):
"""Saves metadata fields in global variables and returns a few
computed fields."""
# pylint: disable=global-statement
global capitalize
global use_cleveref_default
global plusname
global starname
global numbersections
# Read in the metadata fields and do some checking
for name in ['eqnos-cleveref', 'xnos-cleveref', 'cleveref']:
# 'xnos-cleveref' enables cleveref in all 3 of fignos/eqnos/tablenos
# 'cleveref' is deprecated
if name in meta:
use_cleveref_default = check_bool(get_meta(meta, name))
break
for name in ['eqnos-capitalize', 'eqnos-capitalise',
'xnos-capitalize', 'xnos-capitalise']:
# 'eqnos-capitalise' is an alternative spelling
# 'xnos-capitalise' enables capitalise in all 3 of fignos/eqnos/tablenos
# 'xnos-capitalize' is an alternative spelling
if name in meta:
capitalize = check_bool(get_meta(meta, name))
break
if 'eqnos-plus-name' in meta:
tmp = get_meta(meta, 'eqnos-plus-name')
if isinstance(tmp, list):
plusname = tmp
else:
plusname[0] = tmp
assert len(plusname) == 2
for name in plusname:
assert isinstance(name, STRTYPES)
if 'eqnos-star-name' in meta:
tmp = get_meta(meta, 'eqnos-star-name')
if isinstance(tmp, list):
starname = tmp
else:
starname[0] = tmp
assert len(starname) == 2
for name in starname:
assert isinstance(name, STRTYPES)
if 'xnos-number-sections' in meta:
        numbersections = check_bool(get_meta(meta, 'xnos-number-sections'))
def decode_jwt(encoded_token):
"""
Returns the decoded token from an encoded one. This does all the checks
    to ensure that the decoded token is valid before returning it.
"""
secret = config.decode_key
algorithm = config.algorithm
audience = config.audience
    return jwt.decode(encoded_token, secret, algorithms=[algorithm], audience=audience)
def validate(self):
""" Validate filter condition (template method).
"""
super(NumericFilterBase, self).validate()
self.not_null = False
if self._value.startswith('+'):
self._cmp = operator.gt
self._rt_cmp = 'greater'
self._value = self._value[1:]
elif self._value.startswith('-'):
self._cmp = operator.lt
self._rt_cmp = 'less'
self._value = self._value[1:]
else:
self._cmp = operator.eq
        self._rt_cmp = 'equal'
def ok_check(function, *args, **kwargs):
'''Ensure that the response body is OK'''
req = function(*args, **kwargs)
if req.content.lower() != 'ok':
raise ClientException(req.content)
    return req.content
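# Hedged usage sketch: wrapping an HTTP call so that any body other than 'ok'
# raises (assumes the `requests` library and the ClientException used above):
# body = ok_check(requests.get, 'http://example.com/ping', timeout=5)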
def get_alias(self):
"""
Gets the alias for the table or the auto_alias if one is set.
If there isn't any kind of alias, None is returned.
:returns: The table alias, auto_alias, or None
:rtype: str or None
"""
alias = None
if self.alias:
alias = self.alias
elif self.auto_alias:
alias = self.auto_alias
    return alias
def aes_b64_encrypt(value, secret, block_size=AES.block_size):
""" AES encrypt @value with @secret using the |CFB| mode of AES
with a cryptographically secure initialization vector.
-> (#str) AES encrypted @value
..
from vital.security import aes_encrypt, aes_decrypt
aes_encrypt("Hello, world",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw='
aes_decrypt(
"zYgVYMbeOuiHR50aMFinY9JsfyMQCvpzI+LNqNcmZhw=",
"aLWEFlwgwlreWELFNWEFWLEgwklgbweLKWEBGW")
# -> 'Hello, world'
..
"""
# iv = randstr(block_size * 2, rng=random)
iv = randstr(block_size * 2)
cipher = AES.new(secret[:32], AES.MODE_CFB, iv[:block_size].encode())
return iv + b64encode(cipher.encrypt(
        uniorbytes(value, bytes))).decode('utf-8')
def fix_multiple_files(filenames, options, output=None):
"""Fix list of files.
Optionally fix files recursively.
"""
results = []
filenames = find_files(filenames, options.recursive, options.exclude)
if options.jobs > 1:
import multiprocessing
pool = multiprocessing.Pool(options.jobs)
ret = pool.map(_fix_file, [(name, options) for name in filenames])
results.extend([x for x in ret if x is not None])
else:
for name in filenames:
ret = _fix_file((name, options, output))
if ret is None:
continue
if options.diff:
if ret != '':
results.append(ret)
elif options.in_place:
results.append(ret)
else:
original_source = readlines_from_file(name)
if "".join(original_source).splitlines() != ret.splitlines():
results.append(ret)
    return results
def extract(self, item):
"""Runs the HTML-response trough a list of initialized extractors, a cleaner and compares the results.
:param item: NewscrawlerItem to be processed.
:return: An updated NewscrawlerItem including the results of the extraction
"""
article_candidates = []
for extractor in self.extractor_list:
article_candidates.append(extractor.extract(item))
article_candidates = self.cleaner.clean(article_candidates)
article = self.comparer.compare(item, article_candidates)
item['article_title'] = article.title
item['article_description'] = article.description
item['article_text'] = article.text
item['article_image'] = article.topimage
item['article_author'] = article.author
item['article_publish_date'] = article.publish_date
item['article_language'] = article.language
    return item
def sync_subscriber(subscriber):
"""Sync a Customer with Stripe api data."""
customer, _created = Customer.get_or_create(subscriber=subscriber)
try:
customer.sync_from_stripe_data(customer.api_retrieve())
customer._sync_subscriptions()
customer._sync_invoices()
customer._sync_cards()
customer._sync_charges()
except InvalidRequestError as e:
print("ERROR: " + str(e))
    return customer
def set_remote_addr(self, dst_mac, dst_ip):
"""
Configure remote ethernet and IP addresses.
"""
self.dst_mac = dst_mac
self.dst_ip = dst_ip
if not (dst_mac == "FF:FF:FF:FF:FF:FF" or dst_ip == "255.255.255.255"):
self._remote_addr_config = True
LOG.info("[BFD][%s][REMOTE] Remote address configured: %s, %s.",
             hex(self._local_discr), self.dst_ip, self.dst_mac)
def save_to_npy_file(self, parameter_space,
result_parsing_function,
filename, runs):
"""
Save results to a numpy array file format.
"""
np.save(filename, self.get_results_as_numpy_array(
        parameter_space, result_parsing_function, runs=runs))
def GetKey(self, path, cycle=9999, rootpy=True, **kwargs):
"""
Override TDirectory's GetKey and also handle accessing keys nested
arbitrarily deep in subdirectories.
"""
key = super(_DirectoryBase, self).GetKey(path, cycle)
if not key:
raise DoesNotExist
if rootpy:
return asrootpy(key, **kwargs)
    return key
def _walk(top, topdown=True, onerror=None, followlinks=False):
"""Like Python 3.5's implementation of os.walk() -- faster than
the pre-Python 3.5 version as it uses scandir() internally.
"""
dirs = []
nondirs = []
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
scandir_it = scandir(top)
except OSError as error:
if onerror is not None:
onerror(error)
return
while True:
try:
try:
entry = next(scandir_it)
except StopIteration:
break
except OSError as error:
if onerror is not None:
onerror(error)
return
try:
is_dir = entry.is_dir()
except OSError:
# If is_dir() raises an OSError, consider that the entry is not
# a directory, same behaviour than os.path.isdir().
is_dir = False
if is_dir:
dirs.append(entry.name)
else:
nondirs.append(entry.name)
if not topdown and is_dir:
# Bottom-up: recurse into sub-directory, but exclude symlinks to
# directories if followlinks is False
if followlinks:
walk_into = True
else:
try:
is_symlink = entry.is_symlink()
except OSError:
# If is_symlink() raises an OSError, consider that the
# entry is not a symbolic link, same behaviour than
# os.path.islink().
is_symlink = False
walk_into = not is_symlink
if walk_into:
for entry in walk(entry.path, topdown, onerror, followlinks):
yield entry
# Yield before recursion if going top down
if topdown:
yield top, dirs, nondirs
# Recurse into sub-directories
for name in dirs:
new_path = join(top, name)
# Issue #23605: os.path.islink() is used instead of caching
# entry.is_symlink() result during the loop on os.scandir() because
# the caller can replace the directory entry during the "yield"
# above.
if followlinks or not islink(new_path):
for entry in walk(new_path, topdown, onerror, followlinks):
yield entry
else:
# Yield after recursion if going bottom up
        yield top, dirs, nondirs
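# Usage sketch: _walk() is meant as a drop-in for os.walk(), so iteration has
# the same shape (the directory path is illustrative):
# for top, dirs, files in _walk('/tmp'):
#     print(top, len(dirs), 'subdirs,', len(files), 'files')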
def draw_default(self, inside=5, outside=15):
"""
Draw suggested sun disk, limb, and empty background
:param inside: how many pixels from the calculated solar disk edge to go inward for the limb
:param outside: how many pixels from the calculated solar disk edge to go outward for the limb
:return: updates the self.selection_array
"""
# fill everything with empty outer space
if 'outer_space' in self.config.solar_class_index:
self.selection_array[:, :] = self.config.solar_class_index['outer_space']
elif 'empty_outer_space' in self.config.solar_class_index:
self.selection_array[:, :] = self.config.solar_class_index['empty_outer_space']
else:
raise ValueError("outer_space or empty_outer_space must be classes with colors.")
# draw the limb label in its location
self.draw_annulus((self.cx, self.cy),
self.sun_radius_pixel - inside,
self.sun_radius_pixel + outside,
self.selection_array,
self.config.solar_class_index['limb'])
# draw quiet sun in its location
self.draw_circle((self.cx, self.cy),
self.sun_radius_pixel - inside,
self.selection_array,
                     self.config.solar_class_index['quiet_sun'])
def set_oauth_client(self, consumer_key, consumer_secret):
"""Sets the oauth_client attribute
"""
    self.oauth_client = oauth1.Client(consumer_key, consumer_secret)
def urls(self):
"""
The decoded URL list for this MIME::Type.
The special URL value IANA will be translated into:
http://www.iana.org/assignments/media-types/<mediatype>/<subtype>
The special URL value RFC### will be translated into:
http://www.rfc-editor.org/rfc/rfc###.txt
The special URL value DRAFT:name will be
translated into:
https://datatracker.ietf.org/public/idindex.cgi?
command=id_detail&filename=<name>
The special URL value
LTSW will be translated
into:
http://www.ltsw.se/knbase/internet/<mediatype>.htp
The special
URL value
[token] will
be translated
into:
http://www.iana.org/assignments/contact-people.htm#<token>
These values will be accessible through #urls, which always returns an array.
"""
def _url(el):
if el == 'IANA':
return IANA_URL % (self.media_type, self.sub_type)
elif el == 'LTSW':
return LTSW_URL % self.media_type
        match = re.compile(r'^\{([^=]+)=([^\}]+)\}').match(el)
if match:
return match.group(1, 2)
        match = re.compile(r'^\[([^=]+)=([^\]]+)\]').match(el)
if match:
return [match.group(1), CONTACT_URL % match.group(2)]
for regex in REGEX_URLS:
match = re.compile(regex).match(el)
if match:
return REGEX_URLS[regex] % match.group(1)
return el
    return map(_url, self.url)
def get_permissions(self):
"""
:returns: list of dicts, or an empty list if there are no permissions.
"""
path = Client.urls['all_permissions']
conns = self._call(path, 'GET')
    return conns
def create(self, resource_class, content_type):
"""
Creates a representer for the given combination of resource and
content type. This will also find representer factories that were
registered for a base class of the given resource.
"""
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
if rpr_fac is None:
# Register a representer with default configuration on the fly
# and look again.
self.register(resource_class, content_type)
rpr_fac = self.__find_representer_factory(resource_class,
content_type)
    return rpr_fac(resource_class)
def _soup_strings(soup):
"""Return text strings in soup."""
paragraph_tags = set([
"caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p", "td",
"div", "span"
])
skip_children = None
for descendant in soup.descendants:
# If we've treated a tag as a contiguous paragraph, don't re-emit the
# children (see below).
if skip_children is not None:
try:
in_skip = descendant in skip_children # pylint: disable=unsupported-membership-test
except RecursionError: # pylint: disable=undefined-variable
# Possible for this check to hit a nasty infinite recursion because of
# BeautifulSoup __eq__ checks.
in_skip = True
if in_skip:
continue
else:
skip_children = None
# Treat some tags as contiguous paragraphs, regardless of other tags nested
# inside (like <a> or <b>).
if isinstance(descendant, bs4.Tag):
if descendant.name in paragraph_tags:
if descendant.find_all(paragraph_tags):
# If there are nested paragraph tags, don't treat it as a single
# contiguous tag.
continue
skip_children = list(descendant.descendants)
text = " ".join(descendant.get_text(" ", strip=True).split())
if text:
yield text
continue
if (isinstance(descendant, bs4.Comment) or
not isinstance(descendant, bs4.NavigableString)):
continue
text = " ".join(descendant.strip().split())
if text:
            yield text
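# A small self-contained example of the paragraph extraction above (assumes
# bs4 is installed; the HTML is illustrative). The outer <div> contains nested
# paragraph tags, so only the two <p> tags are emitted as contiguous text:
import bs4

_soup = bs4.BeautifulSoup(
    "<div><p>Hello <b>world</b></p><p>Second paragraph</p></div>", "html.parser")
assert list(_soup_strings(_soup)) == ["Hello world", "Second paragraph"]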
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
pid = None
if os.path.exists(self.pidfile):
        try:
            with open(self.pidfile, 'r') as pf:
                pid = int(pf.read().strip())
        except IOError:
            pid = None
        if pid:
            message = "pidfile %s already exists. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
    self.run()
def K(self, X, X2, target):
"""Compute the covariance matrix between X and X2."""
if X2 is None: X2 = X
# i1 = X[:,1]
# i2 = X2[:,1]
# X = X[:,0].reshape(-1,1)
# X2 = X2[:,0].reshape(-1,1)
dist = np.abs(X - X2.T)
ly=1/self.lengthscaleY
lu=np.sqrt(3)/self.lengthscaleU
#ly=self.lengthscaleY
#lu=self.lengthscaleU
k1 = np.exp(-ly*dist)*(2*lu+ly)/(lu+ly)**2
k2 = (np.exp(-lu*dist)*(ly-2*lu+lu*ly*dist-lu**2*dist) + np.exp(-ly*dist)*(2*lu-ly) ) / (ly-lu)**2
k3 = np.exp(-lu*dist) * ( (1+lu*dist)/(lu+ly) + (lu)/(lu+ly)**2 )
    np.add(self.varianceU*self.varianceY*(k1+k2+k3), target, target)
def modify_prefix(arg, opts, shell_opts):
""" Modify the prefix 'arg' with the options 'opts'
"""
modify_confirmed = shell_opts.force
spec = { 'prefix': arg }
v = get_vrf(opts.get('vrf_rt'), abort=True)
spec['vrf_rt'] = v.rt
res = Prefix.list(spec)
if len(res) == 0:
print("Prefix %s not found in %s." % (arg, vrf_format(v)), file=sys.stderr)
return
p = res[0]
if 'prefix' in opts:
p.prefix = opts['prefix']
if 'description' in opts:
p.description = opts['description']
if 'comment' in opts:
p.comment = opts['comment']
if 'tags' in opts:
tags = list(csv.reader([opts.get('tags', '')], escapechar='\\'))[0]
p.tags = {}
for tag_name in tags:
tag = Tag()
tag.name = tag_name
p.tags[tag_name] = tag
if 'node' in opts:
p.node = opts['node']
if 'type' in opts:
p.type = opts['type']
if 'status' in opts:
p.status = opts['status']
if 'country' in opts:
p.country = opts['country']
if 'order_id' in opts:
p.order_id = opts['order_id']
if 'customer_id' in opts:
p.customer_id = opts['customer_id']
if 'vlan' in opts:
p.vlan = opts['vlan']
if 'alarm_priority' in opts:
p.alarm_priority = opts['alarm_priority']
if 'monitor' in opts:
p.monitor = _str_to_bool(opts['monitor'])
if 'expires' in opts:
p.expires = opts['expires']
for avp in opts.get('extra-attribute', []):
try:
key, value = avp.split('=', 1)
except ValueError:
print("ERROR: Incorrect extra-attribute: %s. Accepted form: 'key=value'\n" % avp, file=sys.stderr)
return
p.avps[key] = value
    # Prompt user if prefix has authoritative source != nipap
if not modify_confirmed and p.authoritative_source.lower() != 'nipap':
res = input("Prefix %s in %s is managed by system '%s'. Are you sure you want to modify it? [y/n]: " %
(p.prefix, vrf_format(p.vrf), p.authoritative_source))
# If the user declines, short-circuit...
if res.lower() not in [ 'y', 'yes' ]:
print("Operation aborted.")
return
try:
p.save()
except NipapError as exc:
print("Could not save prefix changes: %s" % str(exc), file=sys.stderr)
sys.exit(1)
print("Prefix %s in %s saved." % (p.display_prefix, vrf_format(p.vrf))) | 0.004021 |
def set_key(self, key='C'):
"""Add a key signature event to the track_data."""
if isinstance(key, Key):
key = key.name[0]
    self.track_data += self.key_signature_event(key)
def send(self, command):
"""
Sends commands to this hypervisor.
:param command: a uBridge hypervisor command
:returns: results as a list
"""
# uBridge responses are of the form:
# 1xx yyyyyy\r\n
# 1xx yyyyyy\r\n
# ...
# 100-yyyy\r\n
# or
# 2xx-yyyy\r\n
#
# Where 1xx is a code from 100-199 for a success or 200-299 for an error
# The result might be multiple lines and might be less than the buffer size
# but still have more data. The only thing we know for sure is the last line
# will begin with '100-' or a '2xx-' and end with '\r\n'
if self._writer is None or self._reader is None:
raise UbridgeError("Not connected")
try:
command = command.strip() + '\n'
log.debug("sending {}".format(command))
self._writer.write(command.encode())
yield from self._writer.drain()
except OSError as e:
raise UbridgeError("Lost communication with {host}:{port} :{error}, Dynamips process running: {run}"
.format(host=self._host, port=self._port, error=e, run=self.is_running()))
# Now retrieve the result
data = []
buf = ''
retries = 0
max_retries = 10
while True:
try:
try:
chunk = yield from self._reader.read(1024)
except asyncio.CancelledError:
# task has been canceled but continue to read
# any remaining data sent by the hypervisor
continue
except ConnectionResetError as e:
# Sometimes WinError 64 (ERROR_NETNAME_DELETED) is returned here on Windows.
# These happen if connection reset is received before IOCP could complete
# a previous operation. Ignore and try again....
log.warning("Connection reset received while reading uBridge response: {}".format(e))
continue
if not chunk:
if retries > max_retries:
raise UbridgeError("No data returned from {host}:{port}, uBridge process running: {run}"
.format(host=self._host, port=self._port, run=self.is_running()))
else:
retries += 1
yield from asyncio.sleep(0.1)
continue
retries = 0
buf += chunk.decode("utf-8")
except OSError as e:
raise UbridgeError("Lost communication with {host}:{port} :{error}, uBridge process running: {run}"
.format(host=self._host, port=self._port, error=e, run=self.is_running()))
# If the buffer doesn't end in '\n' then we can't be done
try:
if buf[-1] != '\n':
continue
except IndexError:
raise UbridgeError("Could not communicate with {host}:{port}, uBridge process running: {run}"
.format(host=self._host, port=self._port, run=self.is_running()))
data += buf.split('\r\n')
if data[-1] == '':
data.pop()
buf = ''
# Does it contain an error code?
if self.error_re.search(data[-1]):
raise UbridgeError(data[-1][4:])
# Or does the last line begin with '100-'? Then we are done!
if data[-1][:4] == '100-':
data[-1] = data[-1][4:]
if data[-1] == 'OK':
data.pop()
break
# Remove success responses codes
for index in range(len(data)):
if self.success_re.search(data[index]):
data[index] = data[index][4:]
log.debug("returned result {}".format(data))
    return data
def parse_tibia_date(date_str) -> Optional[datetime.date]:
"""Parses a date from the format used in Tibia.com
Accepted format:
- ``MMM DD YYYY``, e.g. ``Jul 23 2015``
Parameters
-----------
date_str: :class:`str`
The date as represented in Tibia.com
Returns
-----------
:class:`datetime.date`, optional
The represented date."""
try:
t = datetime.datetime.strptime(date_str.strip(), "%b %d %Y")
return t.date()
except (ValueError, AttributeError):
        return None
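# Examples for the single accepted format (per the docstring):
# parse_tibia_date("Jul 23 2015")  -> datetime.date(2015, 7, 23)
# parse_tibia_date("23/07/2015")   -> None (format not accepted)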
def count_names_by_namespace(graph, namespace):
"""Get the set of all of the names in a given namespace that are in the graph.
:param pybel.BELGraph graph: A BEL graph
:param str namespace: A namespace keyword
:return: A counter from {name: frequency}
:rtype: collections.Counter
:raises IndexError: if the namespace is not defined in the graph.
"""
if namespace not in graph.defined_namespace_keywords:
raise IndexError('{} is not defined in {}'.format(namespace, graph))
    return Counter(_namespace_filtered_iterator(graph, namespace))
def has_perm(self, user, perm, obj=None):
"""Returns True if the given user has the specified permission."""
if not user.is_active:
return False
    return perm in self.get_all_permissions(user, obj)
def pretty_plot_two_axis(x, y1, y2, xlabel=None, y1label=None, y2label=None,
width=8, height=None, dpi=300):
"""
Variant of pretty_plot that does a dual axis plot. Adapted from matplotlib
examples. Makes it easier to create plots with different axes.
Args:
x (np.ndarray/list): Data for x-axis.
y1 (dict/np.ndarray/list): Data for y1 axis (left). If a dict, it will
be interpreted as a {label: sequence}.
y2 (dict/np.ndarray/list): Data for y2 axis (right). If a dict, it will
be interpreted as a {label: sequence}.
xlabel (str): If not None, this will be the label for the x-axis.
y1label (str): If not None, this will be the label for the y1-axis.
y2label (str): If not None, this will be the label for the y2-axis.
width (float): Width of plot in inches. Defaults to 8in.
height (float): Height of plot in inches. Defaults to width * golden
ratio.
dpi (int): Sets dot per inch for figure. Defaults to 300.
Returns:
matplotlib.pyplot
"""
import palettable.colorbrewer.diverging
colors = palettable.colorbrewer.diverging.RdYlBu_4.mpl_colors
c1 = colors[0]
c2 = colors[-1]
golden_ratio = (math.sqrt(5) - 1) / 2
if not height:
height = int(width * golden_ratio)
import matplotlib.pyplot as plt
width = 12
labelsize = int(width * 3)
ticksize = int(width * 2.5)
styles = ["-", "--", "-.", "."]
fig, ax1 = plt.subplots()
fig.set_size_inches((width, height))
if dpi:
fig.set_dpi(dpi)
if isinstance(y1, dict):
for i, (k, v) in enumerate(y1.items()):
ax1.plot(x, v, c=c1, marker='s', ls=styles[i % len(styles)],
label=k)
ax1.legend(fontsize=labelsize)
else:
ax1.plot(x, y1, c=c1, marker='s', ls='-')
if xlabel:
ax1.set_xlabel(xlabel, fontsize=labelsize)
if y1label:
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel(y1label, color=c1, fontsize=labelsize)
ax1.tick_params('x', labelsize=ticksize)
ax1.tick_params('y', colors=c1, labelsize=ticksize)
ax2 = ax1.twinx()
if isinstance(y2, dict):
for i, (k, v) in enumerate(y2.items()):
ax2.plot(x, v, c=c2, marker='o', ls=styles[i % len(styles)],
label=k)
ax2.legend(fontsize=labelsize)
else:
ax2.plot(x, y2, c=c2, marker='o', ls='-')
if y2label:
# Make the y-axis label, ticks and tick labels match the line color.
ax2.set_ylabel(y2label, color=c2, fontsize=labelsize)
ax2.tick_params('y', colors=c2, labelsize=ticksize)
    return plt
def GenerarPDF(self, archivo="", dest="F"):
    "Generate the output file in PDF format"
    try:
        self.template.render(archivo, dest=dest)
        return True
    except Exception as e:
        self.Excepcion = str(e)
        return False
def to_dict(self):
"Post as a dict, for serializing"
d = self.metadata.copy()
d['content'] = self.content
    return d
def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
"""Update request cookies header."""
if not cookies:
return
c = SimpleCookie()
if hdrs.COOKIE in self.headers:
c.load(self.headers.get(hdrs.COOKIE, ''))
del self.headers[hdrs.COOKIE]
if isinstance(cookies, Mapping):
iter_cookies = cookies.items()
else:
iter_cookies = cookies # type: ignore
for name, value in iter_cookies:
if isinstance(value, Morsel):
# Preserve coded_value
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value) # type: ignore # noqa
c[name] = mrsl_val
else:
c[name] = value # type: ignore
    self.headers[hdrs.COOKIE] = c.output(header='', sep=';').strip()
def _verify_temperature(self, temp):
"""Verifies that the temperature is valid.
:raises TemperatureException: On invalid temperature.
"""
if temp < self.min_temp or temp > self.max_temp:
raise TemperatureException('Temperature {} out of range [{}, {}]'
                                   .format(temp, self.min_temp, self.max_temp))
def get_topic(self, topic_name):
"""Get a client for a topic entity.
:param topic_name: The name of the topic.
:type topic_name: str
:rtype: ~azure.servicebus.servicebus_client.TopicClient
:raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found.
:raises: ~azure.servicebus.common.errors.ServiceBusResourceNotFound if the topic is not found.
Example:
.. literalinclude:: ../examples/test_examples.py
:start-after: [START get_topic_client]
:end-before: [END get_topic_client]
:language: python
:dedent: 8
:caption: Get the specific topic client from Service Bus client
"""
try:
topic = self.mgmt_client.get_topic(topic_name)
except requests.exceptions.ConnectionError as e:
raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
except AzureServiceBusResourceNotFound:
raise ServiceBusResourceNotFound("Specificed topic does not exist.")
return TopicClient.from_entity(
self._get_host(), topic,
shared_access_key_name=self.shared_access_key_name,
shared_access_key_value=self.shared_access_key_value,
        debug=self.debug)
def user_session_info_output_user_role(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
user_session_info = ET.Element("user_session_info")
config = user_session_info
output = ET.SubElement(user_session_info, "output")
user_role = ET.SubElement(output, "user-role")
user_role.text = kwargs.pop('user_role')
callback = kwargs.pop('callback', self._callback)
    return callback(config)
def set( string, target_level, indent_string=" ", indent_empty_lines=False ):
""" Sets indentation of a single/multi-line string. """
lines = string.splitlines()
set_lines( lines, target_level, indent_string=indent_string, indent_empty_lines=indent_empty_lines )
result = "\n".join(lines)
    return result
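# Call-shape sketch (hedged: set_lines() is defined elsewhere and is assumed
# to rewrite each line's leading indentation to target_level in place):
# print(set("a\nb", target_level=2, indent_string="  "))  # -> "    a\n    b"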
def create_schema(self, connection):
"""
Will create the schema in the database
"""
if '.' not in self.table:
return
query = 'CREATE SCHEMA IF NOT EXISTS {schema_name};'.format(schema_name=self.table.split('.')[0])
    connection.cursor().execute(query)
def get_file_hash(fpath, blocksize=65536, hasher=None, stride=1,
hexdigest=False):
r"""
For better hashes use hasher=hashlib.sha256, and keep stride=1
Args:
fpath (str): file path string
blocksize (int): 2 ** 16. Affects speed of reading file
hasher (None): defaults to sha1 for fast (but insecure) hashing
stride (int): strides > 1 skip data to hash, useful for faster
hashing, but less accurate, also makes hash dependant on
blocksize.
References:
http://stackoverflow.com/questions/3431825/generating-a-md5-checksum-of-a-file
http://stackoverflow.com/questions/5001893/when-should-i-use-sha-1-and-when-should-i-use-sha-2
CommandLine:
python -m utool.util_hash --test-get_file_hash
python -m utool.util_hash --test-get_file_hash:0
python -m utool.util_hash --test-get_file_hash:1
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> fpath = ut.grab_test_imgpath('patsy.jpg')
>>> #blocksize = 65536 # 2 ** 16
>>> blocksize = 2 ** 16
>>> hasher = None
>>> stride = 1
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'7\x07B\x0eX<sRu\xa2\x90P\xda\xb2\x84?\x81?\xa9\xd9'
'\x13\x9b\xf6\x0f\xa3QQ \xd7"$\xe9m\x05\x9e\x81\xf6\xf2v\xe4'
'\x16\x00\x80Xx\x8c-H\xcdP\xf6\x02\x9frl\xbf\x99VQ\xb5'
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_hash import * # NOQA
>>> #fpath = ut.grab_file_url('http://en.wikipedia.org/wiki/List_of_comets_by_type')
>>> fpath = ut.unixjoin(ut.ensure_app_resource_dir('utool'), 'tmp.txt')
>>> ut.write_to(fpath, ut.lorium_ipsum())
>>> blocksize = 2 ** 3
>>> hasher = None
>>> stride = 2
>>> hashbytes_20 = get_file_hash(fpath, blocksize, hasher, stride)
>>> result = repr(hashbytes_20)
>>> print(result)
'5KP\xcf>R\xf6\xffO:L\xac\x9c\xd3V+\x0e\xf6\xe1n'
Ignore:
file_ = open(fpath, 'rb')
"""
if hasher is None:
hasher = hashlib.sha1()
with open(fpath, 'rb') as file_:
buf = file_.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
if stride > 1:
file_.seek(blocksize * (stride - 1), 1) # skip blocks
buf = file_.read(blocksize)
if hexdigest:
return hasher.hexdigest()
else:
        return hasher.digest()
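# Hedged usage note: the loop above calls hasher.update(), so a hasher
# *instance* is expected -- e.g. hashlib.sha256() rather than hashlib.sha256:
# import hashlib
# digest = get_file_hash('/path/to/file', hasher=hashlib.sha256(), hexdigest=True)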
def save_fits(self, data, name):
"""
This method simply saves the model components and the residual.
INPUTS:
data (no default) Data which is to be saved.
name (no default) File name for new .fits file. Will overwrite.
"""
data = data.reshape(1, 1, data.shape[0], data.shape[0])
new_file = pyfits.PrimaryHDU(data,self.img_hdu_list[0].header)
    new_file.writeto("{}".format(name), overwrite=True)
def volume_up(self):
"""Increasing volume of the device."""
self._volume_level += self._volume_step / self._max_volume
    self._device.vol_up(num=self._volume_step)
def iter_trees(self, *args, **kwargs):
""":return: Iterator yielding Tree objects
:note: Takes all arguments known to iter_commits method"""
    return (c.tree for c in self.iter_commits(*args, **kwargs))
def _layout(self):
"""
Creates the grid layout for the joint plot, adding new axes for the histograms
if necessary and modifying the aspect ratio. Does not modify the axes or the
layout if self.hist is False or None.
"""
# Ensure the axes are created if not hist, then return.
if not self.hist:
self.ax
return
# Ensure matplotlib version compatibility
if make_axes_locatable is None:
raise YellowbrickValueError((
"joint plot histograms requires matplotlib 2.0.2 or greater "
"please upgrade matplotlib or set hist=False on the visualizer"
))
# Create the new axes for the histograms
divider = make_axes_locatable(self.ax)
self._xhax = divider.append_axes("top", size=1, pad=0.1, sharex=self.ax)
self._yhax = divider.append_axes("right", size=1, pad=0.1, sharey=self.ax)
# Modify the display of the axes
self._xhax.xaxis.tick_top()
self._yhax.yaxis.tick_right()
self._xhax.grid(False, axis='y')
    self._yhax.grid(False, axis='x')
def processString(inptStr):
'''
inptStr may be a string of the following forms:
* 'meme: text0 | text1'
* 'gif: search_keywords'
If not, it returns an appropriate error message,
stating an improperly formatted <magic> tag.
Fails gracefully when it can't find or generate a meme
or a gif, by returning an appropriate image url with the
failure message on it.
TODO: Find a way to efficiently search for xkcd comics
'''
    inptStr = inptStr.strip(' ')  # assign the result: str.strip() does not modify in place
imgParamList = inptStr.split(':')
if len(imgParamList) < 2:
print("Not enough information for searching for image.")
return not_enough_info
else:
imgType = imgParamList[0]
imgParams = imgParamList[1]
if imgType == 'meme':
imgURL = processMeme(imgParams)
# print(imgURL)
return imgURL
elif imgType == 'gif':
gifURL = processGif(imgParams)
# print(gifURL)
return gifURL
else:
print("Improperly formatted <magic> tag.")
            return improperly_formatted_tag
def get_config(name, region=None, key=None, keyid=None, profile=None):
'''
Get the configuration for a cache cluster.
CLI example::
salt myminion boto_elasticache.get_config myelasticache
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
if not conn:
return None
try:
cc = conn.describe_cache_clusters(name,
show_cache_node_info=True)
except boto.exception.BotoServerError as e:
msg = 'Failed to get config for cache cluster {0}.'.format(name)
log.error(msg)
log.debug(e)
return {}
cc = cc['DescribeCacheClustersResponse']['DescribeCacheClustersResult']
cc = cc['CacheClusters'][0]
ret = odict.OrderedDict()
attrs = ['engine', 'cache_parameter_group', 'cache_cluster_id',
'cache_security_groups', 'replication_group_id',
'auto_minor_version_upgrade', 'num_cache_nodes',
'preferred_availability_zone', 'security_groups',
'cache_subnet_group_name', 'engine_version', 'cache_node_type',
'notification_configuration', 'preferred_maintenance_window',
'configuration_endpoint', 'cache_cluster_status', 'cache_nodes']
for key, val in six.iteritems(cc):
_key = boto.utils.pythonize_name(key)
if _key not in attrs:
continue
if _key == 'cache_parameter_group':
if val:
ret[_key] = val['CacheParameterGroupName']
else:
ret[_key] = None
elif _key == 'cache_nodes':
if val:
ret[_key] = [k for k in val]
else:
ret[_key] = []
elif _key == 'cache_security_groups':
if val:
ret[_key] = [k['CacheSecurityGroupName'] for k in val]
else:
ret[_key] = []
elif _key == 'configuration_endpoint':
if val:
ret['port'] = val['Port']
ret['address'] = val['Address']
else:
ret['port'] = None
ret['address'] = None
elif _key == 'notification_configuration':
if val:
ret['notification_topic_arn'] = val['TopicArn']
else:
ret['notification_topic_arn'] = None
else:
ret[_key] = val
    return ret
def parse_template(template):
"""returns a 2-tuple of (template_name, number_of_priors)"""
m = TEMPLATE_OVERRIDE_RE.match(template)
if not m:
return template, 0
    return m.group('template'), int(m.group('depth'))
def operate(self, left, right, operation):
""" Do operation on colors
args:
left (str): left side
right (str): right side
operation (str): Operation
returns:
str
"""
operation = {
'+': operator.add,
'-': operator.sub,
'*': operator.mul,
'/': operator.truediv
}.get(operation)
    return operation(left, right)
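# Examples of the operator dispatch above:
# operate(10, 4, '-')  -> 6
# operate(10, 4, '/')  -> 2.5 (operator.truediv)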
def universal_transformer_with_lstm_as_transition_function(
layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None):
"""Universal Transformer which uses a lstm as transition function.
It's kind of like having a lstm, filliped vertically next to the Universal
Transformer that controls the flow of the information in depth,
over different steps of the Universal Transformer.
Args:
layer_inputs:
- state: state
- inputs: the original embedded inputs (= inputs to the first step)
- memory: memory used in lstm.
step: indicates number of steps taken so far
hparams: model hyper-parameters.
ffn_unit: feed-forward unit
attention_unit: multi-head attention unit
pad_remover: to mask out padding in convolutional layers (efficiency).
Returns:
layer_output:
new_state: new state
inputs: the original embedded inputs (= inputs to the first step)
memory: contains information of state from all the previous steps.
"""
state, unused_inputs, memory = tf.unstack(
layer_inputs, num=None, axis=0, name="unstack")
# NOTE:
# state (ut_state): output of the lstm in the previous step
# inputs (ut_input): original input --> we don't use it here
# memory: lstm memory
# Multi_head_attention:
assert not hparams.add_step_timing_signal # Let lstm count for us!
mh_attention_input = step_preprocess(state, step, hparams)
transition_function_input = attention_unit(mh_attention_input)
# Transition Function:
if hparams.add_ffn_unit_to_the_transition_function:
transition_function_input = ffn_unit(transition_function_input)
transition_function_input = common_layers.layer_preprocess(
transition_function_input, hparams)
with tf.variable_scope("lstm"):
# lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1})
transition_function_input_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_input_gate",
tf.reduce_mean(transition_function_input_gate))
# lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1})
transition_function_forget_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="forget",
bias_initializer=tf.zeros_initializer(),
activation=None,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
forget_bias_tensor = tf.constant(hparams.lstm_forget_bias)
transition_function_forget_gate = tf.sigmoid(
transition_function_forget_gate + forget_bias_tensor)
tf.contrib.summary.scalar("lstm_forget_gate",
tf.reduce_mean(transition_function_forget_gate))
# lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1})
transition_function_output_gate = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="output",
bias_initializer=tf.zeros_initializer(),
activation=tf.sigmoid,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
tf.contrib.summary.scalar("lstm_output_gate",
tf.reduce_mean(transition_function_output_gate))
# lstm input modulation
transition_function_input_modulation = _ffn_layer_multi_inputs(
[transition_function_input, state],
hparams,
name="input_modulation",
bias_initializer=tf.zeros_initializer(),
activation=tf.tanh,
pad_remover=pad_remover,
preprocess=False,
postprocess=False)
transition_function_memory = (
memory * transition_function_forget_gate +
transition_function_input_gate * transition_function_input_modulation)
transition_function_output = (
tf.tanh(transition_function_memory) * transition_function_output_gate)
transition_function_output = common_layers.layer_preprocess(
transition_function_output, hparams)
  return transition_function_output, unused_inputs, transition_function_memory
def _get_remote_ontology(onto_url, time_difference=None):
"""Check if the online ontology is more recent than the local ontology.
If yes, try to download and store it in Invenio's cache directory.
Return a boolean describing the success of the operation.
:return: path to the downloaded ontology.
"""
if onto_url is None:
return False
dl_dir = os.path.join(
current_app.config["CLASSIFIER_WORKDIR"] or tempfile.gettempdir(),
"classifier"
)
if not os.path.exists(dl_dir):
os.makedirs(dl_dir)
local_file = dl_dir + os.path.basename(onto_url)
remote_modif_time = _get_last_modification_date(onto_url)
try:
local_modif_seconds = os.path.getmtime(local_file)
except OSError:
# The local file does not exist. Download the ontology.
download = True
current_app.logger.info("The local ontology could not be found.")
else:
local_modif_time = datetime(*time.gmtime(local_modif_seconds)[0:6])
# Let's set a time delta of 1 hour and 10 minutes.
time_difference = time_difference or timedelta(hours=1, minutes=10)
download = remote_modif_time > local_modif_time + time_difference
if download:
current_app.logger.info(
"The remote ontology '{0}' is more recent "
"than the local ontology.".format(onto_url)
)
if download:
if not _download_ontology(onto_url, local_file):
current_app.logger.warning(
"Error downloading the ontology from: {0}".format(onto_url)
)
    return local_file
def Deserialize(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.__hash = None
self.DeserializeUnsigned(reader)
byt = reader.ReadByte()
if int(byt) != 1:
raise Exception('Incorrect format')
witness = Witness()
witness.Deserialize(reader)
    self.Script = witness
def closed(self) -> bool:
'''Return whether the connection is closed.'''
    return not self.writer or not self.reader or self.reader.at_eof()
def flatten(lol):
"""Flatten a list of lists to a list.
:param lol: A list of lists in arbitrary depth.
:type lol: list of list.
:returns: flat list of elements.
"""
new_list = []
for element in lol:
if element is None:
continue
elif not isinstance(element, list) and not isinstance(element, tuple):
new_list.append(element)
elif len(element) > 0:
new_list.extend(flatten(element))
    return new_list
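# Example: tuples are treated like lists, None is dropped, and empty
# sequences vanish:
# flatten([1, [2, None, [3, 4]], (), [5]])  -> [1, 2, 3, 4, 5]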
def fill_series(x, year):
"""Returns the value of a timeseries (indexed over years) for a year
by linear interpolation.
Parameters
----------
x: pandas.Series
a timeseries to be interpolated
year: int
year of interpolation
"""
x = x.dropna()
if year in x.index and not np.isnan(x[year]):
return x[year]
else:
prev = [i for i in x.index if i < year]
nxt = [i for i in x.index if i > year]
if prev and nxt:
p = max(prev)
n = min(nxt)
return ((n - year) * x[p] + (year - p) * x[n]) / (n - p)
else:
            return np.nan
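# Worked example of the interpolation (assumes pandas imported as pd, as the
# signature suggests):
# s = pd.Series({2000: 1.0, 2010: 3.0})
# fill_series(s, 2005)  # ((2010-2005)*1.0 + (2005-2000)*3.0) / (2010-2000) = 2.0
# fill_series(s, 1995)  # nan -- no data point on both sides of the year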
def change_settings(self, bio=None, public_images=None,
messaging_enabled=None, album_privacy=None,
accepted_gallery_terms=None):
"""
Update the settings for the user.
:param bio: A basic description filled out by the user, is displayed in
the gallery profile page.
:param public_images: Set the default privacy setting of the users
images. If True images are public, if False private.
:param messaging_enabled: Set to True to enable messaging.
:param album_privacy: The default privacy level of albums created by
the user. Can be public, hidden or secret.
:param accepted_gallery_terms: The user agreement to Imgur Gallery
terms. Necessary before the user can submit to the gallery.
"""
# NOTE: album_privacy should maybe be renamed to default_privacy
# NOTE: public_images is a boolean, despite the documentation saying it
# is a string.
url = self._imgur._base_url + "/3/account/{0}/settings".format(self.name)
resp = self._imgur._send_request(url, needs_auth=True, params=locals(),
method='POST')
    return resp
def do_plugins(self, args, arguments):
"""
::
Usage:
plugins add COMMAND [--dryrun] [-q]
plugins delete COMMAND [--dryrun] [-q]
plugins list [--output=FORMAT] [-q]
plugins activate
Arguments:
FORMAT format is either yaml, json, or list [default=yaml]
Options:
-q stands for quiet and suppresses additional messages
Description:
Please note that adding and deleting plugins requires restarting
cm to activate them
plugins list
lists the plugins in the yaml file
plugins add COMMAND
plugins delete COMMAND
cmd3 contains a ~/.cloudmesh/cmd3.yaml file.
This command will add/delete a plugin for a given command
that has been generated with cm-generate-command
To the yaml this command will add to the modules
- cloudmesh_COMMAND.plugins
where COMMAND is the name of the command. In case we add
a command and the command is out commented the comment
will be removed so the command is enabled.
plugins activate
NOT YET SUPPORTED.
Example:
plugins add pbs
"""
# pprint(arguments)
quiet = arguments["-q"]
if arguments["activate"]:
Console.error("this method is not yet supported.")
self.activate()
elif arguments["list"]:
if arguments["--output"] == "yaml":
plugins_object = setup_management(quiet=quiet)
print(plugins_object.config.yaml())
elif arguments["--output"] == "json":
plugins_object = setup_management(quiet=quiet)
print(plugins_object.config)
elif arguments["--output"] == "list":
plugins_object = setup_management(quiet=quiet)
print(plugins_object.config["cmd3"]["modules"])
if arguments["--output"] is None:
plugins_object = setup_management(quiet=quiet)
print(plugins_object)
elif arguments["add"]:
plugins_object = setup_management()
plugins_object.add(arguments["COMMAND"],
dryrun=arguments["--dryrun"])
elif arguments["delete"]:
plugins_object = setup_management()
plugins_object.delete(arguments["COMMAND"],
dryrun=arguments["--dryrun"])
else:
Console.error("unknown option.") | 0.001787 |
def get_assets_by_genus_type(self, asset_genus_type):
"""Gets an ``AssetList`` corresponding to the given asset genus ``Type`` which does not include assets of types derived from the specified ``Type``.
In plenary mode, the returned list contains all known assets or
an error results. Otherwise, the returned list may contain only
those assets that are accessible through this session.
arg: asset_genus_type (osid.type.Type): an asset genus type
return: (osid.repository.AssetList) - the returned ``Asset
list``
raise: NullArgument - ``asset_genus_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceLookupSession.get_resources_by_genus_type
# NOTE: This implementation currently ignores plenary view
collection = JSONClientValidated('repository',
collection='Asset',
runtime=self._runtime)
result = collection.find(
dict({'genusTypeId': str(asset_genus_type)},
**self._view_filter())).sort('_id', DESCENDING)
    return objects.AssetList(result, runtime=self._runtime, proxy=self._proxy)
def publish(self, load):
'''
Publish "load" to minions. This send the load to the publisher daemon
process with does the actual sending to minions.
:param dict load: A load to be sent across the wire to minions
'''
payload = {'enc': 'aes'}
crypticle = salt.crypt.Crypticle(self.opts, salt.master.SMaster.secrets['aes']['secret'].value)
payload['load'] = crypticle.dumps(load)
if self.opts['sign_pub_messages']:
master_pem_path = os.path.join(self.opts['pki_dir'], 'master.pem')
log.debug("Signing data packet")
payload['sig'] = salt.crypt.sign_message(master_pem_path, payload['load'])
int_payload = {'payload': self.serial.dumps(payload)}
# add some targeting stuff for lists only (for now)
if load['tgt_type'] == 'list':
int_payload['topic_lst'] = load['tgt']
# If zmq_filtering is enabled, target matching has to happen master side
match_targets = ["pcre", "glob", "list"]
if self.opts['zmq_filtering'] and load['tgt_type'] in match_targets:
# Fetch a list of minions that match
_res = self.ckminions.check_minions(load['tgt'],
tgt_type=load['tgt_type'])
match_ids = _res['minions']
log.debug("Publish Side Match: %s", match_ids)
        # Send the list of minions through so zmq can target them
int_payload['topic_lst'] = match_ids
payload = self.serial.dumps(int_payload)
log.debug(
'Sending payload to publish daemon. jid=%s size=%d',
load.get('jid', None), len(payload),
)
if not self.pub_sock:
self.pub_connect()
self.pub_sock.send(payload)
    log.debug('Sent payload to publish daemon.')
def _mutate(self):
'''
Mutate enclosed fields
'''
for i in range(self._field_idx, len(self._fields)):
self._field_idx = i
if self._current_field().mutate():
return True
self._current_field().reset()
    return False
def _StartSshd(self):
"""Initialize the SSH daemon."""
# Exit as early as possible.
# Instance setup systemd scripts block sshd from starting.
if os.path.exists(constants.LOCALBASE + '/bin/systemctl'):
return
elif (os.path.exists('/etc/init.d/ssh')
or os.path.exists('/etc/init/ssh.conf')):
subprocess.call(['service', 'ssh', 'start'])
subprocess.call(['service', 'ssh', 'reload'])
elif (os.path.exists('/etc/init.d/sshd')
or os.path.exists('/etc/init/sshd.conf')):
subprocess.call(['service', 'sshd', 'start'])
        subprocess.call(['service', 'sshd', 'reload'])
def _resolve_input(variable, variable_name, config_key, config):
"""
Resolve input entered as option values with config values
If option values are provided (passed in as `variable`), then they are
returned unchanged. If `variable` is None, then we first look for a config
value to use.
If no config value is found, then raise an error.
Parameters
----------
variable: string or numeric
value passed in as input by the user
variable_name: string
name of the variable, for clarity in the error message
config_key: string
key in the config whose value could be used to fill in the variable
config: ConfigParser
contains keys/values in .apparatecfg
"""
if variable is None:
try:
variable = config.get(PROFILE, config_key)
except NoOptionError:
raise ValueError((
'no {} found - either provide a command line argument or '
'set up a default by running `apparate configure`'
).format(variable_name))
    return variable
def get(self):
    '''xxxxx.xxxxx.campaign.areaoptions.get
    ===================================
    Get the list of targeting regions that can be configured for the promotion campaign.'''
request = TOPRequest('xxxxx.xxxxx.campaign.areaoptions.get')
self.create(self.execute(request), fields=['success','result'], models={'result':AreaOption})
    return self.result
def _parseRequestValues(self, request, command):
"""Parses all the values in the request that are in a form specific
to the JSON AMP dialect.
"""
for key, ampType in command.arguments:
ampClass = ampType.__class__
if ampClass is exposed.ExposedResponderLocator:
request[key] = self._remote
continue
decoder = _decoders.get(ampClass)
if decoder is not None:
value = request.get(key)
            request[key] = decoder(value, self)
def set_value(self, value):
"""
Sets the user value of the symbol.
Equal in effect to assigning the value to the symbol within a .config
file. For bool and tristate symbols, use the 'assignable' attribute to
check which values can currently be assigned. Setting values outside
'assignable' will cause Symbol.user_value to differ from
Symbol.str/tri_value (be truncated down or up).
Setting a choice symbol to 2 (y) sets Choice.user_selection to the
choice symbol in addition to setting Symbol.user_value.
Choice.user_selection is considered when the choice is in y mode (the
"normal" mode).
Other symbols that depend (possibly indirectly) on this symbol are
automatically recalculated to reflect the assigned value.
value:
The user value to give to the symbol. For bool and tristate symbols,
n/m/y can be specified either as 0/1/2 (the usual format for tristate
values in Kconfiglib) or as one of the strings "n"/"m"/"y". For other
symbol types, pass a string.
Values that are invalid for the type (such as "foo" or 1 (m) for a
BOOL or "0x123" for an INT) are ignored and won't be stored in
Symbol.user_value. Kconfiglib will print a warning by default for
invalid assignments, and set_value() will return False.
Returns True if the value is valid for the type of the symbol, and
False otherwise. This only looks at the form of the value. For BOOL and
TRISTATE symbols, check the Symbol.assignable attribute to see what
values are currently in range and would actually be reflected in the
value of the symbol. For other symbol types, check whether the
visibility is non-n.
"""
# If the new user value matches the old, nothing changes, and we can
# save some work.
#
# This optimization is skipped for choice symbols: Setting a choice
# symbol's user value to y might change the state of the choice, so it
# wouldn't be safe (symbol user values always match the values set in a
# .config file or via set_value(), and are never implicitly updated).
if value == self.user_value and not self.choice:
self._was_set = True
return True
# Check if the value is valid for our type
if not (self.orig_type is BOOL and value in (2, 0, "y", "n") or
self.orig_type is TRISTATE and value in (2, 1, 0, "y", "m", "n") or
(value.__class__ is str and
(self.orig_type is STRING or
self.orig_type is INT and _is_base_n(value, 10) or
self.orig_type is HEX and _is_base_n(value, 16)
and int(value, 16) >= 0))):
# Display tristate values as n, m, y in the warning
self.kconfig._warn(
"the value {} is invalid for {}, which has type {} -- "
"assignment ignored"
.format(TRI_TO_STR[value] if value in (0, 1, 2) else
"'{}'".format(value),
_name_and_loc(self), TYPE_TO_STR[self.orig_type]))
return False
if self.orig_type in _BOOL_TRISTATE and value in ("y", "m", "n"):
value = STR_TO_TRI[value]
self.user_value = value
self._was_set = True
if self.choice and value == 2:
# Setting a choice symbol to y makes it the user selection of the
# choice. Like for symbol user values, the user selection is not
# guaranteed to match the actual selection of the choice, as
# dependencies come into play.
self.choice.user_selection = self
self.choice._was_set = True
self.choice._rec_invalidate()
else:
self._rec_invalidate_if_has_prompt()
    return True
def read_private_key_file(pkey_file,
pkey_password=None,
key_type=None,
logger=None):
"""
Get SSH Public key from a private key file, given an optional password
Arguments:
pkey_file (str):
File containing a private key (RSA, DSS or ECDSA)
Keyword Arguments:
pkey_password (Optional[str]):
Password to decrypt the private key
logger (Optional[logging.Logger])
Return:
paramiko.Pkey
"""
ssh_pkey = None
for pkey_class in (key_type,) if key_type else (
paramiko.RSAKey,
paramiko.DSSKey,
paramiko.ECDSAKey,
paramiko.Ed25519Key
):
try:
ssh_pkey = pkey_class.from_private_key_file(
pkey_file,
password=pkey_password
)
if logger:
logger.debug('Private key file ({0}, {1}) successfully '
'loaded'.format(pkey_file, pkey_class))
break
except paramiko.PasswordRequiredException:
if logger:
logger.error('Password is required for key {0}'
.format(pkey_file))
break
except paramiko.SSHException:
if logger:
logger.debug('Private key file ({0}) could not be loaded '
'as type {1} or bad password'
.format(pkey_file, pkey_class))
return ssh_pkey | 0.002924 |
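# A minimal usage sketch for read_private_key_file(), assuming the helper above
# is importable; the key path is illustrative only.
import logging

logging.basicConfig(level=logging.DEBUG)
pkey = read_private_key_file('/home/user/.ssh/id_rsa',
                             pkey_password=None,
                             logger=logging.getLogger(__name__))
if pkey is not None:
    print("Loaded key type:", pkey.get_name())   # e.g. 'ssh-rsa'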
def _get_private_room(self, invitees: List[User]):
""" Create an anonymous, private room and invite peers """
return self._client.create_room(
None,
invitees=[user.user_id for user in invitees],
is_public=False,
) | 0.007326 |
def raw_value(self):
"""
Property to return the variable defined in ``django.conf.settings``.
Returns:
object: the variable defined in ``django.conf.settings``.
Raises:
AttributeError: if the variable is missing.
KeyError: if the item is missing from nested setting.
"""
if self.parent_setting is not None:
return self.parent_setting.raw_value[self.full_name]
else:
return getattr(settings, self.full_name) | 0.003817 |
def open(self, path, mode='r'):
"""Open stream, returning ``Stream`` object"""
entry = self.find(path)
if entry is None:
if mode == 'r':
raise ValueError("stream does not exists: %s" % path)
entry = self.create_dir_entry(path, 'stream', None)
else:
if not entry.isfile():
raise ValueError("can only open stream type DirEntry's")
if mode == 'w':
logging.debug("stream: %s exists, overwriting" % path)
self.free_fat_chain(entry.sector_id, entry.byte_size < self.min_stream_max_size)
entry.sector_id = None
entry.byte_size = 0
entry.class_id = None
elif mode == 'rw':
pass
s = Stream(self, entry, mode)
return s | 0.003538 |
def __look_up_geom(self, geomType):
""" compares the geometry object's type verse the JSOn
specs for geometry types
Inputs:
geomType - string - geometry object's type
Returns:
string JSON geometry type or None if not an allowed type
"""
if geomType.lower() == "point":
return "esriGeometryPoint"
elif geomType.lower() == "polyline":
return "esriGeometryPolyline"
elif geomType.lower() == "polygon":
return "esriGeometryPolygon"
elif geomType.lower() == "multipoint":
return "esriGeometryMultipoint"
else:
return None | 0.002865 |
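# An equivalent, table-driven sketch of the lookup above; behaviour is
# unchanged, since dict.get() returns None for unknown types just like the
# final else branch.
_GEOM_TYPES = {
    "point": "esriGeometryPoint",
    "polyline": "esriGeometryPolyline",
    "polygon": "esriGeometryPolygon",
    "multipoint": "esriGeometryMultipoint",
}

def look_up_geom(geom_type):
    return _GEOM_TYPES.get(geom_type.lower())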
def _create_api_call(self, method, _url, kwargs):
"""
This will create an APICall object and return it
        :param method: str of the HTTP method ['GET','POST','PUT','DELETE']
:param _url: str of the sub url of the api call (ex. g/device/list)
:param kwargs: dict of additional arguments
:return: ApiCall
"""
api_call = self.ApiCall(name='%s.%s' % (_url, method),
label='ID_%s' % self._count,
base_uri=self.base_uri,
timeout=self.timeout,
headers=self.headers,
cookies=self.cookies,
proxies=self.proxies,
accepted_return=self.accepted_return or 'json')
if self.max_history:
self._count += 1 # count of _calls
if len(self) > self.max_history:
self._calls.pop(0)
self._calls['ID_%s' % self._count] = api_call
return api_call | 0.001842 |
def load_state(self, state_id, delete=True):
"""
Load a state from storage identified by `state_id`.
:param state_id: The state reference of what to load
:return: The deserialized state
:rtype: State
"""
return self._store.load_state(f'{self._prefix}{state_id:08x}{self._suffix}', delete=delete) | 0.008523 |
def image(name,
data,
step=None,
max_outputs=3,
description=None):
"""Write an image summary.
Arguments:
name: A name for this summary. The summary tag used for TensorBoard will
be this name prefixed by any active name scopes.
data: A `Tensor` representing pixel data with shape `[k, h, w, c]`,
where `k` is the number of images, `h` and `w` are the height and
width of the images, and `c` is the number of channels, which
should be 1, 2, 3, or 4 (grayscale, grayscale with alpha, RGB, RGBA).
Any of the dimensions may be statically unknown (i.e., `None`).
Floating point data will be clipped to the range [0,1).
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
max_outputs: Optional `int` or rank-0 integer `Tensor`. At most this
many images will be emitted at each step. When more than
`max_outputs` many images are provided, the first `max_outputs` many
images will be used and the rest silently discarded.
description: Optional long-form description for this summary, as a
constant `str`. Markdown is supported. Defaults to empty.
Returns:
True on success, or false if no summary was emitted because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = metadata.create_summary_metadata(
display_name=None, description=description)
# TODO(https://github.com/tensorflow/tensorboard/issues/2109): remove fallback
summary_scope = (
getattr(tf.summary.experimental, 'summary_scope', None) or
tf.summary.summary_scope)
with summary_scope(
name, 'image_summary', values=[data, max_outputs, step]) as (tag, _):
tf.debugging.assert_rank(data, 4)
tf.debugging.assert_non_negative(max_outputs)
images = tf.image.convert_image_dtype(data, tf.uint8, saturate=True)
limited_images = images[:max_outputs]
encoded_images = tf.map_fn(tf.image.encode_png, limited_images,
dtype=tf.string,
name='encode_each_image')
# Workaround for map_fn returning float dtype for an empty elems input.
encoded_images = tf.cond(
tf.shape(input=encoded_images)[0] > 0,
lambda: encoded_images, lambda: tf.constant([], tf.string))
image_shape = tf.shape(input=images)
dimensions = tf.stack([tf.as_string(image_shape[2], name='width'),
tf.as_string(image_shape[1], name='height')],
name='dimensions')
tensor = tf.concat([dimensions, encoded_images], axis=0)
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata) | 0.003066 |
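# A hedged usage sketch for the image() summary op above, assuming TensorFlow
# 2.x with eager execution and the surrounding tensorboard summary module in
# scope; the log directory is illustrative.
import numpy as np
import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/logs")
batch = np.random.uniform(size=(4, 32, 32, 3)).astype(np.float32)  # [k, h, w, c]
with writer.as_default():
    image("random_images", batch, step=0, max_outputs=2)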
def start(client, container, interactive=True, stdout=None, stderr=None, stdin=None, logs=None):
"""
Present the PTY of the container inside the current process.
This is just a wrapper for PseudoTerminal(client, container).start()
"""
operation = RunOperation(client, container, interactive=interactive, stdout=stdout,
stderr=stderr, stdin=stdin, logs=logs)
PseudoTerminal(client, operation).start() | 0.006593 |
def get_crl(self, expires=86400, encoding=None, algorithm=None, password=None, scope=None, **kwargs):
"""Generate a Certificate Revocation List (CRL).
The ``full_name`` and ``relative_name`` parameters describe how to retrieve the CRL and are used in
the `Issuing Distribution Point extension <https://tools.ietf.org/html/rfc5280.html#section-5.2.5>`_.
The former defaults to the ``crl_url`` field, pass ``None`` to not include the value. At most one of
the two may be set.
Parameters
----------
        expires : int
            The time in seconds until this CRL expires. Note that you should generate a new CRL before then.
encoding : :py:class:`~cg:cryptography.hazmat.primitives.serialization.Encoding` or str, optional
The encoding format for the CRL, passed to :py:func:`~django_ca.utils.parse_encoding`. The default
value is ``"PEM"``.
algorithm : :py:class:`~cg:cryptography.hazmat.primitives.hashes.Hash` or str, optional
The hash algorithm to use, passed to :py:func:`~django_ca.utils.parse_hash_algorithm`. The default
is to use :ref:`CA_DIGEST_ALGORITHM <settings-ca-digest-algorithm>`.
password : bytes, optional
Password used to load the private key of the certificate authority. If not passed, the private key
is assumed to be unencrypted.
scope : {None, 'ca', 'user', 'attribute'}, optional
What to include in the CRL: Use ``"ca"`` to include only revoked certificate authorities and
``"user"`` to include only certificates or ``None`` (the default) to include both.
``"attribute"`` is reserved for future use and always produces an empty CRL.
full_name : list of str or :py:class:`~cg:cryptography.x509.GeneralName`, optional
List of general names to use in the Issuing Distribution Point extension. If not passed, use
``crl_url`` if set.
relative_name : :py:class:`~cg:cryptography.x509.RelativeDistinguishedName`, optional
Used in Issuing Distribution Point extension, retrieve the CRL relative to the issuer.
Returns
-------
bytes
The CRL in the requested format.
"""
if scope is not None and scope not in ['ca', 'user', 'attribute']:
raise ValueError('Scope must be either None, "ca", "user" or "attribute"')
encoding = parse_encoding(encoding)
now = now_builder = timezone.now()
algorithm = parse_hash_algorithm(algorithm)
if timezone.is_aware(now_builder):
now_builder = timezone.make_naive(now, pytz.utc)
builder = x509.CertificateRevocationListBuilder()
builder = builder.issuer_name(self.x509.subject)
builder = builder.last_update(now_builder)
builder = builder.next_update(now_builder + timedelta(seconds=expires))
if 'full_name' in kwargs:
full_name = kwargs['full_name']
full_name = [parse_general_name(n) for n in full_name]
elif self.crl_url:
crl_url = [url.strip() for url in self.crl_url.split()]
full_name = [x509.UniformResourceIdentifier(c) for c in crl_url]
else:
full_name = None
# Keyword arguments for the IssuingDistributionPoint extension
idp_kwargs = {
'only_contains_ca_certs': False,
'only_contains_user_certs': False,
'indirect_crl': False,
'only_contains_attribute_certs': False,
'only_some_reasons': None,
'full_name': full_name,
'relative_name': kwargs.get('relative_name'),
}
ca_qs = self.children.filter(expires__gt=now).revoked()
cert_qs = self.certificate_set.filter(expires__gt=now).revoked()
if scope == 'ca':
certs = ca_qs
idp_kwargs['only_contains_ca_certs'] = True
elif scope == 'user':
certs = cert_qs
idp_kwargs['only_contains_user_certs'] = True
elif scope == 'attribute':
# sorry, nothing we support right now
certs = []
idp_kwargs['only_contains_attribute_certs'] = True
else:
certs = itertools.chain(ca_qs, cert_qs)
for cert in certs:
builder = builder.add_revoked_certificate(cert.get_revocation())
if ca_settings.CRYPTOGRAPHY_HAS_IDP: # pragma: no branch, pragma: only cryptography>=2.5
builder = builder.add_extension(x509.IssuingDistributionPoint(**idp_kwargs), critical=True)
# TODO: Add CRLNumber extension
# https://cryptography.io/en/latest/x509/reference/#cryptography.x509.CRLNumber
crl = builder.sign(private_key=self.key(password), algorithm=algorithm, backend=default_backend())
return crl.public_bytes(encoding) | 0.004893 |
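# A minimal sketch of generating a PEM CRL covering end-entity certificates
# only, assuming `ca` is a certificate authority instance whose private key is
# unencrypted; the output file name is illustrative.
crl_pem = ca.get_crl(expires=3600, encoding="PEM", scope="user")
with open("user.crl.pem", "wb") as fh:
    fh.write(crl_pem)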
def format_messages(self, messages):
""" Formats several messages with :class:Look, encodes them
with :func:vital.tools.encoding.stdout_encode """
mess = ""
for message in self.message:
if self.pretty:
mess = "{}{}".format(mess, self.format_message(message))
else:
mess += str(message)
if self.include_time:
return ": {} : {}".format(
datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), mess)
return stdout_encode(mess) | 0.003584 |
def dist_iter(self, g_nums, ats_1, ats_2, invalid_error=False):
""" Iterator over selected interatomic distances.
Distances are in Bohrs as with :meth:`dist_single`.
See `above <toc-generators_>`_ for more information on
calling options.
Parameters
----------
g_nums
|int| or length-R iterable |int| or |None| --
Index/indices of the desired geometry/geometries
ats_1
|int| or iterable |int| or |None| --
Index/indices of the first atom(s)
ats_2
|int| or iterable |int| or |None| --
Index/indices of the second atom(s)
invalid_error
|bool|, optional --
If |False| (the default), |None| values are returned for
results corresponding to invalid indices. If |True|,
exceptions are raised per normal.
Yields
------
dist
|npfloat_| --
Interatomic distance in Bohrs between each atom pair of
`ats_1` and `ats_2` from the corresponding geometries
of `g_nums`.
Raises
------
~exceptions.IndexError
If an invalid (out-of-range) `g_num` or `at_#` is provided.
~exceptions.ValueError
If all iterable objects are not the same length.
"""
# Imports
import numpy as np
from .utils import pack_tups
# Print the function inputs if debug mode is on
if _DEBUG: # pragma: no cover
print("g_nums = {0}".format(g_nums))
print("ats_1 = {0}".format(ats_1))
print("ats_2 = {0}".format(ats_2))
## end if
# Perform the None substitution
arglist = self._none_subst(g_nums, ats_1, ats_2)
# Expand/pack the tuples from the inputs
tups = pack_tups(*arglist)
# Dump the results if debug mode is on
if _DEBUG: # pragma: no cover
print(tups)
## end if
# Construct the generator using the packed tuples. If 'None' expansion
# was used, return None for any invalid indices instead of raising
# an exception.
for tup in tups:
yield self._iter_return(tup, self.dist_single, invalid_error) | 0.001737 |
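# A hedged usage sketch for dist_iter(), assuming `xyz` is an instance of the
# containing geometry class with at least three atoms loaded; the indices are
# illustrative. Scalars and iterables are packed together by pack_tups().
for d in xyz.dist_iter(g_nums=0, ats_1=0, ats_2=[1, 2]):
    print("distance (Bohr):", d)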
def init_sqlite_db(path, initTime=False):
"""
Initialize SQLite Database
Args:
path(str): Path to database (Ex. '/home/username/my_sqlite.db').
initTime(Optional[bool]): If True, it will print the amount of time to generate database.
Example::
        from gsshapy.lib.db_tools import init_sqlite_db, get_sessionmaker
        sqlite_db_path = '/home/username/my_sqlite.db'
        sqlalchemy_url = init_sqlite_db(path=sqlite_db_path)
db_work_sessionmaker = get_sessionmaker(sqlalchemy_url)
db_work_session = db_work_sessionmaker()
##DO WORK
db_work_session.close()
"""
sqlite_base_url = 'sqlite:///'
sqlalchemy_url = sqlite_base_url + path
init_time = init_db(sqlalchemy_url)
if initTime:
print('TIME: {0} seconds'.format(init_time))
return sqlalchemy_url | 0.014199 |
def elliot_function( signal, derivative=False ):
""" A fast approximation of sigmoid """
s = 1 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return 0.5 * s / abs_signal**2
else:
# Return the activation signal
return 0.5*(signal * s) / abs_signal + 0.5 | 0.015674 |
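# A quick numerical check of the fast approximation against the exact logistic
# sigmoid, assuming elliot_function() above is in scope; depends only on NumPy.
import numpy as np

x = np.linspace(-5.0, 5.0, 5)
approx = elliot_function(x)
exact = 1.0 / (1.0 + np.exp(-x))
print(np.round(approx, 3))   # stays within roughly 0.1 of the true sigmoid
print(np.round(exact, 3))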
def nvmlDeviceSetComputeMode(handle, mode):
r"""
/**
* Set the compute mode for the device.
*
* For all products.
* Requires root/admin permissions.
*
* The compute mode determines whether a GPU can be used for compute operations and whether it can
* be shared across contexts.
*
* This operation takes effect immediately. Under Linux it is not persistent across reboots and
* always resets to "Default". Under windows it is persistent.
*
* Under windows compute mode may only be set to DEFAULT when running in WDDM
*
* See \ref nvmlComputeMode_t for details on available compute modes.
*
* @param device The identifier of the target device
* @param mode The target compute mode
*
* @return
* - \ref NVML_SUCCESS if the compute mode was set
* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid
* - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
* - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation
* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
* - \ref NVML_ERROR_UNKNOWN on any unexpected error
*
* @see nvmlDeviceGetComputeMode()
*/
nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode
"""
fn = _nvmlGetFunctionPointer("nvmlDeviceSetComputeMode")
ret = fn(handle, _nvmlComputeMode_t(mode))
_nvmlCheckReturn(ret)
return None | 0.005552 |
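# A usage sketch with the standard pynvml bindings, which expose the same
# wrapper as above; this requires an NVIDIA driver and root/admin privileges,
# so treat it as illustrative only.
from pynvml import (nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex,
                    nvmlDeviceSetComputeMode, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS)

nvmlInit()
try:
    handle = nvmlDeviceGetHandleByIndex(0)
    nvmlDeviceSetComputeMode(handle, NVML_COMPUTEMODE_EXCLUSIVE_PROCESS)
finally:
    nvmlShutdown()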
def save_profile_as(self):
"""Save the minimum needs under a new profile name.
"""
# noinspection PyCallByClass,PyTypeChecker
file_name_dialog = QFileDialog(self)
file_name_dialog.setAcceptMode(QFileDialog.AcceptSave)
file_name_dialog.setNameFilter(self.tr('JSON files (*.json *.JSON)'))
file_name_dialog.setDefaultSuffix('json')
        directory = os.path.join(QgsApplication.qgisSettingsDirPath(),
                                 'inasafe', 'minimum_needs')
        file_name_dialog.setDirectory(expanduser(directory))
if file_name_dialog.exec_():
file_name = file_name_dialog.selectedFiles()[0]
else:
return
file_name = basename(file_name)
file_name = file_name.replace('.json', '')
minimum_needs = {'resources': []}
self.mark_current_profile_as_saved()
for index in range(self.resources_list.count()):
item = self.resources_list.item(index)
minimum_needs['resources'].append(item.resource_full)
minimum_needs['provenance'] = self.provenance.text()
minimum_needs['profile'] = file_name
self.minimum_needs.update_minimum_needs(minimum_needs)
self.minimum_needs.save()
self.minimum_needs.save_profile(file_name)
if self.profile_combo.findText(file_name) == -1:
self.profile_combo.addItem(file_name)
self.profile_combo.setCurrentIndex(
self.profile_combo.findText(file_name)) | 0.001334 |
def print_options(self):
""" print description of the component options
"""
summary = []
for opt_name, opt in self.options.items():
if opt.hidden:
continue
summary.append(opt.summary())
print("\n".join(summary)) | 0.006873 |
def visitInlineShapeOrRef(self, ctx: ShExDocParser.InlineShapeOrRefContext):
""" inlineShapeOrRef: inlineShapeDefinition | shapeRef """
if ctx.inlineShapeDefinition():
from pyshexc.parser_impl.shex_shape_definition_parser import ShexShapeDefinitionParser
shdef_parser = ShexShapeDefinitionParser(self.context, self.label)
shdef_parser.visitChildren(ctx)
self.expr = shdef_parser.shape
else:
self.expr = self.context.shapeRef_to_iriref(ctx.shapeRef()) | 0.005618 |
def _convert_operator(op_name, attrs, identity_list=None, convert_map=None):
"""Convert from onnx operator to mxnet operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
attrs : dict
Dict of operator attributes
identity_list : list
List of operators that don't require conversion
convert_map : dict
        Dict of name : callable, where name is an op name that requires
        conversion to mxnet and callable is a function which
        takes attrs and returns (new_op_name, new_attrs)
Returns
-------
(op_name, attrs)
Converted (op_name, attrs) for mxnet.
"""
identity_list = identity_list if identity_list else _identity_list
convert_map = convert_map if convert_map else _convert_map
if op_name in identity_list:
pass
elif op_name in convert_map:
op_name, attrs = convert_map[op_name](attrs)
else:
raise NotImplementedError("Operator {} not implemented.".format(op_name))
op = getattr(mx.sym, op_name, None)
if not op:
raise RuntimeError("Unable to map op_name {} to sym".format(op_name))
return op, attrs | 0.002278 |
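# An illustrative sketch of the convert_map contract described above: a handler
# receives the ONNX attrs and returns the MXNet op name plus (possibly
# rewritten) attrs. The Relu mapping is hypothetical, not from the source, and
# running this requires mxnet to be installed.
def _relu_handler(attrs):
    return 'relu', attrs          # ONNX 'Relu' maps onto mx.sym.relu

# Note: a non-empty identity_list is passed here to avoid the falsy-list
# fallback onto the module-level _identity_list.
op, attrs = _convert_operator('Relu', {},
                              identity_list=['Flatten'],
                              convert_map={'Relu': _relu_handler})
print(op)   # the mx.sym.relu symbol constructor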
def finalize_block(self, block: BaseBlock) -> BaseBlock:
"""
Perform any finalization steps like awarding the block mining reward,
and persisting the final state root.
"""
if block.number > 0:
self._assign_block_rewards(block)
# We need to call `persist` here since the state db batches
# all writes until we tell it to write to the underlying db
self.state.persist()
return block.copy(header=block.header.copy(state_root=self.state.state_root)) | 0.00565 |
def get(self, request, *args, **kwargs):
"""
Main entry. This View only responds to GET requests.
"""
context = self.chart_instance.chartjs_configuration(*args, **kwargs)
return self.render_json_response(context) | 0.007937 |
def add_nodes_from(self, nodes, weights=None):
"""
Add multiple nodes to the Graph.
        **The behaviour of adding weights is different from networkx.**
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, or any hashable python
object).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the variable at index i.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']
Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
"""
nodes = list(nodes)
if weights:
if len(nodes) != len(weights):
raise ValueError("The number of elements in nodes and weights"
"should be equal.")
for index in range(len(nodes)):
self.add_node(node=nodes[index], weight=weights[index])
else:
for node in nodes:
self.add_node(node=node) | 0.001422 |
def hvals(self, key, *, encoding=_NOTSET):
"""Get all the values in a hash."""
return self.execute(b'HVALS', key, encoding=encoding) | 0.013514 |
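# A hedged usage sketch, assuming `redis` is an aioredis-style client exposing
# the hvals() coroutine above; the key name is illustrative.
async def show_hash_values(redis):
    vals = await redis.hvals('user:1000', encoding='utf-8')
    print(vals)   # e.g. ['Alice', 'alice@example.com']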
def assert_not_present(self, selector, testid=None, **kwargs):
"""Assert that the element is not present in the dom
Args:
selector (str): the selector used to find the element
test_id (str): the test_id or a str
Kwargs:
wait_until_not_present (bool)
Returns:
bool: True is the assertion succeed; False otherwise.
"""
self.info_log(
"Assert not present selector(%s) testid(%s)" %
(selector, testid)
)
wait_until_not_present = kwargs.get(
'wait_until_not_present',
BROME_CONFIG['proxy_driver']['wait_until_not_present_before_assert_not_present'] # noqa
)
self.debug_log(
"effective wait_until_not_present: %s" % wait_until_not_present
)
if wait_until_not_present:
ret = self.wait_until_not_present(selector, raise_exception=False)
else:
ret = not self.is_present(selector)
if ret:
if testid is not None:
self.create_test_result(testid, True)
return True
else:
if testid is not None:
self.create_test_result(testid, False)
return False | 0.001571 |
def do_dir(self, args, unknown):
"""List contents of current directory."""
        # This command takes no positional arguments
if unknown:
self.perror("dir does not take any positional arguments:", traceback_war=False)
self.do_help('dir')
self._last_result = cmd2.CommandResult('', 'Bad arguments')
return
# Get the contents as a list
contents = os.listdir(self.cwd)
fmt = '{} '
if args.long:
fmt = '{}\n'
for f in contents:
self.stdout.write(fmt.format(f))
self.stdout.write('\n')
self._last_result = cmd2.CommandResult(data=contents) | 0.004478 |
def decoded_output_boxes(self):
"""
Returns:
Nx#classx4
"""
ret = self._cascade_boxes[-1]
ret = tf.expand_dims(ret, 1) # class-agnostic
return tf.tile(ret, [1, self.num_classes, 1]) | 0.008163 |
def set_properties(self, pathobj, props, recursive):
"""
Set artifact properties
"""
url = '/'.join([pathobj.drive,
'api/storage',
str(pathobj.relative_to(pathobj.drive)).strip('/')])
params = {'properties': encode_properties(props)}
if not recursive:
params['recursive'] = '0'
text, code = self.rest_put(url,
params=params,
auth=pathobj.auth,
verify=pathobj.verify,
cert=pathobj.cert)
if code == 404 and "Unable to find item" in text:
raise OSError(2, "No such file or directory: '%s'" % url)
if code != 204:
raise RuntimeError(text) | 0.002398 |
def commit_transaction(self):
""" Commit the current transaction.
:returns: the bookmark returned from the server, if any
:raise: :class:`.TransactionError` if no transaction is currently open
"""
self._assert_open()
if not self._transaction:
raise TransactionError("No transaction to commit")
metadata = {}
try:
self._connection.commit(on_success=metadata.update)
finally:
self._disconnect(sync=True)
self._transaction = None
bookmark = metadata.get("bookmark")
self._bookmarks_in = tuple([bookmark])
self._bookmark_out = bookmark
return bookmark | 0.002857 |
def defUtilityFuncs(self):
'''
Defines CRRA utility function for this period (and its derivatives,
and their inverses), saving them as attributes of self for other methods
to use.
Parameters
----------
none
Returns
-------
none
'''
ConsPerfForesightSolver.defUtilityFuncs(self)
self.uPinv = lambda u : utilityP_inv(u,gam=self.CRRA)
self.uPinvP = lambda u : utilityP_invP(u,gam=self.CRRA)
self.uinvP = lambda u : utility_invP(u,gam=self.CRRA)
if self.vFuncBool:
self.uinv = lambda u : utility_inv(u,gam=self.CRRA) | 0.022523 |
def get_imports(fname):
""" get a list of imports from a Python program """
txt = ''
with open(fname, 'r') as f:
for line in f:
if line[0:6] == 'import':
txt += '<PRE>' + strip_text_after_string(line[7:], ' as ') + '</PRE>\n'
return txt + '<BR>' | 0.006734 |
def savefig(self, output_path, **kwargs):
"""Save figure during generation.
This method is used to save a completed figure during the main function run.
It represents a call to ``matplotlib.pyplot.fig.savefig``.
# TODO: Switch to kwargs for matplotlib.pyplot.savefig
Args:
output_path (str): Relative path to the WORKING_DIRECTORY to save the figure.
Keyword Arguments:
dpi (int, optional): Dots per inch of figure. Default is 200.
Note: Other kwargs are available. See:
https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
"""
self.figure.save_figure = True
self.figure.output_path = output_path
self.figure.savefig_kwargs = kwargs
return | 0.005019 |
def formvalue (form, key):
"""Get value with given key from WSGI form."""
field = form.get(key)
if isinstance(field, list):
field = field[0]
return field | 0.011299 |