| text (string, lengths 78–104k) | score (float64, 0–0.18) |
|---|---|
def qps(self, callback=None, errback=None):
"""
Return the current QPS for this record
:rtype: dict
:return: QPS information
"""
if not self.data:
raise RecordException('record not loaded')
stats = Stats(self.parentZone.config)
return stats.qps(zone=self.parentZone.zone,
domain=self.domain,
type=self.type,
callback=callback,
errback=errback) | 0.003846 |
def calls(ctx, obj, limit):
""" List call/short positions of an account or an asset
"""
if obj.upper() == obj:
# Asset
from bitshares.asset import Asset
asset = Asset(obj, full=True)
calls = asset.get_call_orders(limit)
t = [["acount", "debt", "collateral", "call price", "ratio"]]
for call in calls:
t.append(
[
str(call["account"]["name"]),
str(call["debt"]),
str(call["collateral"]),
str(call["call_price"]),
"%.2f" % (call["ratio"]),
]
)
print_table(t)
else:
# Account
from bitshares.dex import Dex
dex = Dex(bitshares_instance=ctx.bitshares)
calls = dex.list_debt_positions(account=obj)
t = [["debt", "collateral", "call price", "ratio"]]
for symbol in calls:
t.append(
[
str(calls[symbol]["debt"]),
str(calls[symbol]["collateral"]),
str(calls[symbol]["call_price"]),
"%.2f" % (calls[symbol]["ratio"]),
]
)
print_table(t) | 0.000801 |
def fill(self, postf_un_ops: str):
"""
Insert:
* math styles
* other styles
* unary prefix operators without brackets
* defaults
"""
for op, dic in self.ops.items():
if 'postf' not in dic:
dic['postf'] = self.postf
self.ops = OrderedDict(
self.styles.spec(postf_un_ops) +
self.other_styles.spec(postf_un_ops) +
self.pref_un_greedy.spec() +
list(self.ops.items())
)
for op, dic in self.ops.items():
dic['postf'] = re.compile(dic['postf'])
self.regex = _search_regex(self.ops, self.regex_pat) | 0.002933 |
def getThesaurus(self, word):
    """Return related words (synonyms) for ``word`` via the Wordnik API."""
    # Previous implementation (Big Huge Thesaurus), kept for reference:
    # response = requests.get("http://words.bighugelabs.com/api/2/%s/%s/json"
    #                         % (self.tkey, word)).json()
    # return response
response = requests.get(
"http://api.wordnik.com:80/v4/word.json/%s/relatedWords?"
"useCanonical=false&relationshipTypes=synonym&limitPer"
"RelationshipType=15&api_key=%s" % (word, key)).json()
try:
return response[0]
except IndexError:
pass | 0.005758 |
def delete(self, symbol):
"""
Deletes a Symbol.
Parameters
----------
symbol : str or Symbol
"""
if isinstance(symbol, (str, unicode)):
sym = self.get(symbol)
elif isinstance(symbol, Symbol):
sym = symbol
else:
raise Exception("Invalid symbol {}".format((repr(symbol))))
# Has to handle the case where the table would exist already
# and where it wouldn't.
try:
sym.datatable = Table(sym.name, Base.metadata, autoload=True)
sym.datatable.drop(self.eng, checkfirst=True)
except NoSuchTableError:
print "No worries, {} never existed to begin with.".format(sym.name)
self.ses.delete(sym)
self.ses.commit() | 0.004825 |
def forward(self, X):
"""Forward function.
:param X: The input (batch) of the model contains word sequences for lstm
and features.
:type X: For word sequences: a list of torch.Tensor pair (word sequence
and word mask) of shape (batch_size, sequence_length).
For features: torch.Tensor of shape (batch_size, feature_size).
:return: The output of LSTM layer.
:rtype: torch.Tensor of shape (batch_size, num_classes)
"""
s = X[:-1]
f = X[-1]
batch_size = len(f)
outputs = self._cuda(torch.Tensor([]))
# Calculate textual features from LSTMs
for i in range(len(s)):
state_word = self.lstms[0].init_hidden(batch_size)
output = self.lstms[0].forward(s[i][0], s[i][1], state_word)
outputs = torch.cat((outputs, output), 1)
# Concatenate textual features with multi-modal features
outputs = torch.cat((outputs, f), 1)
return self.linear(outputs) | 0.002899 |
def addfile(project, filename, user, postdata, inputsource=None,returntype='xml'): #pylint: disable=too-many-return-statements
"""Add a new input file, this invokes the actual uploader"""
def errorresponse(msg, code=403):
if returntype == 'json':
return withheaders(flask.make_response("{success: false, error: '" + msg + "'}"),'application/json',{'allow_origin': settings.ALLOW_ORIGIN})
else:
return withheaders(flask.make_response(msg,403),headers={'allow_origin': settings.ALLOW_ORIGIN}) #(it will have to be explicitly deleted by the client first)
inputtemplate_id = flask.request.headers.get('Inputtemplate','')
inputtemplate = None
metadata = None
printdebug('Handling addfile, postdata contains fields ' + ",".join(postdata.keys()) )
if 'inputtemplate' in postdata:
inputtemplate_id = postdata['inputtemplate']
if inputtemplate_id:
#An input template must always be provided
for profile in settings.PROFILES:
for t in profile.input:
if t.id == inputtemplate_id:
inputtemplate = t
if not inputtemplate:
#Inputtemplate not found, send 404
printlog("Specified inputtemplate (" + inputtemplate_id + ") not found!")
return withheaders(flask.make_response("Specified inputtemplate (" + inputtemplate_id + ") not found!",404),headers={'allow_origin': settings.ALLOW_ORIGIN})
printdebug('Inputtemplate explicitly provided: ' + inputtemplate.id )
if not inputtemplate:
#See if an inputtemplate is explicitly specified in the filename
printdebug('Attempting to determine input template from filename ' + filename )
if '/' in filename.strip('/'):
raw = filename.split('/')
inputtemplate = None
for profile in settings.PROFILES:
for it in profile.input:
if it.id == raw[0]:
inputtemplate = it
break
if inputtemplate:
filename = raw[1]
if not inputtemplate:
#Check if the specified filename can be uniquely associated with an inputtemplate
for profile in settings.PROFILES:
for t in profile.input:
if t.filename == filename:
if inputtemplate:
#we found another one, not unique!! reset and break
inputtemplate = None
break
else:
#good, we found one, don't break cause we want to make sure there is only one
inputtemplate = t
if not inputtemplate:
printlog("No inputtemplate specified and filename " + filename + " does not uniquely match with any inputtemplate!")
return errorresponse("No inputtemplate specified nor auto-detected for filename " + filename + "!",404)
#See if other previously uploaded input files use this inputtemplate
if inputtemplate.unique:
nextseq = 0 #unique
else:
nextseq = 1 #will hold the next sequence number for this inputtemplate (in multi-mode only)
for seq, inputfile in Project.inputindexbytemplate(project, user, inputtemplate): #pylint: disable=unused-variable
if inputtemplate.unique:
return errorresponse("You have already submitted a file of this type, you can only submit one. Delete it first. (Inputtemplate=" + inputtemplate.id + ", unique=True)")
else:
if seq >= nextseq:
nextseq = seq + 1 #next available sequence number
if not filename: #Actually, I don't think this can occur at this stage, but we'll leave it in to be sure (yes it can, when the entry shortcut is used!)
if inputtemplate.filename:
filename = inputtemplate.filename
elif inputtemplate.extension:
filename = str(nextseq) +'-' + str("%034x" % random.getrandbits(128)) + '.' + inputtemplate.extension
else:
filename = str(nextseq) +'-' + str("%034x" % random.getrandbits(128))
#Make sure filename matches (only if not an archive)
if inputtemplate.acceptarchive and (filename[-7:].lower() == '.tar.gz' or filename[-8:].lower() == '.tar.bz2' or filename[-4:].lower() == '.zip'):
pass
else:
if inputtemplate.filename:
if filename != inputtemplate.filename:
filename = inputtemplate.filename
#return flask.make_response("Specified filename must the filename dictated by the inputtemplate, which is " + inputtemplate.filename)
#TODO LATER: add support for calling this with an actual number instead of #
if inputtemplate.extension:
if filename[-len(inputtemplate.extension) - 1:].lower() == '.' + inputtemplate.extension.lower():
#good, extension matches (case independent). Let's just make sure the case is as defined exactly by the inputtemplate
filename = filename[:-len(inputtemplate.extension) - 1] + '.' + inputtemplate.extension
else:
filename = filename + '.' + inputtemplate.extension
#return flask.make_response("Specified filename does not have the extension dictated by the inputtemplate ("+inputtemplate.extension+")") #403
if inputtemplate.onlyinputsource and (not 'inputsource' in postdata or not postdata['inputsource']):
return errorresponse("Adding files for this inputtemplate must proceed through inputsource")
if 'converter' in postdata and postdata['converter'] and not postdata['converter'] in [ x.id for x in inputtemplate.converters]:
return errorresponse("Invalid converter specified: " + postdata['converter'])
#Make sure the filename is secure
validfilename = True
DISALLOWED = ('/','&','|','<','>',';','"',"'","`","{","}","\n","\r","\b","\t")
for c in filename:
if c in DISALLOWED:
validfilename = False
break
if not validfilename:
return errorresponse("Filename contains invalid symbols! Do not use /,&,|,<,>,',`,\",{,} or ;")
#Create the project (no effect if already exists)
response = Project.create(project, user)
if response is not None:
return response
printdebug("(Obtaining filename for uploaded file)")
head = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
head += "<clamupload>\n"
if 'file' in flask.request.files:
printlog("Adding client-side file " + flask.request.files['file'].filename + " to input files")
sourcefile = flask.request.files['file'].filename
elif 'url' in postdata and postdata['url']:
#Download from URL
printlog("Adding web-based URL " + postdata['url'] + " to input files")
sourcefile = os.path.basename(postdata['url'])
elif 'contents' in postdata and postdata['contents']:
#In message
printlog("Adding file " + filename + " with explicitly provided contents to input files")
sourcefile = "editor"
elif 'inputsource' in postdata and postdata['inputsource']:
printlog("Adding file " + filename + " from preinstalled data to input files")
if not inputsource:
inputsource = None
for s in inputtemplate.inputsources:
if s.id.lower() == postdata['inputsource'].lower():
inputsource = s
if not inputsource:
return errorresponse("Specified inputsource '" + postdata['inputsource'] + "' does not exist for inputtemplate '"+inputtemplate.id+"'")
sourcefile = os.path.basename(inputsource.path)
elif 'accesstoken' in postdata and 'filename' in postdata:
#XHR POST, data in body
printlog("Adding client-side file " + filename + " to input files. Uploaded using XHR POST")
sourcefile = postdata['filename']
else:
return errorresponse("No file, url or contents specified!")
#============================ Generate metadata ========================================
printdebug('(Generating and validating metadata)')
if 'metafile' in flask.request.files: #and (not isinstance(postdata['metafile'], dict) or len(postdata['metafile']) > 0)):
#an explicit metadata file was provided, upload it:
printlog("Metadata explicitly provided in file, uploading...")
#Upload file from client to server
metafile = Project.path(project, user) + 'input/.' + filename + '.METADATA'
flask.request.files['metafile'].save(metafile)
try:
with io.open(metafile,'r',encoding='utf-8') as f:
metadata = clam.common.data.CLAMMetaData.fromxml(f.read())
errors, parameters = inputtemplate.validate(metadata, user)
validmeta = True
except Exception as e: #pylint: disable=broad-except
printlog("Uploaded metadata is invalid! " + str(e))
metadata = None
errors = True
parameters = []
validmeta = False
elif 'metadata' in postdata and postdata['metadata']:
printlog("Metadata explicitly provided in message, uploading...")
try:
metadata = clam.common.data.CLAMMetaData.fromxml(postdata['metadata'])
errors, parameters = inputtemplate.validate(metadata, user)
validmeta = True
except: #pylint: disable=bare-except
printlog("Uploaded metadata is invalid!")
metadata = None
errors = True
parameters = []
validmeta = False
elif 'inputsource' in postdata and postdata['inputsource']:
printlog("Getting metadata from inputsource, uploading...")
if inputsource.metadata:
printlog("DEBUG: Validating metadata from inputsource")
metadata = inputsource.metadata
errors, parameters = inputtemplate.validate(metadata, user)
validmeta = True
else:
printlog("DEBUG: No metadata provided with inputsource, looking for metadata files..")
metafilename = os.path.dirname(inputsource.path)
if metafilename: metafilename += '/'
metafilename += '.' + os.path.basename(inputsource.path) + '.METADATA'
if os.path.exists(metafilename):
try:
metadata = clam.common.data.CLAMMetaData.fromxml(open(metafilename,'r').readlines())
errors, parameters = inputtemplate.validate(metadata, user)
validmeta = True
except: #pylint: disable=bare-except
printlog("Uploaded metadata is invalid!")
metadata = None
errors = True
parameters = []
validmeta = False
else:
return withheaders(flask.make_response("No metadata found nor specified for inputsource " + inputsource.id ,500),headers={'allow_origin': settings.ALLOW_ORIGIN})
else:
errors, parameters = inputtemplate.validate(postdata, user)
validmeta = True #will be checked later
# ----------- Check if archive are allowed -------------
archive = False
addedfiles = []
if not errors and inputtemplate.acceptarchive: #pylint: disable=too-many-nested-blocks
printdebug('(Archive test)')
# -------- Are we an archive? If so, determine what kind
archivetype = None
if 'file' in flask.request.files:
uploadname = sourcefile.lower()
archivetype = None
if uploadname[-4:] == '.zip':
archivetype = 'zip'
elif uploadname[-7:] == '.tar.gz':
archivetype = 'tar.gz'
elif uploadname[-4:] == '.tar':
archivetype = 'tar'
elif uploadname[-8:] == '.tar.bz2':
archivetype = 'tar.bz2'
xhrpost = False
elif 'accesstoken' in postdata and 'filename' in postdata:
xhrpost = True
if postdata['filename'][-7:].lower() == '.tar.gz':
uploadname = sourcefile.lower()
archivetype = 'tar.gz'
elif postdata['filename'][-8:].lower() == '.tar.bz2':
uploadname = sourcefile.lower()
archivetype = 'tar.bz2'
elif postdata['filename'][-4:].lower() == '.tar':
uploadname = sourcefile.lower()
archivetype = 'tar'
elif postdata['filename'][-4:].lower() == '.zip':
uploadname = sourcefile.lower()
archivetype = 'zip'
if archivetype:
# =============== upload archive ======================
#random name
archive = "%032x" % random.getrandbits(128) + '.' + archivetype
#Upload file from client to server
printdebug('(Archive transfer starting)')
if not xhrpost:
flask.request.files['file'].save(Project.path(project,user) + archive)
elif xhrpost:
with open(Project.path(project,user) + archive,'wb') as f:
while True:
chunk = flask.request.stream.read(16384)
if chunk:
f.write(chunk)
else:
break
printdebug('(Archive transfer completed)')
# =============== Extract archive ======================
#Determine extraction command
if archivetype == 'zip':
cmd = 'unzip -u'
elif archivetype == 'tar':
cmd = 'tar -xvf'
elif archivetype == 'tar.gz':
cmd = 'tar -xvzf'
elif archivetype == 'tar.bz2':
cmd = 'tar -xvjf'
else:
raise Exception("Invalid archive format: " + archivetype) #invalid archive, shouldn't happen
#invoke extractor
printlog("Extracting '" + archive + "'" )
try:
process = subprocess.Popen(cmd + " " + archive, cwd=Project.path(project,user), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except: #pylint: disable=bare-except
return withheaders(flask.make_response("Unable to extract archive",500),headers={'allow_origin': settings.ALLOW_ORIGIN})
out, _ = process.communicate() #waits for process to end
if sys.version < '3':
if isinstance(out, str):
out = unicode(out,'utf-8') #pylint: disable=undefined-variable
else:
if isinstance(out, bytes):
out = str(out,'utf-8')
#Read filename results
firstline = True
for line in out.split("\n"):
line = line.strip()
if line:
printdebug('(Extraction output: ' + line+')')
subfile = None
if archivetype[0:3] == 'tar':
subfile = line
elif archivetype == 'zip' and not firstline: #firstline contains archive name itself, skip it
colon = line.find(":")
if colon:
subfile = line[colon + 1:].strip()
if subfile and os.path.isfile(Project.path(project, user) + subfile):
subfile_newname = clam.common.data.resolveinputfilename(os.path.basename(subfile), parameters, inputtemplate, nextseq+len(addedfiles), project)
printdebug('(Extracted file ' + subfile + ', moving to input/' + subfile_newname+')')
os.rename(Project.path(project, user) + subfile, Project.path(project, user) + 'input/' + subfile_newname)
addedfiles.append(subfile_newname)
firstline = False
#all done, remove archive
os.unlink(Project.path(project, user) + archive)
if not archive:
addedfiles = [clam.common.data.resolveinputfilename(filename, parameters, inputtemplate, nextseq, project)]
fatalerror = None
jsonoutput = {'success': False if errors else True, 'isarchive': archive}
output = head
for filename in addedfiles: #pylint: disable=too-many-nested-blocks
output += "<upload source=\""+sourcefile +"\" filename=\""+filename+"\" inputtemplate=\"" + inputtemplate.id + "\" templatelabel=\""+inputtemplate.label+"\" format=\""+inputtemplate.formatclass.__name__+"\">\n"
if not errors:
output += "<parameters errors=\"no\">"
else:
output += "<parameters errors=\"yes\">"
jsonoutput['error'] = 'There were parameter errors, file not uploaded: '
for parameter in parameters:
output += parameter.xml()
if parameter.error:
jsonoutput['error'] += parameter.error + ". "
output += "</parameters>"
if not errors:
if not archive:
#============================ Transfer file ========================================
printdebug('(Start file transfer: ' + Project.path(project, user) + 'input/' + filename+' )')
if 'file' in flask.request.files:
printdebug('(Receiving data by uploading file)')
#Upload file from client to server
flask.request.files['file'].save(Project.path(project, user) + 'input/' + filename)
elif 'url' in postdata and postdata['url']:
printdebug('(Receiving data via url)')
#Download file from 3rd party server to CLAM server
try:
r = requests.get(postdata['url'])
except:
raise flask.abort(404)
if not (r.status_code >= 200 and r.status_code < 300):
raise flask.abort(404)
CHUNK = 16 * 1024
f = open(Project.path(project, user) + 'input/' + filename,'wb')
for chunk in r.iter_content(chunk_size=CHUNK):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
f.close()
elif 'inputsource' in postdata and postdata['inputsource']:
#Copy (symlink!) from preinstalled data
printdebug('(Creating symlink to file ' + inputsource.path + ' <- ' + Project.path(project,user) + '/input/ ' + filename + ')')
os.symlink(inputsource.path, Project.path(project, user) + 'input/' + filename)
elif 'contents' in postdata and postdata['contents']:
                    printdebug('(Receiving data from contents variable)')
#grab encoding
encoding = 'utf-8'
for p in parameters:
if p.id == 'encoding':
encoding = p.value
#Contents passed in POST message itself
try:
f = io.open(Project.path(project, user) + 'input/' + filename,'w',encoding=encoding)
f.write(postdata['contents'])
f.close()
except UnicodeError:
return errorresponse("Input file " + str(filename) + " is not in the expected encoding!")
elif 'accesstoken' in postdata and 'filename' in postdata:
printdebug('(Receiving data directly from post body)')
with open(Project.path(project,user) + 'input/' + filename,'wb') as f:
while True:
chunk = flask.request.stream.read(16384)
if chunk:
f.write(chunk)
else:
break
printdebug('(File transfer completed)')
#Create a file object
file = clam.common.data.CLAMInputFile(Project.path(project, user), filename, False) #get CLAMInputFile without metadata (chicken-egg problem, this does not read the actual file contents!
#============== Generate metadata ==============
metadataerror = None
if not metadata and not errors: #check if it has not already been set in another stage
printdebug('(Generating metadata)')
#for newly generated metadata
try:
#Now we generate the actual metadata object (unsaved yet though). We pass our earlier validation results to prevent computing it again
validmeta, metadata, parameters = inputtemplate.generate(file, (errors, parameters ))
if validmeta:
#And we tie it to the CLAMFile object
file.metadata = metadata
#Add inputtemplate ID to metadata
metadata.inputtemplate = inputtemplate.id
else:
metadataerror = "Undefined error"
except ValueError as msg:
validmeta = False
metadataerror = msg
except KeyError as msg:
validmeta = False
metadataerror = msg
elif validmeta:
#for explicitly uploaded metadata
metadata.file = file
file.metadata = metadata
metadata.inputtemplate = inputtemplate.id
if metadataerror:
                printdebug('(Metadata could not be generated, ' + str(metadataerror) + ', this usually indicates an error in service configuration)')
#output += "<metadataerror />" #This usually indicates an error in service configuration!
fatalerror = "<error type=\"metadataerror\">Metadata could not be generated for " + filename + ": " + str(metadataerror) + " (this usually indicates an error in service configuration!)</error>"
jsonoutput['error'] = "Metadata could not be generated! " + str(metadataerror) + " (this usually indicates an error in service configuration!)"
elif validmeta:
#=========== Convert the uploaded file (if requested) ==============
conversionerror = False
if 'converter' in postdata and postdata['converter']:
for c in inputtemplate.converters:
if c.id == postdata['converter']:
converter = c
break
if converter: #(should always be found, error already provided earlier if not)
printdebug('(Invoking converter)')
try:
success = converter.convertforinput(Project.path(project, user) + 'input/' + filename, metadata)
except: #pylint: disable=bare-except
success = False
if not success:
conversionerror = True
fatalerror = "<error type=\"conversion\">The file " + xmlescape(filename) + " could not be converted</error>"
jsonoutput['error'] = "The file could not be converted"
jsonoutput['success'] = False
#====================== Validate the file itself ====================
if not conversionerror:
valid = file.validate()
if valid:
printdebug('(Validation ok)')
output += "<valid>yes</valid>"
#Great! Everything ok, save metadata
metadata.save(Project.path(project, user) + 'input/' + file.metafilename())
#And create symbolic link for inputtemplates
linkfilename = os.path.dirname(filename)
if linkfilename: linkfilename += '/'
linkfilename += '.' + os.path.basename(filename) + '.INPUTTEMPLATE' + '.' + inputtemplate.id + '.' + str(nextseq)
os.symlink(Project.path(project, user) + 'input/' + filename, Project.path(project, user) + 'input/' + linkfilename)
else:
printdebug('(Validation error)')
#Too bad, everything worked out but the file itself doesn't validate.
#output += "<valid>no</valid>"
fatalerror = "<error type=\"validation\">The file " + xmlescape(filename) + " did not validate, it is not in the proper expected format.</error>"
jsonoutput['error'] = "The file " + filename.replace("'","") + " did not validate, it is not in the proper expected format."
jsonoutput['success'] = False
#remove upload
os.unlink(Project.path(project, user) + 'input/' + filename)
output += "</upload>\n"
output += "</clamupload>"
if returntype == 'boolean':
return jsonoutput['success']
elif fatalerror:
#fatal error return error message with 403 code
printlog('Fatal Error during upload: ' + fatalerror)
return errorresponse(head + fatalerror,403)
elif errors:
#parameter errors, return XML output with 403 code
printdebug('There were parameter errors during upload!')
if returntype == 'json':
jsonoutput['xml'] = output #embed XML in JSON for complete client-side processing
return withheaders(flask.make_response(json.dumps(jsonoutput)), 'application/json', {'allow_origin': settings.ALLOW_ORIGIN})
else:
return withheaders(flask.make_response(output,403),headers={'allow_origin': settings.ALLOW_ORIGIN})
elif returntype == 'xml': #success
printdebug('Returning xml')
return withheaders(flask.make_response(output), 'text/xml', {'allow_origin': settings.ALLOW_ORIGIN})
elif returntype == 'json': #success
printdebug('Returning json')
#everything ok, return JSON output (caller decides)
jsonoutput['xml'] = output #embed XML in JSON for complete client-side processing
return withheaders(flask.make_response(json.dumps(jsonoutput)), 'application/json', {'allow_origin': settings.ALLOW_ORIGIN})
elif returntype == 'true_on_success':
return True
else:
printdebug('Invalid return type')
raise Exception("invalid return type") | 0.01043 |
def draw_shapes_svg_layer(df_shapes, shape_i_columns, layer_name,
layer_number=1, use_svg_path=True):
'''
Draw shapes as a layer in a SVG file.
Args:
df_shapes (pandas.DataFrame): Table of shape vertices (one row per
vertex).
shape_i_columns (str or list) : Either a single column name as a string
or a list of column names in ``df_shapes``. Rows in ``df_shapes``
with the same value in the ``shape_i_columns`` column(s) are
grouped together as a shape.
layer_name (str) : Name of Inkscape layer.
layer_number (int, optional) : Z-order index of Inkscape layer.
use_svg_path (bool, optional) : If ``True``, electrodes are drawn as
``svg:path`` elements. Otherwise, electrodes are drawn as
``svg:polygon`` elements.
Returns
-------
StringIO.StringIO
A file-like object containing SVG XML source.
The XML contains a layer named according to :data:`layer_name`, which
in turn contains ``svg:polygon`` or ``svg:path`` elements corresponding
to the shapes in the input :data:`df_shapes` table.
'''
# Note that `svgwrite.Drawing` requires a filepath to be specified during
# construction, *but* nothing is actually written to the path unless one of
# the `save*` methods is called.
#
# In this function, we do *not* call any of the `save*` methods. Instead,
# we use the `write` method to write to an in-memory file-like object.
minx, miny = df_shapes[['x', 'y']].min().values
maxx, maxy = df_shapes[['x', 'y']].max().values
width = maxx - minx
height = maxy - miny
dwg = svgwrite.Drawing('should_not_exist.svg', size=(width, height),
debug=False)
nsmap = INKSCAPE_NSMAP
dwg.attribs['xmlns:inkscape'] = nsmap['inkscape']
svg_root = dwg.g(id='layer%d' % layer_number,
**{'inkscape:label': layer_name,
'inkscape:groupmode': 'layer'})
minx, miny = df_shapes[['x', 'y']].min().values
for shape_i, df_shape_i in df_shapes.groupby(shape_i_columns):
attr_columns = [c for c in df_shape_i.columns
if c not in ('vertex_i', 'x', 'y')]
attrs = df_shape_i.iloc[0][attr_columns].to_dict()
vertices = df_shape_i[['x', 'y']].values.tolist()
if not use_svg_path:
# Draw electrode shape as an `svg:polygon` element.
p = Polygon(vertices, debug=False, **attrs)
else:
# Draw electrode shape as an `svg:path` element.
commands = ['M %s,%s' % tuple(vertices[0])]
commands += ['L %s,%s' % tuple(v) for v in vertices[1:]]
while vertices[0] == vertices[-1]:
# Start is equal to end of path, but we will use the `'Z'`
# command to close the path, so delete the last point in the
# path.
del vertices[-1]
commands += ['Z']
p = Path_(d=' '.join(commands), debug=False, **attrs)
svg_root.add(p)
dwg.add(svg_root)
# Write result to `StringIO`.
output = StringIO.StringIO()
dwg.write(output)
output.seek(0)
return output | 0.000304 |
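
A minimal call sketch for the layer writer above, assuming pandas and svgwrite are importable and using an invented `id` column as the shape grouping key; the original function targets Python 2 (`StringIO`), so treat this as an illustration of the call pattern rather than a drop-in script:

```python
import pandas as pd

# One square shape "A", one row per vertex, matching the docstring's layout.
df_shapes = pd.DataFrame({'id': ['A'] * 4,
                          'vertex_i': [0, 1, 2, 3],
                          'x': [0., 10., 10., 0.],
                          'y': [0., 0., 10., 10.]})

svg_stream = draw_shapes_svg_layer(df_shapes, 'id', 'Device layer')
print(svg_stream.read())  # SVG XML with one Inkscape layer containing the shape
```
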
def vatm(model,
x,
logits,
eps,
num_iterations=1,
xi=1e-6,
clip_min=None,
clip_max=None,
scope=None):
"""
Tensorflow implementation of the perturbation method used for virtual
adversarial training: https://arxiv.org/abs/1507.00677
:param model: the model which returns the network unnormalized logits
:param x: the input placeholder
:param logits: the model's unnormalized output tensor (the input to
the softmax layer)
:param eps: the epsilon (input variation parameter)
:param num_iterations: the number of iterations
:param xi: the finite difference parameter
:param clip_min: optional parameter that can be used to set a minimum
value for components of the example returned
:param clip_max: optional parameter that can be used to set a maximum
value for components of the example returned
:param seed: the seed for random generator
:return: a tensor for the adversarial example
"""
with tf.name_scope(scope, "virtual_adversarial_perturbation"):
d = tf.random_normal(tf.shape(x), dtype=tf_dtype)
for _ in range(num_iterations):
d = xi * utils_tf.l2_batch_normalize(d)
logits_d = model.get_logits(x + d)
kl = utils_tf.kl_with_logits(logits, logits_d)
Hd = tf.gradients(kl, d)[0]
d = tf.stop_gradient(Hd)
d = eps * utils_tf.l2_batch_normalize(d)
adv_x = x + d
if (clip_min is not None) and (clip_max is not None):
adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
return adv_x | 0.005646 |
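
A hedged usage sketch for `vatm`, assuming TensorFlow 1.x graph mode and a cleverhans-style `model` object exposing `get_logits` (both taken from the signature above, not shown here):

```python
import tensorflow as tf  # TF 1.x API assumed

x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))  # input batch
logits = model.get_logits(x)                             # unnormalized outputs
adv_x = vatm(model, x, logits, eps=0.1,
             num_iterations=1, xi=1e-6,
             clip_min=0.0, clip_max=1.0)
# adv_x is a graph tensor; evaluate it in a session with a feed dict for x.
```
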
def polygon(surf, points, color):
"""Draw an antialiased filled polygon on a surface"""
gfxdraw.aapolygon(surf, points, color)
gfxdraw.filled_polygon(surf, points, color)
x = min([x for (x, y) in points])
y = min([y for (x, y) in points])
xm = max([x for (x, y) in points])
ym = max([y for (x, y) in points])
return pygame.Rect(x, y, xm - x, ym - y) | 0.002604 |
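
A short sketch of how the helper might be called; it assumes pygame is installed and `polygon` is in scope:

```python
import pygame

pygame.init()
surf = pygame.Surface((200, 200))
dirty = polygon(surf, [(20, 20), (160, 40), (90, 170)], (200, 40, 40))
print(dirty)  # bounding pygame.Rect of the filled triangle
```
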
def validate_protocol(protocol):
'''Validate a protocol, a string, and return it.'''
if not re.match(PROTOCOL_REGEX, protocol):
raise ValueError(f'invalid protocol: {protocol}')
return protocol.lower() | 0.004525 |
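
Callers just pass a scheme string; the module-level `PROTOCOL_REGEX` (not shown above) decides what is accepted. A hedged illustration, assuming the pattern admits ordinary URI-scheme characters:

```python
# Assumes PROTOCOL_REGEX accepts letters, digits, '+', '-' and '.';
# that is a guess, not the library's actual pattern.
print(validate_protocol('HTTPS'))   # -> 'https'
print(validate_protocol('tcp'))     # -> 'tcp'
# validate_protocol('no spaces!')   # would raise ValueError
```
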
def to_polygon(self):
"""
Return a 4-cornered polygon equivalent to this rectangle
"""
x,y = self.corners.T
vertices = PixCoord(x=x, y=y)
return PolygonPixelRegion(vertices=vertices, meta=self.meta,
visual=self.visual) | 0.01 |
async def start_polling(self,
timeout=20,
relax=0.1,
limit=None,
reset_webhook=None,
fast: typing.Optional[bool] = True,
error_sleep: int = 5):
"""
Start long-polling
:param timeout:
:param relax:
:param limit:
:param reset_webhook:
:param fast:
:return:
"""
if self._polling:
raise RuntimeError('Polling already started')
log.info('Start polling.')
# context.set_value(MODE, LONG_POLLING)
Dispatcher.set_current(self)
Bot.set_current(self.bot)
if reset_webhook is None:
await self.reset_webhook(check=False)
if reset_webhook:
await self.reset_webhook(check=True)
self._polling = True
offset = None
try:
current_request_timeout = self.bot.timeout
if current_request_timeout is not sentinel and timeout is not None:
request_timeout = aiohttp.ClientTimeout(total=current_request_timeout.total + timeout or 1)
else:
request_timeout = None
while self._polling:
try:
with self.bot.request_timeout(request_timeout):
updates = await self.bot.get_updates(limit=limit, offset=offset, timeout=timeout)
except:
log.exception('Cause exception while getting updates.')
await asyncio.sleep(error_sleep)
continue
if updates:
log.debug(f"Received {len(updates)} updates.")
offset = updates[-1].update_id + 1
self.loop.create_task(self._process_polling_updates(updates, fast))
if relax:
await asyncio.sleep(relax)
finally:
self._close_waiter._set_result(None)
log.warning('Polling is stopped.') | 0.005706 |
def complex_request(self, request, wait_for_first_response=True):
"""Send a compound command request to the interface over the normal data
channel.
request - A dict storing the request to send to the VI. It will be
serialized to the currently selected output format.
wait_for_first_response - If true, this function will block waiting for
a response from the VI and return it to the caller. Otherwise, it
will send the command and return immediately and any response will
be lost.
"""
receiver = self._prepare_response_receiver(request,
receiver_class=CommandResponseReceiver)
self._send_complex_request(request)
responses = []
if wait_for_first_response:
responses = receiver.wait_for_responses()
return responses | 0.004587 |
def _warning(code):
"""
Return a warning message of code 'code'.
If code = (cd, str) it returns the warning message of code 'cd' and appends
str at the end
"""
if isinstance(code, str):
return code
message = ''
if isinstance(code, tuple):
if isinstance(code[0], str):
message = code[1]
code = code[0]
return CFG_BIBRECORD_WARNING_MSGS.get(code, '') + message | 0.002294 |
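
A hypothetical call pattern; the real `CFG_BIBRECORD_WARNING_MSGS` table lives elsewhere, so the entry below is invented purely for illustration:

```python
# Invented warning table for illustration only.
CFG_BIBRECORD_WARNING_MSGS = {'WRN_MISSING_FIELD': 'record is missing a field'}

print(_warning('free-form message'))                 # returned unchanged
print(_warning(('WRN_MISSING_FIELD', ': tag 245')))  # looked up, suffix appended
```
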
def asRemoteException(ErrorType):
'''return the remote exception version of the error above
you can catch errors as usally:
>>> try:
raise asRemoteException(ValueError)
except ValueError:
pass
or you can catch the remote Exception
>>> try:
raise asRemoteException(ReferenceError)(ReferenceError(),'')
except asRemoteException(ReferenceError):
pass
'''
RemoteException = _remoteExceptionCache.get(ErrorType)
if RemoteException is None:
RemoteException = _newRemoteException(ErrorType)
_remoteExceptionCache.setdefault(ErrorType, RemoteException)
_remoteExceptionCache.setdefault(RemoteException, RemoteException)
return _remoteExceptionCache.get(ErrorType)
return RemoteException | 0.001377 |
def getExtentAddress(self, zoom, extent=None, contained=False):
"""
        Return the bounding addresses ([minRow, minCol, maxRow, maxCol]) based
on the instance's extent or a user defined extent. Generic method
that works with regular and irregular pyramids.
Parameters:
zoom -- the zoom for which we want the bounding addresses
extent (optional) -- the extent ([minX, minY, maxX, maxY])
defaults to the instance extent
contained (optional) -- get only tile addresses that contain
a coordinate of the extent. For instance if
the extent only intersects a tile border,
if this option is set to True, this tile
will be ignored. defaults to False
"""
if extent:
bbox = extent
else:
bbox = self.extent
minX = bbox[0]
maxX = bbox[2]
if self.originCorner == 'bottom-left':
minY = bbox[3]
maxY = bbox[1]
elif self.originCorner == 'top-left':
minY = bbox[1]
maxY = bbox[3]
[minCol, minRow] = self.tileAddress(zoom, [minX, maxY])
[maxCol, maxRow] = self.tileAddress(zoom, [maxX, minY])
        if contained and (minCol != maxCol or minRow != maxRow):
parentBoundsMin = self.tileBounds(zoom, minCol, minRow)
if self.originCorner == 'bottom-left':
if parentBoundsMin[2] == maxX:
maxCol -= 1
if parentBoundsMin[3] == minY:
maxRow -= 1
elif self.originCorner == 'top-left':
if parentBoundsMin[2] == maxX:
maxCol -= 1
if parentBoundsMin[1] == minY:
maxRow -= 1
return [minRow, minCol, maxRow, maxCol] | 0.001019 |
def run(self,
kpop,
nreps,
ipyclient=None,
seed=12345,
force=False,
quiet=False,
):
"""
submits a job to run on the cluster and returns an asynchronous result
object. K is the number of populations, randomseed if not set will be
randomly drawn, ipyclient if not entered will raise an error. If nreps
is set then multiple jobs will be started from new seeds, each labeled
by its replicate number. If force=True then replicates will be overwritten,
otherwise, new replicates will be created starting with the last file N
found in the workdir.
Parameters:
-----------
kpop: (int)
The MAXPOPS parameter in structure, i.e., the number of populations
assumed by the model (K).
nreps: (int):
Number of independent runs starting from distinct seeds.
ipyclient: (ipyparallel.Client Object)
An ipyparallel client connected to an ipcluster instance. This is
used to manage parallel jobs. If not present a single job will
run and block until finished (i.e., code is not parallel).
seed: (int):
Random number seed used for subsampling unlinked SNPs if a mapfile
is linked to the Structure Object.
force: (bool):
If force is true then old replicates are removed and new reps start
from rep-0. Otherwise, new reps start at end of existing rep numbers.
quiet: (bool)
Whether to print number of jobs submitted to stderr
Example:
---------
import ipyparallel as ipp
import ipyrad.analysis as ipa
## get parallel client
ipyclient = ipp.Client()
## get structure object
s = ipa.structure(
name="test",
data="mydata.str",
mapfile="mydata.snps.map",
workdir="structure-results",
)
## modify some basic params
s.mainparams.numreps = 100000
s.mainparams.burnin = 10000
## submit many jobs
for kpop in [3, 4, 5]:
s.run(
kpop=kpop,
nreps=10,
ipyclient=ipyclient,
)
## block until all jobs finish
ipyclient.wait()
"""
## initiate starting seed
np.random.seed(seed)
        ## check for structure here
proc = subprocess.Popen(["which", "structure"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
if not proc:
raise Exception(\
"structure is not installed: run `conda install structure -c ipyrad`")
## start load balancer
if ipyclient:
lbview = ipyclient.load_balanced_view()
## remove old jobs with this same name
handle = OPJ(self.workdir, self.name+"-K-{}-*".format(kpop))
oldjobs = glob.glob(handle)
if force or (not oldjobs):
for job in oldjobs:
os.remove(job)
repstart = 0
repend = nreps
else:
repstart = max([int(i.split("-")[-1][:-2]) for i in oldjobs])
repend = repstart + nreps
## check that there is a ipcluster instance running
for rep in xrange(repstart, repend):
## sample random seed for this rep
self.extraparams.seed = np.random.randint(0, 1e9, 1)[0]
## prepare files (randomly subsamples snps if mapfile)
mname, ename, sname = self.write_structure_files(kpop, rep)
args = [
mname, ename, sname,
self.name,
self.workdir,
self.extraparams.seed,
self.ntaxa,
self.nsites,
kpop,
rep]
if ipyclient:
## call structure
async = lbview.apply(_call_structure, *(args))
self.asyncs.append(async)
else:
if not quiet:
sys.stderr.write("submitted 1 structure job [{}-K-{}]\n"\
.format(self.name, kpop))
comm = _call_structure(*args)
return comm
if ipyclient:
if not quiet:
sys.stderr.write("submitted {} structure jobs [{}-K-{}]\n"\
.format(nreps, self.name, kpop)) | 0.009385 |
def target_types_by_alias(self):
"""Returns a mapping from target alias to the target types produced for that alias.
Normally there is 1 target type per alias, but macros can expand a single alias to several
target types.
:API: public
:rtype: dict
"""
target_types_by_alias = defaultdict(set)
for alias, target_type in self.target_types.items():
target_types_by_alias[alias].add(target_type)
for alias, target_macro_factory in self.target_macro_factories.items():
target_types_by_alias[alias].update(target_macro_factory.target_types)
return dict(target_types_by_alias) | 0.008013 |
def grant(args):
"""
Given an IAM role or instance name, attach an IAM policy granting
appropriate permissions to subscribe to deployments. Given a
GitHub repo URL, create and record deployment keys for the repo
and any of its private submodules, making the keys accessible to
the IAM role.
"""
try:
role = resources.iam.Role(args.iam_role_or_instance)
role.load()
except ClientError:
role = get_iam_role_for_instance(args.iam_role_or_instance)
role.attach_policy(PolicyArn=ensure_deploy_iam_policy().arn)
for private_repo in [args.repo] + list(private_submodules(args.repo)):
gh_owner_name, gh_repo_name = parse_repo_name(private_repo)
secret = secrets.put(argparse.Namespace(secret_name="deploy.{}.{}".format(gh_owner_name, gh_repo_name),
iam_role=role.name,
instance_profile=None,
iam_group=None,
iam_user=None,
generate_ssh_key=True))
get_repo(private_repo).create_key(__name__ + "." + role.name, secret["ssh_public_key"])
logger.info("Created deploy key %s for IAM role %s to access GitHub repo %s",
secret["ssh_key_fingerprint"], role.name, private_repo) | 0.002817 |
def delete_topics(self, topics, **kwargs):
"""
Delete topics.
The future result() value is None.
:param list(str) topics: Topics to mark for deletion.
:param float operation_timeout: Set broker's operation timeout in seconds,
controlling how long the DeleteTopics request will block
on the broker waiting for the topic deletion to propagate
in the cluster. A value of 0 returns immediately. Default: 0
:param float request_timeout: Set the overall request timeout in seconds,
including broker lookup, request transmission, operation time
on broker, and response. Default: `socket.timeout.ms*1000.0`
:returns: a dict of futures for each topic, keyed by the topic name.
:rtype: dict(<topic_name, future>)
:raises KafkaException: Operation failed locally or on broker.
:raises TypeException: Invalid input.
:raises ValueException: Invalid input.
"""
f, futmap = AdminClient._make_futures(topics, None,
AdminClient._make_topics_result)
super(AdminClient, self).delete_topics(topics, f, **kwargs)
return futmap | 0.003155 |
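
The docstring above implies the usual confluent-kafka pattern: iterate the returned futures and call `result()` per topic. A sketch (broker address is a placeholder):

```python
from confluent_kafka.admin import AdminClient

admin = AdminClient({"bootstrap.servers": "localhost:9092"})  # placeholder broker
futures = admin.delete_topics(["obsolete-topic"], operation_timeout=30)
for topic, future in futures.items():
    try:
        future.result()                 # None on success
        print("deleted", topic)
    except Exception as exc:
        print("failed to delete", topic, exc)
```
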
def driver(self, sim):
"""Push data to interface"""
r = sim.read
if self.actualData is NOP and self.data:
self.actualData = self.data.popleft()
do = self.actualData is not NOP
if do:
self.doWrite(sim, self.actualData)
else:
self.doWrite(sim, None)
en = self.notReset(sim) and self._enabled
if not (en and do):
return
yield sim.waitOnCombUpdate()
rd = self.isRd(r)
if en:
assert rd.vldMask, (
("%r: ready signal for interface %r is in invalid state,"
" this would cause desynchronization") %
(sim.now, self.intf))
if rd.val:
if self._debugOutput is not None:
self._debugOutput.write("%s, wrote, %d: %r\n" % (
self.intf._getFullName(),
sim.now, self.actualData))
if self.data:
self.actualData = self.data.popleft()
else:
self.actualData = NOP | 0.001786 |
def run_simulations(self, parameter_list, data_folder):
"""
This function runs multiple simulations in parallel.
"""
# Open up a session
s = drmaa.Session()
s.initialize()
# Create a job template for each parameter combination
jobs = {}
for parameter in parameter_list:
# Initialize result
current_result = {
'params': {},
'meta': {}
}
current_result['params'].update(parameter)
command = " ".join([self.script_executable] + ['--%s=%s' % (param,
value)
for param, value in
parameter.items()])
# Run from dedicated temporary folder
current_result['meta']['id'] = str(uuid.uuid4())
temp_dir = os.path.join(data_folder, current_result['meta']['id'])
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
jt = s.createJobTemplate()
jt.remoteCommand = os.path.dirname(
os.path.abspath(__file__)) + '/run_program.sh'
jt.args = [command]
jt.jobEnvironment = self.environment
jt.workingDirectory = temp_dir
jt.nativeSpecification = SIMULATION_GRID_PARAMS
output_filename = os.path.join(temp_dir, 'stdout')
error_filename = os.path.join(temp_dir, 'stderr')
jt.outputPath = ':' + output_filename
jt.errorPath = ':' + error_filename
jobid = s.runJob(jt)
# Save the template in our dictionary
jobs[jobid] = {
'template': jt,
'result': current_result,
'output': output_filename,
'error': error_filename,
}
# Check for job completion, yield results when they are ready
try:
while len(jobs):
found_done = False
for curjob in jobs.keys():
try:
status = s.jobStatus(curjob)
except drmaa.errors.DrmCommunicationException:
pass
if status is drmaa.JobState.DONE:
current_result = jobs[curjob]['result']
# TODO Actually compute time elapsed in the running
# state
current_result['meta']['elapsed_time'] = 0
try:
s.deleteJobTemplate(jobs[curjob]['template'])
except drmaa.errors.DrmCommunicationException:
pass
del jobs[curjob]
found_done = True
yield current_result
break
if not found_done: # Sleep if we can't find a completed task
time.sleep(6)
finally:
try:
for v in jobs.values():
s.deleteJobTemplate(v['template'])
s.control(drmaa.JOB_IDS_SESSION_ALL,
drmaa.JobControlAction.TERMINATE)
s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
s.exit()
except(drmaa.errors.NoActiveSessionException):
pass | 0.000566 |
def differential(poly, diffvar):
"""
Polynomial differential operator.
Args:
poly (Poly) : Polynomial to be differentiated.
diffvar (Poly) : Polynomial to differentiate by. Must be decomposed. If
polynomial array, the output is the Jacobian matrix.
Examples:
>>> q0, q1 = chaospy.variable(2)
>>> poly = chaospy.Poly([1, q0, q0*q1**2+1])
>>> print(poly)
[1, q0, q0q1^2+1]
>>> print(differential(poly, q0))
[0, 1, q1^2]
>>> print(differential(poly, q1))
[0, 0, 2q0q1]
"""
poly = Poly(poly)
diffvar = Poly(diffvar)
if not chaospy.poly.caller.is_decomposed(diffvar):
        return sum(differential(poly, chaospy.poly.caller.decompose(diffvar)))
if diffvar.shape:
return Poly([differential(poly, pol) for pol in diffvar])
if diffvar.dim > poly.dim:
poly = chaospy.poly.dimension.setdim(poly, diffvar.dim)
else:
diffvar = chaospy.poly.dimension.setdim(diffvar, poly.dim)
qkey = diffvar.keys[0]
core = {}
for key in poly.keys:
newkey = np.array(key) - np.array(qkey)
if np.any(newkey < 0):
continue
newkey = tuple(newkey)
core[newkey] = poly.A[key] * np.prod(
[fac(key[idx], exact=True) / fac(newkey[idx], exact=True)
for idx in range(poly.dim)])
return Poly(core, poly.dim, poly.shape, poly.dtype) | 0.000693 |
def run(self):
"""The thread's main activity. Call start() instead."""
self.socket = self.context.socket(zmq.DEALER)
self.socket.setsockopt(zmq.IDENTITY, self.session.bsession)
self.socket.connect('tcp://%s:%i' % self.address)
self.stream = zmqstream.ZMQStream(self.socket, self.ioloop)
self.stream.on_recv(self._handle_recv)
self._run_loop()
try:
self.socket.close()
except:
pass | 0.006289 |
def run_application(
application, patch_stdout=False, return_asyncio_coroutine=False,
true_color=False, refresh_interval=0, eventloop=None):
"""
Run a prompt toolkit application.
:param patch_stdout: Replace ``sys.stdout`` by a proxy that ensures that
print statements from other threads won't destroy the prompt. (They
will be printed above the prompt instead.)
:param return_asyncio_coroutine: When True, return a asyncio coroutine. (Python >3.3)
:param true_color: When True, use 24bit colors instead of 256 colors.
:param refresh_interval: (number; in seconds) When given, refresh the UI
every so many seconds.
"""
assert isinstance(application, Application)
if return_asyncio_coroutine:
eventloop = create_asyncio_eventloop()
else:
eventloop = eventloop or create_eventloop()
# Create CommandLineInterface.
cli = CommandLineInterface(
application=application,
eventloop=eventloop,
output=create_output(true_color=true_color))
# Set up refresh interval.
if refresh_interval:
done = [False]
def start_refresh_loop(cli):
def run():
while not done[0]:
time.sleep(refresh_interval)
cli.request_redraw()
t = threading.Thread(target=run)
t.daemon = True
t.start()
def stop_refresh_loop(cli):
done[0] = True
cli.on_start += start_refresh_loop
cli.on_stop += stop_refresh_loop
# Replace stdout.
patch_context = cli.patch_stdout_context(raw=True) if patch_stdout else DummyContext()
# Read input and return it.
if return_asyncio_coroutine:
# Create an asyncio coroutine and call it.
exec_context = {'patch_context': patch_context, 'cli': cli,
'Document': Document}
exec_(textwrap.dedent('''
def prompt_coro():
# Inline import, because it slows down startup when asyncio is not
# needed.
import asyncio
@asyncio.coroutine
def run():
with patch_context:
result = yield from cli.run_async()
if isinstance(result, Document): # Backwards-compatibility.
return result.text
return result
return run()
'''), exec_context)
return exec_context['prompt_coro']()
else:
try:
with patch_context:
result = cli.run()
if isinstance(result, Document): # Backwards-compatibility.
return result.text
return result
finally:
eventloop.close() | 0.00144 |
def _compute_handshake(self):
"""Compute the authentication handshake value.
:return: the computed hash value.
:returntype: `str`"""
return hashlib.sha1(to_utf8(self.stream_id)+to_utf8(self.secret)).hexdigest() | 0.012346 |
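
The value is simply SHA-1 over the stream id concatenated with the shared secret, as in XEP-0114-style component handshakes; a standalone sketch with made-up values:

```python
import hashlib

stream_id = "1234567890"   # example values only, not real credentials
secret = "s3cr3t"
handshake = hashlib.sha1((stream_id + secret).encode("utf-8")).hexdigest()
print(handshake)
```
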
def _desc_has_data(desc):
"""Returns true if there is any data set for a particular PhoneNumberDesc."""
if desc is None:
return False
# Checking most properties since we don't know what's present, since a custom build may have
# stripped just one of them (e.g. liteBuild strips exampleNumber). We don't bother checking the
# possibleLengthsLocalOnly, since if this is the only thing that's present we don't really
# support the type at all: no type-specific methods will work with only this data.
return ((desc.example_number is not None) or
_desc_has_possible_number_data(desc) or
(desc.national_number_pattern is not None)) | 0.008759 |
def update(self, other, join='left', overwrite=True, filter_func=None,
errors='ignore'):
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged :: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, it's name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ['ignore', 'raise']:
raise ValueError("The parameter errors must be either "
"'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all='ignore'):
mask = ~filter_func(this) | isna(that)
else:
if errors == 'raise':
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
            # don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that) | 0.000579 |
def add_router_interface(self, context, router_id, interface_info):
"""Add a subnet of a network to an existing router."""
new_router = super(AristaL3ServicePlugin, self).add_router_interface(
context, router_id, interface_info)
core = directory.get_plugin()
# Get network info for the subnet that is being added to the router.
# Check if the interface information is by port-id or subnet-id
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
if add_by_sub:
subnet = core.get_subnet(context, interface_info['subnet_id'])
elif add_by_port:
port = core.get_port(context, interface_info['port_id'])
subnet_id = port['fixed_ips'][0]['subnet_id']
subnet = core.get_subnet(context, subnet_id)
network_id = subnet['network_id']
# To create SVI's in Arista HW, the segmentation Id is required
# for this network.
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
# Package all the info needed for Hw programming
router = self.get_router(context, router_id)
router_info = copy.deepcopy(new_router)
router_info['seg_id'] = seg_id
router_info['name'] = router['name']
router_info['cidr'] = subnet['cidr']
router_info['gip'] = subnet['gateway_ip']
router_info['ip_version'] = subnet['ip_version']
try:
self.driver.add_router_interface(context, router_info)
return new_router
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error Adding subnet %(subnet)s to "
"router %(router_id)s on Arista HW"),
{'subnet': subnet, 'router_id': router_id})
super(AristaL3ServicePlugin, self).remove_router_interface(
context,
router_id,
interface_info) | 0.000965 |
def partition_read(
self,
session,
table,
key_set,
transaction=None,
index=None,
columns=None,
partition_options=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Creates a set of partition tokens that can be used to execute a read
operation in parallel. Each of the returned partition tokens can be used
by ``StreamingRead`` to specify a subset of the read result to read. The
same session and read-only transaction must be used by the
PartitionReadRequest used to create the partition tokens and the
ReadRequests that use the partition tokens. There are no ordering
guarantees on rows returned among the returned partition tokens, or even
within each individual StreamingRead call issued with a
partition\_token.
Partition tokens become invalid when the session used to create them is
deleted, is idle for too long, begins a new transaction, or becomes too
old. When any of these happen, it is not possible to resume the read,
and the whole operation must be restarted from the beginning.
Example:
>>> from google.cloud import spanner_v1
>>>
>>> client = spanner_v1.SpannerClient()
>>>
>>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]')
>>>
>>> # TODO: Initialize `table`:
>>> table = ''
>>>
>>> # TODO: Initialize `key_set`:
>>> key_set = {}
>>>
>>> response = client.partition_read(session, table, key_set)
Args:
session (str): Required. The session used to create the partitions.
table (str): Required. The name of the table in the database to be read.
key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set``
names the primary keys of the rows in ``table`` to be yielded, unless
``index`` is present. If ``index`` is present, then ``key_set`` instead
names index keys in ``index``.
It is not an error for the ``key_set`` to name rows that do not exist in
the database. Read yields nothing for nonexistent rows.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.KeySet`
transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use
transactions are not.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.TransactionSelector`
index (str): If non-empty, the name of an index on ``table``. This index is used
instead of the table primary key when interpreting ``key_set`` and
sorting result rows. See ``key_set`` for further information.
columns (list[str]): The columns of ``table`` to be returned for each row matching this
request.
partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.spanner_v1.types.PartitionOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "partition_read" not in self._inner_api_calls:
self._inner_api_calls[
"partition_read"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.partition_read,
default_retry=self._method_configs["PartitionRead"].retry,
default_timeout=self._method_configs["PartitionRead"].timeout,
client_info=self._client_info,
)
request = spanner_pb2.PartitionReadRequest(
session=session,
table=table,
key_set=key_set,
transaction=transaction,
index=index,
columns=columns,
partition_options=partition_options,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("session", session)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["partition_read"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.004136 |
def api_backoff(func):
"""
    Decorator that handles HTTP 429 "Too Many Requests" responses from the Discord API.
    If blocking=True is specified, this function will block and retry
    the decorated function up to max_retries times (3 if not specified).
    If the API call still receives a backoff timer after the retries, this function
    raises a DiscordApiTooBusy exception.
    If the caller chooses blocking=False, the decorator will raise a DiscordApiBackoff
    exception and the caller can choose to retry after the timespan given by its
    retry_after property, in seconds.
"""
class PerformBackoff(Exception):
def __init__(self, retry_after, retry_datetime, global_ratelimit):
super(Exception, self).__init__()
self.retry_after = int(retry_after)
self.retry_datetime = retry_datetime
self.global_ratelimit = global_ratelimit
@wraps(func)
def decorated(*args, **kwargs):
blocking = kwargs.get('blocking', False)
retries = kwargs.get('max_retries', 3)
# Strip our parameters
if 'max_retries' in kwargs:
del kwargs['max_retries']
if 'blocking' in kwargs:
del kwargs['blocking']
cache_key = 'DISCORD_BACKOFF_' + func.__name__
cache_global_key = 'DISCORD_BACKOFF_GLOBAL'
while retries > 0:
try:
try:
# Check global backoff first, then route backoff
existing_global_backoff = cache.get(cache_global_key)
existing_backoff = existing_global_backoff or cache.get(cache_key)
if existing_backoff:
backoff_timer = datetime.datetime.strptime(existing_backoff, cache_time_format)
if backoff_timer > datetime.datetime.utcnow():
backoff_seconds = (backoff_timer - datetime.datetime.utcnow()).total_seconds()
logger.debug("Still under backoff for %s seconds, backing off" % backoff_seconds)
# Still under backoff
raise PerformBackoff(
retry_after=backoff_seconds,
retry_datetime=backoff_timer,
global_ratelimit=bool(existing_global_backoff)
)
logger.debug("Calling API calling function")
return func(*args, **kwargs)
except requests.HTTPError as e:
if e.response.status_code == 429:
try:
retry_after = int(e.response.headers['Retry-After'])
except (TypeError, KeyError):
# Pick some random time
retry_after = 5000
logger.info("Received backoff from API of %s seconds, handling" % retry_after)
# Store value in redis
backoff_until = (datetime.datetime.utcnow() +
datetime.timedelta(milliseconds=retry_after))
global_backoff = bool(e.response.headers.get('X-RateLimit-Global', False))
if global_backoff:
logger.info("Global backoff!!")
cache.set(cache_global_key, backoff_until.strftime(cache_time_format), retry_after)
else:
cache.set(cache_key, backoff_until.strftime(cache_time_format), retry_after)
raise PerformBackoff(retry_after=retry_after, retry_datetime=backoff_until,
global_ratelimit=global_backoff)
else:
# Not 429, re-raise
raise e
except PerformBackoff as bo:
# Sleep if we're blocking
if blocking:
logger.info("Blocking Back off from API calls for %s seconds" % bo.retry_after)
time.sleep((10 if bo.retry_after > 10 else bo.retry_after) / 1000)
else:
# Otherwise raise exception and let caller handle the backoff
raise DiscordApiBackoff(retry_after=bo.retry_after, global_ratelimit=bo.global_ratelimit)
finally:
retries -= 1
if retries == 0:
raise DiscordApiTooBusy()
return decorated | 0.004166 |
def set_level(self, level):
"""
Set the log level of the g8os
        Note: this level is for messages that end up on screen or in the log file
:param level: the level to be set can be one of ("CRITICAL", "ERROR", "WARNING", "NOTICE", "INFO", "DEBUG")
"""
args = {
'level': level,
}
self._level_chk.check(args)
return self._client.json('logger.set_level', args) | 0.006881 |
def read_rows(
self,
table_name,
app_profile_id=None,
rows=None,
filter_=None,
rows_limit=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Streams back the contents of all requested rows in key order, optionally
applying the same Reader filter to each. Depending on their size,
rows and cells may be broken up across multiple responses, but
atomicity of each row will still be preserved. See the
ReadRowsResponse documentation for details.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> for element in client.read_rows(table_name):
... # process element
... pass
Args:
table_name (str): The unique name of the table from which to read. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowSet`
filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
reads the entirety of each row.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
rows_limit (long): The read will terminate after committing to N rows' worth of results. The
default (zero) is to return all results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "read_rows" not in self._inner_api_calls:
self._inner_api_calls[
"read_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.read_rows,
default_retry=self._method_configs["ReadRows"].retry,
default_timeout=self._method_configs["ReadRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.ReadRowsRequest(
table_name=table_name,
app_profile_id=app_profile_id,
rows=rows,
filter=filter_,
rows_limit=rows_limit,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["read_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
) | 0.002941 |
def get_config(self, key="Entrypoint", delim=None):
'''get_config returns a particular key (default is Entrypoint)
from a VERSION 1 manifest obtained with get_manifest.
Parameters
==========
key: the key to return from the manifest config
delim: Given a list, the delim to use to join the entries.
Default is newline
'''
if not hasattr(self,'manifests'):
bot.error('Please retrieve manifests for an image first.')
sys.exit(1)
cmd = None
# If we didn't find the config value in version 2
for version in ['config', 'v1']:
if cmd is None and 'config' in self.manifests:
# First try, version 2.0 manifest config has upper level config
manifest = self.manifests['config']
if "config" in manifest:
if key in manifest['config']:
cmd = manifest['config'][key]
            # Second try, config manifest (not from version 2.0 schema blob)
if cmd is None and "history" in manifest:
for entry in manifest['history']:
if 'v1Compatibility' in entry:
entry = json.loads(entry['v1Compatibility'])
if "config" in entry:
if key in entry["config"]:
cmd = entry["config"][key]
# Standard is to include commands like ['/bin/sh']
if isinstance(cmd, list):
if delim is not None:
cmd = delim.join(cmd)
bot.verbose("Found Docker config (%s) %s" % (key, cmd))
return cmd | 0.001847 |
def to_dict(self, ignore_none: bool=True, force_value: bool=True, ignore_empty: bool=False) -> dict:
"""From instance to dict
:param ignore_none: Properties which is None are excluded if True
    :param force_value: Transform to a value using to_value (default: str()) of the inherited ValueTransformer if True
:param ignore_empty: Properties which is empty are excluded if True
:return: Dict
Usage:
>>> from owlmixin.samples import Human, Food
>>> human_dict = {
... "id": 1,
... "name": "Tom",
... "favorites": [
... {"name": "Apple", "names_by_lang": {"en": "Apple"}}
... ]
... }
>>> Human.from_dict(human_dict).to_dict() == human_dict
True
You can include None properties by specifying False for ignore_none
>>> f = Food.from_dict({"name": "Apple"}).to_dict(ignore_none=False)
>>> f["name"]
'Apple'
>>> "names_by_lang" in f
True
>>> f["names_by_lang"]
As default
>>> f = Food.from_dict({"name": "Apple"}).to_dict()
>>> f["name"]
'Apple'
>>> "names_by_lang" in f
False
You can exclude Empty properties by specifying True for ignore_empty
>>> f = Human.from_dict({"id": 1, "name": "Ichiro", "favorites": []}).to_dict()
>>> f["favorites"]
[]
>>> f = Human.from_dict({"id": 1, "name": "Ichiro", "favorites": []}).to_dict(ignore_empty=True)
>>> "favorites" in f
False
"""
return traverse_dict(self._dict, ignore_none, force_value, ignore_empty) | 0.007848 |
def parse_with_retrieved(self, retrieved):
"""
Parse output data folder, store results in database.
:param retrieved: a dictionary of retrieved nodes, where
the key is the link name
:returns: a tuple with two values ``(bool, node_list)``,
where:
* ``bool``: variable to tell if the parsing succeeded
* ``node_list``: list of new nodes to be stored in the db
(as a list of tuples ``(link_name, node)``)
"""
success = False
node_list = []
# Check that the retrieved folder is there
try:
out_folder = retrieved['retrieved']
except KeyError:
self.logger.error("No retrieved folder found")
return success, node_list
# Check the folder content is as expected
list_of_files = out_folder.get_folder_list()
output_files = [self._calc._OUTPUT_FILE_NAME]
# Note: set(A) <= set(B) checks whether A is a subset
if set(output_files) <= set(list_of_files):
pass
else:
self.logger.error(
"Not all expected output files {} were found".format(
output_files))
return success, node_list
# Do not return distance matrix, as it is too large
#from aiida.orm.data.singlefile import SinglefileData
#node = SinglefileData(
# file=out_folder.get_abs_path(self._calc._OUTPUT_FILE_NAME))
#node_list.append(('distance_matrix', node))
success = True
return success, node_list | 0.003755 |
def containerIsRunning(container_name):
"""
Checks whether the container is running or not.
:param container_name: Name of the container being checked.
:returns: True if status is 'running', False if status is anything else,
and None if the container does not exist.
"""
client = docker.from_env(version='auto')
try:
this_container = client.containers.get(container_name)
if this_container.status == 'running':
return True
else:
# this_container.status == 'exited', 'restarting', or 'paused'
return False
except NotFound:
return None
except requests.exceptions.HTTPError as e:
logger.debug("Server error attempting to call container: ",
container_name)
raise create_api_error_from_http_exception(e) | 0.00237 |
def bvlpdu_contents(self, use_dict=None, as_class=dict):
"""Return the contents of an object as a dict."""
foreign_device_table = []
for fdte in self.bvlciFDT:
foreign_device_table.append(fdte.dict_contents(as_class=as_class))
return key_value_contents(use_dict=use_dict, as_class=as_class,
key_values=(
('function', 'ReadForeignDeviceTableAck'),
('foreign_device_table', foreign_device_table),
)) | 0.006024 |
def clear(self):
"""Helper for clearing all the keys in a database. Use with
caution!"""
for key in self.conn.keys():
self.conn.delete(key) | 0.011429 |
def launch_app(self, timeout=10):
"""
Launch Spotify application.
Will raise a LaunchError exception if there is no response from the
Spotify app within timeout seconds.
"""
def callback():
"""Callback function"""
self.send_message({"type": TYPE_STATUS,
"credentials": self.access_token,
"expiresIn": self.expires})
self.launch(callback_function=callback)
# Need to wait for Spotify to be launched on Chromecast completely
while not self.is_launched and timeout:
time.sleep(1)
timeout -= 1
if not self.is_launched:
raise LaunchError(
"Timeout when waiting for status response from Spotify app") | 0.002448 |
def transform_subject(self, X):
"""Transform a new subject using the existing model
Parameters
----------
X : 2D array, shape=[voxels, timepoints]
The fMRI data of the new subject.
Returns
-------
w : 2D array, shape=[voxels, features]
Orthogonal mapping `W_{new}` for new subject
s : 2D array, shape=[voxels, timepoints]
Individual term `S_{new}` for new subject
"""
# Check if the model exist
if hasattr(self, 'w_') is False:
raise NotFittedError("The model fit has not been run yet.")
# Check the number of TRs in the subject
if X.shape[1] != self.r_.shape[1]:
raise ValueError("The number of timepoints(TRs) does not match the"
"one in the model.")
s = np.zeros_like(X)
for i in range(self.n_iter):
w = self._update_transform_subject(X, s, self.r_)
s = self._shrink(X - w.dot(self.r_), self.lam)
return w, s | 0.001887 |
def is_dimension(self):
"""Return true if the colum is a dimension"""
from ambry.valuetype.core import ROLE
return self.role == ROLE.DIMENSION | 0.012048 |
def make_input_from_dict(sentence_id: SentenceId,
input_dict: Dict,
translator: 'Translator') -> TranslatorInput:
"""
Returns a TranslatorInput object from a JSON object, serialized as a string.
:param sentence_id: Sentence id.
:param input_dict: A dict that must contain a key "text", mapping to the input text, and optionally a key "factors"
that maps to a list of strings, each of which representing a factor sequence for the input text.
Constraints and an avoid list can also be added through the "constraints" and "avoid" keys.
:param translator: A translator object.
:return: A TranslatorInput.
"""
try:
tokens = input_dict[C.JSON_TEXT_KEY]
tokens = list(data_io.get_tokens(tokens))
factors = input_dict.get(C.JSON_FACTORS_KEY)
if isinstance(factors, list):
factors = [list(data_io.get_tokens(factor)) for factor in factors]
lengths = [len(f) for f in factors]
if not all(length == len(tokens) for length in lengths):
logger.error("Factors have different length than input text: %d vs. %s", len(tokens), str(lengths))
return _bad_input(sentence_id, reason=str(input_dict))
# Lexicon for vocabulary selection/restriction:
# This is only populated when using multiple lexicons, in which case the
# restrict_lexicon key must exist and the value (name) must map to one
# of the translator's known lexicons.
restrict_lexicon = None
restrict_lexicon_name = input_dict.get(C.JSON_RESTRICT_LEXICON_KEY)
if isinstance(translator.restrict_lexicon, dict):
if restrict_lexicon_name is None:
logger.error("Must specify restrict_lexicon when using multiple lexicons. Choices: %s"
% ' '.join(sorted(translator.restrict_lexicon)))
return _bad_input(sentence_id, reason=str(input_dict))
restrict_lexicon = translator.restrict_lexicon.get(restrict_lexicon_name, None)
if restrict_lexicon is None:
logger.error("Unknown restrict_lexicon '%s'. Choices: %s"
% (restrict_lexicon_name, ' '.join(sorted(translator.restrict_lexicon))))
return _bad_input(sentence_id, reason=str(input_dict))
        # List of phrases to prevent from occurring in the output
avoid_list = input_dict.get(C.JSON_AVOID_KEY)
# List of phrases that must appear in the output
constraints = input_dict.get(C.JSON_CONSTRAINTS_KEY)
# If there is overlap between positive and negative constraints, assume the user wanted
# the words, and so remove them from the avoid_list (negative constraints)
if constraints is not None and avoid_list is not None:
avoid_set = set(avoid_list)
overlap = set(constraints).intersection(avoid_set)
if len(overlap) > 0:
logger.warning("Overlap between constraints and avoid set, dropping the overlapping avoids")
avoid_list = list(avoid_set.difference(overlap))
# Convert to a list of tokens
if isinstance(avoid_list, list):
avoid_list = [list(data_io.get_tokens(phrase)) for phrase in avoid_list]
if isinstance(constraints, list):
constraints = [list(data_io.get_tokens(constraint)) for constraint in constraints]
return TranslatorInput(sentence_id=sentence_id, tokens=tokens, factors=factors,
restrict_lexicon=restrict_lexicon, constraints=constraints,
avoid_list=avoid_list, pass_through_dict=input_dict)
except Exception as e:
logger.exception(e, exc_info=True) if not is_python34() else logger.error(e) # type: ignore
return _bad_input(sentence_id, reason=str(input_dict)) | 0.004821 |
def camel_to_under(name):
"""
Converts camel-case string to lowercase string separated by underscores.
Written by epost (http://stackoverflow.com/questions/1175208).
:param name: String to be converted
:return: new String with camel-case converted to lowercase, underscored
"""
s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name)
return re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() | 0.002404 |
def frommatrix(cls, apart, dpart, det_radius, init_matrix, **kwargs):
"""Create a `ParallelHoleCollimatorGeometry` using a matrix.
This alternative constructor uses a matrix to rotate and
translate the default configuration. It is most useful when
the transformation to be applied is already given as a matrix.
Parameters
----------
apart : 1-dim. `RectPartition`
Partition of the parameter interval.
dpart : 2-dim. `RectPartition`
Partition of the detector parameter set.
det_radius : positive float
Radius of the circular detector orbit.
        init_matrix : `array_like`, shape ``(3, 3)`` or ``(3, 4)``
Transformation matrix whose left ``(3, 3)`` block is multiplied
with the default ``det_pos_init`` and ``det_axes_init`` to
determine the new vectors. If present, the fourth column acts
as a translation after the initial transformation.
The resulting ``det_axes_init`` will be normalized.
kwargs :
Further keyword arguments passed to the class constructor.
Returns
-------
geometry : `ParallelHoleCollimatorGeometry`
The resulting geometry.
"""
# Get transformation and translation parts from `init_matrix`
init_matrix = np.asarray(init_matrix, dtype=float)
if init_matrix.shape not in ((3, 3), (3, 4)):
raise ValueError('`matrix` must have shape (3, 3) or (3, 4), '
'got array with shape {}'
''.format(init_matrix.shape))
trafo_matrix = init_matrix[:, :3]
translation = init_matrix[:, 3:].squeeze()
# Transform the default vectors
default_axis = cls._default_config['axis']
# Normalized version, just in case
default_orig_to_det_init = (
np.array(cls._default_config['det_pos_init'], dtype=float) /
np.linalg.norm(cls._default_config['det_pos_init']))
default_det_axes_init = cls._default_config['det_axes_init']
vecs_to_transform = ((default_orig_to_det_init,) +
default_det_axes_init)
transformed_vecs = transform_system(
default_axis, None, vecs_to_transform, matrix=trafo_matrix)
# Use the standard constructor with these vectors
axis, orig_to_det, det_axis_0, det_axis_1 = transformed_vecs
if translation.size != 0:
kwargs['translation'] = translation
return cls(apart, dpart, det_radius, axis,
orig_to_det_init=orig_to_det,
det_axes_init=[det_axis_0, det_axis_1],
**kwargs) | 0.000723 |
def dump(d, fmt='json', stream=None):
"""Serialize structured data into a stream in JSON, YAML, or LHA format.
If stream is None, return the produced string instead.
Parameters:
- fmt: should be 'json' (default), 'yaml', or 'lha'
- stream: if None, return string
"""
if fmt == 'json':
return _dump_json(d, stream=stream)
elif fmt == 'yaml':
return yaml.dump(d, stream)
elif fmt == 'lha':
s = _dump_lha(d)
if stream is None:
return s
else:
return stream.write(s) | 0.001779 |
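A hedged usage sketch for the dispatcher above; it assumes the module-level helpers it calls (_dump_json, _dump_lha) and the yaml import are available, so it only runs inside that module.
import io

cfg = {'alpha': 1, 'beta': [1, 2, 3]}
as_json = dump(cfg)                # fmt defaults to 'json'; with stream=None a string is returned
buf = io.StringIO()
dump(cfg, fmt='yaml', stream=buf)  # with a stream, the YAML text is written into it instead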
def get(cls, group, admin):
"""Get specific GroupAdmin object."""
try:
ga = cls.query.filter_by(
group=group, admin_id=admin.get_id(),
admin_type=resolve_admin_type(admin)).one()
return ga
except Exception:
return None | 0.006452 |
def commit(self):
'''
Writes the current provenance stack to storage if it wasn't already there and returns it
        Returns (Tuple[bool, str, List]):
Whether the stack was not cached, the iden of the prov stack, and the provstack
'''
providen, provstack = get()
wasnew = (providen is None)
if wasnew:
providen = self.getProvIden(provstack)
setiden(providen)
return wasnew, s_common.ehex(providen), provstack | 0.007921 |
def logx(supress_args=[], supress_all_args=False, supress_result=False, receiver=None):
"""
logs parameters and result
takes arguments
supress_args - list of parameter names to supress
supress_all_args - boolean to supress all arguments
supress_result - boolean to supress result
receiver - custom logging function which takes a string as input; defaults to logging on stdout
"""
def decorator(fn):
def func(*args, **kwargs):
if not supress_all_args:
arg_string = ""
for i in range(0, len(args)):
var_name = fn.__code__.co_varnames[i]
if var_name != "self" and var_name not in supress_args:
arg_string += var_name + ":" + str(args[i]) + ","
arg_string = arg_string[0:len(arg_string) - 1]
string = (RED + BOLD + '>> ' + END + 'Calling {0}({1})'.format(fn.__code__.co_name, arg_string))
if len(kwargs):
string = (
RED + BOLD + '>> ' + END + 'Calling {0} with args {1} and kwargs {2}'.format(
fn.__code__.co_name,
arg_string, kwargs))
if receiver:
receiver(string)
else:
print(string)
result = fn(*args, **kwargs)
if not supress_result:
string = BLUE + BOLD + '<< ' + END + 'Return {0} with result :{1}'.format(fn.__code__.co_name, result)
if receiver:
receiver(string)
else:
print(string)
return result
return func
return decorator | 0.003415 |
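A hypothetical use of the decorator above (the module-level color constants RED, BLUE, BOLD and END it prints with are assumed to exist); the password argument is kept out of the logged call line.
@logx(supress_args=['password'])
def login(user, password):
    return "token-for-" + user

# Logs something like: ">> Calling login(user:alice)" and "<< Return login with result :token-for-alice"
login("alice", "s3cret")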
def get_splits(self, n_splits=1):
"""Return splits of this dataset ready for Cross Validation.
If n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
Args:
n_splits (int): Number of times that the data needs to be splitted.
Returns:
tuple or list:
if n_splits is 1, a tuple containing the X for train and test
and the y for train and test is returned.
Otherwise, if n_splits is bigger than 1, a list of such tuples
is returned, one for each split.
"""
if n_splits == 1:
stratify = self.target if self._stratify else None
return train_test_split(
self.data,
self.target,
shuffle=self._shuffle,
stratify=stratify
)
else:
cv_class = StratifiedKFold if self._stratify else KFold
cv = cv_class(n_splits=n_splits, shuffle=self._shuffle)
splits = list()
for train, test in cv.split(self.data, self.target):
X_train = self._get_split(self.data, train)
y_train = self._get_split(self.target, train)
X_test = self._get_split(self.data, test)
y_test = self._get_split(self.target, test)
splits.append((X_train, X_test, y_train, y_test))
return splits | 0.001251 |
def get_access_key(self):
"""
Gets the application secret key.
The value can be stored in parameters "access_key", "client_key" or "secret_key".
:return: the application secret key.
"""
        access_key = self.get_as_nullable_string("access_key")
        access_key = access_key if access_key is not None else self.get_as_nullable_string("client_key")
        access_key = access_key if access_key is not None else self.get_as_nullable_string("secret_key")
return access_key | 0.012019 |
def make_map():
"""Create, configure and return the routes Mapper"""
map = Mapper(directory=config['pylons.paths']['controllers'],
always_scan=config['debug'], explicit=True)
map.minimization = False
# The ErrorController route (handles 404/500 error pages); it should
# likely stay at the top, ensuring it can always be resolved
map.connect('/error/{action}', controller='error')
map.connect('/error/{action}/{id}', controller='error')
# CUSTOM ROUTES HERE
# mapfiles routes
map.connect('/mapfiles/default', controller='mapfiles', action='get_default_mapfile')
map.resource('MapFiles','mapfiles')
map.connect('/mapfiles/{id}/symbols', controller='mapfiles', action='get_symbols')
map.connect('/mapfiles/{id}/fonts', controller='mapfiles', action='get_fonts')
map.connect('/mapfiles/{id}/download', controller='mapfiles', action='download_mapfile')
map.connect('/mapfiles/{id}/wms', controller='mapfiles', action='wms_proxy')
# datastores routes
map.resource('DataStores', 'datastores')
# datasources routes
map.connect('/datastores/{datastore_id}/datasources',
controller='datasources', conditions={'method': ['POST']}, action='create')
map.connect('/datastores/{datastore_id}/datasources',
controller='datasources', action='index')
map.connect('/datastores/{datastore_id}/datasources/{datasource_id}',
controller='datasources', action='show')
map.connect('/datastores/{datastore_id}/datasources/{datasource_id}/columns',
controller='datasources', action='showcolumns')
map.connect('/datastores/{datastore_id}/datasources/{datasource_id}/mapfile',
controller='datasources', action='showmapfile')
# layertemplates routes
map.resource('LayerTemplates', 'layertemplates')
# auth routes
map.connect('/signin', controller='main', action='signin')
map.connect('/signout', controller='main', action='signout')
# default route for root ;)
map.connect('/', controller='main', action='index')
map.connect('/layout.js', controller='main', action='layout')
map.connect('/{controller}/{action}')
map.connect('/{controller}/{action}/{id}')
return map | 0.00699 |
def __get_securities(self, currency: str, agent: str, symbol: str,
namespace: str) -> List[dal.Security]:
""" Fetches the securities that match the given filters """
repo = self.get_security_repository()
query = repo.query
if currency is not None:
query = query.filter(dal.Security.currency == currency)
if agent is not None:
query = query.filter(dal.Security.updater == agent)
if symbol is not None:
query = query.filter(dal.Security.symbol == symbol)
if namespace is not None:
query = query.filter(dal.Security.namespace == namespace)
# Sorting
query = query.order_by(dal.Security.namespace, dal.Security.symbol)
securities = query.all()
return securities | 0.003641 |
def jdnDate(jdn):
""" Converts Julian Day Number to Gregorian date. """
a = jdn + 32044
b = (4*a + 3) // 146097
c = a - (146097*b) // 4
d = (4*c + 3) // 1461
e = c - (1461*d) // 4
m = (5*e + 2) // 153
day = e + 1 - (153*m + 2) // 5
month = m + 3 - 12*(m//10)
year = 100*b + d - 4800 + m//10
return [year, month, day] | 0.002778 |
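A worked check of the arithmetic above (assuming jdnDate as defined): Julian Day Number 2451545 is 1 January 2000 in the Gregorian calendar.
# Intermediate values: a=2483589, b=67, c=36465, d=99, e=306, m=10
assert jdnDate(2451545) == [2000, 1, 1]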
def risearch(self):
"Instance of :class:`eulfedora.api.ResourceIndex`, with the same root url and credentials"
if self._risearch is None:
self._risearch = ResourceIndex(self.api.base_url, self.api.username, self.api.password)
return self._risearch | 0.014134 |
def purge_run(self, event):
"""Run purge for the object with ``location_id`` specified in ``event`` argument."""
location_id = event['location_id']
verbosity = event['verbosity']
try:
logger.info(__("Running purge for location id {}.", location_id))
location_purge(location_id=location_id, delete=True, verbosity=verbosity)
except Exception: # pylint: disable=broad-except
logger.exception("Error while purging location.", extra={'location_id': location_id}) | 0.009328 |
def _infer_interval_breaks(coord, axis=0, check_monotonic=False):
"""
>>> _infer_interval_breaks(np.arange(5))
array([-0.5, 0.5, 1.5, 2.5, 3.5, 4.5])
>>> _infer_interval_breaks([[0, 1], [3, 4]], axis=1)
array([[-0.5, 0.5, 1.5],
[ 2.5, 3.5, 4.5]])
"""
coord = np.asarray(coord)
if check_monotonic and not _is_monotonic(coord, axis=axis):
raise ValueError("The input coordinate is not sorted in increasing "
"order along axis %d. This can lead to unexpected "
"results. Consider calling the `sortby` method on "
"the input DataArray. To plot data with categorical "
"axes, consider using the `heatmap` function from "
"the `seaborn` statistical plotting library." % axis)
deltas = 0.5 * np.diff(coord, axis=axis)
if deltas.size == 0:
deltas = np.array(0.0)
first = np.take(coord, [0], axis=axis) - np.take(deltas, [0], axis=axis)
last = np.take(coord, [-1], axis=axis) + np.take(deltas, [-1], axis=axis)
trim_last = tuple(slice(None, -1) if n == axis else slice(None)
for n in range(coord.ndim))
return np.concatenate([first, coord[trim_last] + deltas, last], axis=axis) | 0.000764 |
def get_package_id(self, name):
"""
        Retrieve the smart package id given its English name
        @param (str) name: the Aruba Smart package size name, i.e.: "small", "medium", "large", "extra large".
        @return: The package id, which depends on the data center and the size chosen.
"""
json_scheme = self.gen_def_json_scheme('GetPreConfiguredPackages', dict(HypervisorType=4))
json_obj = self.call_method_post(method='GetPreConfiguredPackages ', json_scheme=json_scheme)
for package in json_obj['Value']:
packageId = package['PackageID']
for description in package['Descriptions']:
languageID = description['LanguageID']
packageName = description['Text']
if languageID == 2 and packageName.lower() == name.lower():
return packageId | 0.008018 |
def search(self, **kwargs):
"""
        Method to search for ipv4's based on extended search.
:param search: Dict containing QuerySets to find ipv4's.
:param include: Array containing fields to include on response.
:param exclude: Array containing fields to exclude on response.
:param fields: Array containing fields to override default fields.
:param kind: Determine if result will be detailed ('detail') or basic ('basic').
:return: Dict containing ipv4's
"""
return super(ApiNetworkIPv4, self).get(self.prepare_url('api/v3/networkv4/',
kwargs)) | 0.005865 |
def addLocation(self, locationUri, weight):
"""
add relevant location to the topic page
@param locationUri: uri of the location to add
@param weight: importance of the provided location (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["locations"].append({"uri": locationUri, "wgt": weight}) | 0.00905 |
def theta2_purities_chart (self):
""" Make the plot showing alignment rates """
# Specify the order of the different possible categories
keys = OrderedDict()
keys['proportion_germline'] = { 'name': 'Germline' }
keys['proportion_tumour_1'] = { 'name': 'Tumour Subclone 1' }
keys['proportion_tumour_2'] = { 'name': 'Tumour Subclone 2' }
keys['proportion_tumour_3'] = { 'name': 'Tumour Subclone 3' }
keys['proportion_tumour_4'] = { 'name': 'Tumour Subclone 4' }
keys['proportion_tumour_5'] = { 'name': 'Tumour Subclone 5' }
keys['proportion_tumour_gt5'] = { 'name': 'Tumour Subclones > 5' }
# Config for the plot
pconfig = {
'id': 'theta2_purity_plot',
'title': 'THetA2: Tumour Subclone Purities',
'cpswitch': False,
'ymin': 0,
'ymax': 100,
'ylab': '% Purity',
'tt_suffix': '%'
}
return bargraph.plot(self.theta2_data, keys, pconfig) | 0.022094 |
def _raw_write(self, data, debug_str = None):
"""
Write data to YubiKey.
"""
if self.debug:
if not debug_str:
debug_str = ''
hexdump = yubico_util.hexdump(data, colorize=True)[:-1] # strip LF
self._debug("WRITE : %s %s\n" % (hexdump, debug_str))
request_type = _USB_TYPE_CLASS | _USB_RECIP_INTERFACE | _USB_ENDPOINT_OUT
value = _REPORT_TYPE_FEATURE << 8 # apparently required for YubiKey 1.3.2, but not 2.2.x
sent = self._usb_handle.controlMsg(request_type,
_HID_SET_REPORT,
data,
value = value,
timeout = _USB_TIMEOUT_MS)
if sent != _FEATURE_RPT_SIZE:
self.debug("Failed writing %i bytes (wrote %i) to USB HID YubiKey.\n"
% (_FEATURE_RPT_SIZE, sent))
raise YubiKeyUSBHIDError('Failed talking to USB HID YubiKey')
return sent | 0.014995 |
def list(
self,
bucket: str,
prefix: str=None,
delimiter: str=None,
) -> typing.Iterator[str]:
"""
Returns an iterator of all blob entries in a bucket that match a given prefix. Do not return any keys that
contain the delimiter past the prefix.
"""
raise NotImplementedError() | 0.02168 |
def format_configchoicefield_nodes(field_name, field, field_id, state, lineno):
"""Create a section node that documents a ConfigChoiceField config field.
Parameters
----------
field_name : `str`
Name of the configuration field (the attribute name of on the config
class).
field : ``lsst.pex.config.ConfigChoiceField``
A configuration field.
field_id : `str`
Unique identifier for this field. This is used as the id and name of
        the section node, with a ``-section`` suffix.
state : ``docutils.statemachine.State``
Usually the directive's ``state`` attribute.
    lineno : `int`
Usually the directive's ``lineno`` attribute.
Returns
-------
``docutils.nodes.section``
Section containing documentation nodes for the ConfigChoiceField.
"""
# Create a definition list for the choices
choice_dl = nodes.definition_list()
for choice_value, choice_class in field.typemap.items():
item = nodes.definition_list_item()
item_term = nodes.term()
item_term += nodes.literal(text=repr(choice_value))
item += item_term
item_definition = nodes.definition()
def_para = nodes.paragraph()
name = '.'.join((choice_class.__module__, choice_class.__name__))
def_para += pending_config_xref(rawsource=name)
item_definition += def_para
item += item_definition
choice_dl.append(item)
choices_node = nodes.definition_list_item()
choices_node.append(nodes.term(text='Choices'))
choices_definition = nodes.definition()
choices_definition.append(choice_dl)
choices_node.append(choices_definition)
# Field type
field_type_item = nodes.definition_list_item()
field_type_item.append(nodes.term(text="Field type"))
field_type_item_content = nodes.definition()
field_type_item_content_p = nodes.paragraph()
if field.multi:
multi_text = "Multi-selection "
else:
multi_text = "Single-selection "
field_type_item_content_p += nodes.Text(multi_text, multi_text)
field_type_item_content_p += make_python_xref_nodes_for_type(
type(field),
state,
hide_namespace=True)[0].children[0]
if field.optional:
field_type_item_content_p += nodes.Text(' (optional)', ' (optional)')
field_type_item_content += field_type_item_content_p
field_type_item += field_type_item_content
dl = nodes.definition_list()
dl += create_default_item_node(field, state)
dl += field_type_item
dl += choices_node
# Doc for this field, parsed as rst
desc_node = create_description_node(field, state)
# Title for configuration field
title = create_title_node(field_name, field, field_id, state, lineno)
return [title, dl, desc_node] | 0.000354 |
def _get_component_from_result(self, result, lookup):
"""
Helper function to get a particular address component from a Google result.
Since the address components in results are an array of objects containing a types array,
we have to search for a particular component rather than being able to look it up directly.
Returns the first match, so this should be used for unique component types (e.g.
'locality'), not for categories (e.g. 'political') that can describe multiple components.
:arg dict result: A results dict with an 'address_components' key, as returned by the
Google geocoder.
:arg dict lookup: The type (e.g. 'street_number') and key ('short_name' or 'long_name') of
the desired address component value.
:returns: address component or empty string
"""
for component in result['address_components']:
if lookup['type'] in component['types']:
return component.get(lookup['key'], '')
return '' | 0.008318 |
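A self-contained illustration of the lookup logic above, using a minimal dict shaped like a Google geocoder result; the method body is repeated as a free function so the assertion runs standalone.
def _get_component(result, lookup):
    # First component whose 'types' list contains the requested type wins.
    for component in result['address_components']:
        if lookup['type'] in component['types']:
            return component.get(lookup['key'], '')
    return ''

result = {'address_components': [
    {'types': ['street_number'], 'short_name': '1600', 'long_name': '1600'},
    {'types': ['locality', 'political'], 'short_name': 'Mountain View', 'long_name': 'Mountain View'},
]}
assert _get_component(result, {'type': 'locality', 'key': 'long_name'}) == 'Mountain View'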
def _negotiateHandler(self, request):
"""
Negotiate a handler based on the content types acceptable to the
client.
:rtype: 2-`tuple` of `twisted.web.iweb.IResource` and `bytes`
:return: Pair of a resource and the content type.
"""
accept = _parseAccept(request.requestHeaders.getRawHeaders('Accept'))
for contentType in accept.keys():
handler = self._acceptHandlers.get(contentType.lower())
if handler is not None:
return handler, handler.contentType
if self._fallback:
handler = self._handlers[0]
return handler, handler.contentType
return NotAcceptable(), None | 0.002825 |
def _listen(self, protocols, From, description):
"""
Implementation of L{Listen}.
"""
# The peer is coming from a client-side representation of the user
# described by 'From', and talking *to* a server-side representation of
# the user described by 'From'.
self.verifyCertificateAllowed(From, From)
theirCert = Certificate.peerFromTransport(self.transport)
for protocolName in protocols:
if protocolName.startswith('.'):
raise VerifyError(
"Internal protocols are for server-server use _only_: %r" %
protocolName)
key = (From, protocolName)
value = (self, theirCert, description)
log.msg("%r listening for %r" % key)
self.listeningClient.append((key, value))
self.service.listeningClients.setdefault(key, []).append(value)
return {} | 0.002125 |
def inverse_transform(self, X_in):
"""
Perform the inverse transformation to encoded data. Will attempt best case reconstruction, which means
it will return nan for handle_missing and handle_unknown settings that break the bijection. We issue
warnings when some of those cases occur.
Parameters
----------
X_in : array-like, shape = [n_samples, n_features]
Returns
-------
p: array, the same size of X_in
"""
X = X_in.copy(deep=True)
# first check the type
X = util.convert_input(X)
if self._dim is None:
raise ValueError(
'Must train encoder before it can be used to inverse_transform data')
# then make sure that it is the right size
if X.shape[1] != self._dim:
if self.drop_invariant:
raise ValueError("Unexpected input dimension %d, the attribute drop_invariant should "
"set as False when transform data" % (X.shape[1],))
else:
raise ValueError('Unexpected input dimension %d, expected %d' % (X.shape[1], self._dim,))
if not self.cols:
return X if self.return_df else X.values
if self.handle_unknown == 'value':
for col in self.cols:
if any(X[col] == -1):
warnings.warn("inverse_transform is not supported because transform impute "
"the unknown category -1 when encode %s" % (col,))
if self.handle_unknown == 'return_nan' and self.handle_missing == 'return_nan':
for col in self.cols:
if X[col].isnull().any():
warnings.warn("inverse_transform is not supported because transform impute "
"the unknown category nan when encode %s" % (col,))
for switch in self.mapping:
column_mapping = switch.get('mapping')
inverse = pd.Series(data=column_mapping.index, index=column_mapping.get_values())
X[switch.get('col')] = X[switch.get('col')].map(inverse).astype(switch.get('data_type'))
return X if self.return_df else X.values | 0.006696 |
def _registered_kl(type_a, type_b):
"""Get the KL function registered for classes a and b."""
hierarchy_a = tf_inspect.getmro(type_a)
hierarchy_b = tf_inspect.getmro(type_b)
dist_to_children = None
kl_fn = None
for mro_to_a, parent_a in enumerate(hierarchy_a):
for mro_to_b, parent_b in enumerate(hierarchy_b):
candidate_dist = mro_to_a + mro_to_b
candidate_kl_fn = _DIVERGENCES.get((parent_a, parent_b), None)
if not kl_fn or (candidate_kl_fn and candidate_dist < dist_to_children):
dist_to_children = candidate_dist
kl_fn = candidate_kl_fn
return kl_fn | 0.018152 |
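A toy, self-contained sketch of the MRO-distance idea used above (not the TensorFlow registry itself): among the registered class pairs, the one closest to the two concrete types in their MROs wins.
class A: pass
class B(A): pass

_TOY_REGISTRY = {(A, A): "kl_A_A", (B, A): "kl_B_A"}

def toy_lookup(type_a, type_b, registry=_TOY_REGISTRY):
    # Walk both MROs and keep the registered entry with the smallest combined distance.
    best_fn, best_dist = None, None
    for i, parent_a in enumerate(type_a.__mro__):
        for j, parent_b in enumerate(type_b.__mro__):
            fn = registry.get((parent_a, parent_b))
            if fn is not None and (best_dist is None or i + j < best_dist):
                best_fn, best_dist = fn, i + j
    return best_fn

assert toy_lookup(B, B) == "kl_B_A"   # (B, A) at distance 1 beats (A, A) at distance 2
assert toy_lookup(A, A) == "kl_A_A"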
def get_next_line(self):
""" Gets next line of random_file and starts over when reaching end of file"""
try:
line = next(self.random_file).strip()
#keep track of which document we are currently looking at to later avoid having the same doc as t1
if line == "":
self.current_random_doc = self.current_random_doc + 1
line = next(self.random_file).strip()
except StopIteration:
self.random_file.close()
self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
line = next(self.random_file).strip()
return line | 0.009146 |
async def peers(client: Client, leaves: bool = False, leaf: str = "") -> dict:
"""
GET peering entries of every node inside the currency network
:param client: Client to connect to the api
:param leaves: True if leaves should be requested
:param leaf: True if leaf should be requested
:return:
"""
if leaves is True:
return await client.get(MODULE + '/peering/peers', {"leaves": "true"}, schema=PEERS_SCHEMA)
else:
return await client.get(MODULE + '/peering/peers', {"leaf": leaf}, schema=PEERS_SCHEMA) | 0.005405 |
def free_symbolic(self):
"""Free symbolic data"""
if self._symbolic is not None:
self.funs.free_symbolic(self._symbolic)
self._symbolic = None
self.mtx = None | 0.009524 |
def set_options_from_file(self, filename, file_format='yaml'):
"""Load options from file.
This is a wrapper over :func:`.set_options_from_JSON` and
:func:`.set_options_from_YAML`.
:param str filename: File from which to load the options.
:param str file_format: File format (``yaml`` or ``json``).
:raises:
:ValueError: If an unknown ``format`` is requested.
"""
if file_format.lower() == 'yaml':
return self.set_options_from_YAML(filename)
elif file_format.lower() == 'json':
return self.set_options_from_JSON(filename)
else:
raise ValueError('Unknown format {}'.format(file_format)) | 0.002797 |
def charges(self, num, charge_id=None, **kwargs):
"""Search for charges against a company by company number.
Args:
num (str): Company number to search on.
            charge_id (Optional[str]): Filing record number of a specific charge.
kwargs (dict): additional keywords passed into
requests.session.get params keyword.
"""
baseuri = self._BASE_URI + "company/{}/charges".format(num)
        if charge_id is not None:
            baseuri += "/{}".format(charge_id)
        res = self.session.get(baseuri, params=kwargs)
self.handle_http_error(res)
return res | 0.002882 |
def post_event_unpublish(self, id, **data):
"""
POST /events/:id/unpublish/
Unpublishes an event. In order for a free event to be unpublished, it must not have any pending or completed orders,
even if the event is in the past. In order for a paid event to be unpublished, it must not have any pending or completed
orders, unless the event has been completed and paid out. Returns a boolean indicating success or failure of the
unpublish.
"""
return self.post("/events/{0}/unpublish/".format(id), data=data) | 0.010363 |
def _as_reference_point(self) -> np.ndarray:
""" Return classification information as reference point
"""
ref_val = []
for fn, f in self._classification.items():
if f[0] == "<":
ref_val.append(self._method.problem.ideal[fn])
elif f[0] == "<>":
ref_val.append(self._method.problem.nadir[fn])
else:
ref_val.append(f[1])
return np.array(ref_val) | 0.004283 |
def makevAndvPfuncs(self,policyFunc):
'''
Constructs the marginal value function for this period.
Parameters
----------
policyFunc : function
Consumption and medical care function for this period, defined over
market resources, permanent income level, and the medical need shock.
Returns
-------
vFunc : function
Value function for this period, defined over market resources and
permanent income.
vPfunc : function
Marginal value (of market resources) function for this period, defined
over market resources and permanent income.
'''
# Get state dimension sizes
mCount = self.aXtraGrid.size
pCount = self.pLvlGrid.size
MedCount = self.MedShkVals.size
# Make temporary grids to evaluate the consumption function
temp_grid = np.tile(np.reshape(self.aXtraGrid,(mCount,1,1)),(1,pCount,MedCount))
aMinGrid = np.tile(np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount,1)),
(mCount,1,MedCount))
pGrid = np.tile(np.reshape(self.pLvlGrid,(1,pCount,1)),(mCount,1,MedCount))
mGrid = temp_grid*pGrid + aMinGrid
if self.pLvlGrid[0] == 0:
mGrid[:,0,:] = np.tile(np.reshape(self.aXtraGrid,(mCount,1)),(1,MedCount))
MedShkGrid = np.tile(np.reshape(self.MedShkVals,(1,1,MedCount)),(mCount,pCount,1))
probsGrid = np.tile(np.reshape(self.MedShkPrbs,(1,1,MedCount)),(mCount,pCount,1))
# Get optimal consumption (and medical care) for each state
cGrid,MedGrid = policyFunc(mGrid,pGrid,MedShkGrid)
# Calculate expected value by "integrating" across medical shocks
if self.vFuncBool:
MedGrid = np.maximum(MedGrid,1e-100) # interpolation error sometimes makes Med < 0 (barely)
aGrid = np.maximum(mGrid - cGrid - self.MedPrice*MedGrid, aMinGrid) # interpolation error sometimes makes tiny violations
vGrid = self.u(cGrid) + MedShkGrid*self.uMed(MedGrid) + self.EndOfPrdvFunc(aGrid,pGrid)
vNow = np.sum(vGrid*probsGrid,axis=2)
# Calculate expected marginal value by "integrating" across medical shocks
vPgrid = self.uP(cGrid)
vPnow = np.sum(vPgrid*probsGrid,axis=2)
# Add vPnvrs=0 at m=mLvlMin to close it off at the bottom (and vNvrs=0)
mGrid_small = np.concatenate((np.reshape(self.mLvlMinNow(self.pLvlGrid),(1,pCount)),mGrid[:,:,0]))
vPnvrsNow = np.concatenate((np.zeros((1,pCount)),self.uPinv(vPnow)))
if self.vFuncBool:
vNvrsNow = np.concatenate((np.zeros((1,pCount)),self.uinv(vNow)),axis=0)
vNvrsPnow = vPnow*self.uinvP(vNow)
vNvrsPnow = np.concatenate((np.zeros((1,pCount)),vNvrsPnow),axis=0)
# Construct the pseudo-inverse value and marginal value functions over mLvl,pLvl
vPnvrsFunc_by_pLvl = []
vNvrsFunc_by_pLvl = []
for j in range(pCount): # Make a pseudo inverse marginal value function for each pLvl
pLvl = self.pLvlGrid[j]
m_temp = mGrid_small[:,j] - self.mLvlMinNow(pLvl)
vPnvrs_temp = vPnvrsNow[:,j]
vPnvrsFunc_by_pLvl.append(LinearInterp(m_temp,vPnvrs_temp))
if self.vFuncBool:
vNvrs_temp = vNvrsNow[:,j]
vNvrsP_temp = vNvrsPnow[:,j]
vNvrsFunc_by_pLvl.append(CubicInterp(m_temp,vNvrs_temp,vNvrsP_temp))
vPnvrsFuncBase = LinearInterpOnInterp1D(vPnvrsFunc_by_pLvl,self.pLvlGrid)
vPnvrsFunc = VariableLowerBoundFunc2D(vPnvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
if self.vFuncBool:
vNvrsFuncBase = LinearInterpOnInterp1D(vNvrsFunc_by_pLvl,self.pLvlGrid)
vNvrsFunc = VariableLowerBoundFunc2D(vNvrsFuncBase,self.mLvlMinNow) # adjust for the lower bound of mLvl
# "Re-curve" the (marginal) value function
vPfunc = MargValueFunc2D(vPnvrsFunc,self.CRRA)
if self.vFuncBool:
vFunc = ValueFunc2D(vNvrsFunc,self.CRRA)
else:
vFunc = NullFunc()
return vFunc, vPfunc | 0.026085 |
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self,
'matching_results') and self.matching_results is not None:
_dict['matching_results'] = self.matching_results
if hasattr(self, 'hits') and self.hits is not None:
_dict['hits'] = [x._to_dict() for x in self.hits]
return _dict | 0.004878 |
def handle_registered(self, server):
"""\
When the connection to the server is registered, send all pending
data.
"""
if not self._registered:
self.logger.info('Registered')
self._registered = True
for data in self._out_buffer:
self.send(data)
self._out_buffer = [] | 0.00542 |
def _join_strings(x):
"""Joins adjacent Str elements found in the element list 'x'."""
for i in range(len(x)-1): # Process successive pairs of elements
if x[i]['t'] == 'Str' and x[i+1]['t'] == 'Str':
x[i]['c'] += x[i+1]['c']
del x[i+1] # In-place deletion of element from list
return None # Forces processing to repeat
return True | 0.002571 |
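A small illustration of the repeat-until-True contract above, on a toy pandoc-style element list (assumes _join_strings as defined):
x = [{'t': 'Str', 'c': 'Hello'}, {'t': 'Str', 'c': ' world'}, {'t': 'Space', 'c': []}]
while _join_strings(x) is None:   # keep re-running until no adjacent Str pair remains
    pass
assert x == [{'t': 'Str', 'c': 'Hello world'}, {'t': 'Space', 'c': []}]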
def return_file_objects(connection, container, prefix='database'):
"""Given connecton and container find database dumps
"""
options = []
meta_data = objectstore.get_full_container_list(
connection, container, prefix='database')
env = ENV.upper()
for o_info in meta_data:
expected_file = f'database.{ENV}'
if o_info['name'].startswith(expected_file):
dt = dateparser.parse(o_info['last_modified'])
now = datetime.datetime.now()
delta = now - dt
LOG.debug('AGE: %d %s', delta.days, expected_file)
options.append((dt, o_info))
options.sort()
return options | 0.001466 |
def is_visible(self, pos: Union[Point2, Point3, Unit]) -> bool:
""" Returns True if you have vision on a grid point. """
# more info: https://github.com/Blizzard/s2client-proto/blob/9906df71d6909511907d8419b33acc1a3bd51ec0/s2clientprotocol/spatial.proto#L19
assert isinstance(pos, (Point2, Point3, Unit))
pos = pos.position.to2.rounded
return self.state.visibility[pos] == 2 | 0.007246 |
def validate_word_size(word_size, BLAST_SETS):
"""Validate word size in blast/tblastn form. """
blast_min_int_word_size = BLAST_SETS.min_word_size
blast_max_int_word_size = BLAST_SETS.max_word_size
blast_word_size_error = BLAST_SETS.get_word_size_error()
try:
if len(word_size) <= 0:
raise forms.ValidationError(blast_word_size_error)
int_word_size = int(word_size)
if int_word_size < blast_min_int_word_size:
raise forms.ValidationError(blast_word_size_error)
if int_word_size >= blast_max_int_word_size:
raise forms.ValidationError(blast_word_size_error)
except:
raise forms.ValidationError(blast_word_size_error)
return int_word_size | 0.002674 |
def share(self, email, message=None):
"""Share the project with another Todoist user.
:param email: The other user's email address.
:type email: str
:param message: Optional message to send with the invitation.
:type message: str
>>> from pytodoist import todoist
>>> user = todoist.login('john.doe@gmail.com', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.share('jane.doe@gmail.com')
"""
args = {
'project_id': self.id,
'email': email,
'message': message
}
_perform_command(self.owner, 'share_project', args) | 0.002972 |
def find_field(browser, field_type, value):
"""
Locate an input field.
:param browser: ``world.browser``
:param string field_type: a field type (i.e. `button`)
:param string value: an id, name or label
This first looks for `value` as the id of the element, else
the name of the element, else as a label for the element.
Returns: an :class:`ElementSelector`
"""
return find_field_by_id(browser, field_type, value) + \
find_field_by_name(browser, field_type, value) + \
find_field_by_label(browser, field_type, value) | 0.001742 |
def format_request_email_templ(increq, template, **ctx):
"""Format the email message element for inclusion request notification.
Formats the message according to the provided template file, using
some default fields from 'increq' object as default context.
Arbitrary context can be provided as keywords ('ctx'), and those will
not be overwritten by the fields from 'increq' object.
:param increq: Inclusion request object for which the request is made.
:type increq: `invenio_communities.models.InclusionRequest`
:param template: relative path to jinja template.
:type template: str
:param ctx: Optional extra context parameters passed to formatter.
:type ctx: dict.
:returns: Formatted message.
:rtype: str
"""
    # Add minimal information to the context (without overwriting).
curate_link = '{site_url}/communities/{id}/curate/'.format(
site_url=current_app.config['THEME_SITEURL'],
id=increq.community.id)
min_ctx = dict(
record=Record.get_record(increq.record.id),
requester=increq.user,
community=increq.community,
curate_link=curate_link,
)
for k, v in min_ctx.items():
if k not in ctx:
ctx[k] = v
msg_element = render_template_to_string(template, **ctx)
return msg_element | 0.000751 |
def normalizeX(value):
"""
Normalizes x coordinate.
* **value** must be an :ref:`type-int-float`.
* Returned value is the same type as the input value.
"""
if not isinstance(value, (int, float)):
raise TypeError("X coordinates must be instances of "
":ref:`type-int-float`, not %s."
% type(value).__name__)
return value | 0.002475 |
def _write(self, destination: TextIO) -> None:
"""
Writes the converted output to a destination.
"""
for line in self.dest_lines:
destination.write(line + NL) | 0.009901 |
def id_match_check(self, data_df, meta_df, dim):
"""
Verifies that id values match between:
- row case: index of data_df & index of row metadata
- col case: columns of data_df & index of column metadata
"""
if dim == "row":
if len(data_df.index) == len(meta_df.index) and set(data_df.index) == set(meta_df.index):
return True
else:
msg = ("The rids are inconsistent between data_df and row_metadata_df.\n" +
"data_df.index.values:\n{}\nrow_metadata_df.index.values:\n{}").format(data_df.index.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg)
elif dim == "col":
if len(data_df.columns) == len(meta_df.index) and set(data_df.columns) == set(meta_df.index):
return True
else:
msg = ("The cids are inconsistent between data_df and col_metadata_df.\n" +
"data_df.columns.values:\n{}\ncol_metadata_df.index.values:\n{}").format(data_df.columns.values, meta_df.index.values)
self.logger.error(msg)
raise Exception("GCToo GCToo.id_match_check " + msg) | 0.007849 |
def relabel(self, xlabel=None, ylabel=None,
y2label=None, title=None, delay_draw=False):
" re draw labels (title, x, y labels)"
n = self.labelfont.get_size()
self.titlefont.set_size(n+1)
# print(" plot relabel ", delay_draw)
rcParams['xtick.labelsize'] = rcParams['ytick.labelsize'] = n
rcParams['xtick.color'] = rcParams['ytick.color'] = self.textcolor
if xlabel is not None:
self.xlabel = xlabel
if ylabel is not None:
self.ylabel = ylabel
if y2label is not None:
self.y2label = y2label
if title is not None:
self.title = title
axes = self.canvas.figure.get_axes()
kws = dict(fontproperties=self.titlefont, color=self.textcolor)
axes[0].set_title(self.title, **kws)
kws['fontproperties'] = self.labelfont
if len(self.xlabel) > 0 and self.xlabel not in ('', None, 'None'):
axes[0].set_xlabel(self.xlabel, **kws)
if len(self.ylabel) > 0 and self.ylabel not in ('', None, 'None'):
axes[0].set_ylabel(self.ylabel, **kws)
if (len(axes) > 1 and len(self.y2label) > 0 and
self.y2label not in ('', None, 'None')):
axes[1].set_ylabel(self.y2label, **kws)
for ax in axes[0].xaxis, axes[0].yaxis:
for t in (ax.get_ticklabels() + ax.get_ticklines()):
t.set_color(self.textcolor)
if hasattr(t, 'set_fontsize'):
t.set_fontsize(n)
self.set_added_text_size()
if self.mpl_legend is not None:
for t in self.mpl_legend.get_texts():
t.set_color(self.textcolor)
if not delay_draw:
self.canvas.draw() | 0.004507 |
def action_set(method_name):
"""
Creates a setter that will call the action method with the context's
key as first parameter and the value as second parameter.
@param method_name: the name of a method belonging to the action.
@type method_name: str
"""
def action_set(value, context, **_params):
method = getattr(context["action"], method_name)
return _set(method, context["key"], value, (), {})
return action_set | 0.00216 |
def _try_reduce(self) -> Tuple[bool, List[HdlStatement]]:
"""
Doc on parent class :meth:`HdlStatement._try_reduce`
"""
# flag if IO of statement has changed
io_change = False
self.ifTrue, rank_decrease, _io_change = self._try_reduce_list(
self.ifTrue)
self.rank -= rank_decrease
io_change |= _io_change
new_elifs = []
for cond, statements in self.elIfs:
_statements, rank_decrease, _io_change = self._try_reduce_list(
statements)
self.rank -= rank_decrease
io_change |= _io_change
new_elifs.append((cond, _statements))
if self.ifFalse is not None:
            self.ifFalse, rank_decrease, _io_change = self._try_reduce_list(
self.ifFalse)
self.rank -= rank_decrease
io_change |= _io_change
reduce_self = not self.condHasEffect(
self.ifTrue, self.ifFalse, self.elIfs)
if reduce_self:
res = self.ifTrue
else:
res = [self, ]
self._on_reduce(reduce_self, io_change, res)
# try merge nested ifs as elifs
if self.ifFalse is not None and len(self.ifFalse) == 1:
child = self.ifFalse[0]
if isinstance(child, IfContainer):
self._merge_nested_if_from_else(child)
return res, io_change | 0.002102 |
def write(self, data):
'Put, possibly replace, file contents with (new) data'
if not hasattr(data, 'read'):
            data = six.BytesIO(data)  # wrap raw bytes in a file-like object
self.jfs.up(self.path, data) | 0.018868 |
def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
"""
        Draw the element profile plot for a composition while varying the
        chemical potential of an element.
        The X value is the negative of the chemical potential referenced to the
        elemental chemical potential. For example, if one chooses Element("Li"),
        X = -(µLi - µLi0), which corresponds to the voltage versus a metal anode.
        Y values represent the element uptake in this composition
        (unit: per atom). All reactions are printed to help you choose which
        profile steps to label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The labels for reaction products
you want to show in the plot. Default to None (not showing any
annotation for reaction products). For the profile steps you want
to show the labels, just add it to the show_label_index. The
profile step counts from zero. For example, you can set
show_label_index=[0, 2, 5] to label profile step 0,2,5.
xlim (float): The max x value. x value is from 0 to xlim. Default to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
'k', linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt | 0.001677 |
def instance(self):
"""
Retrieve the single (expected) instance of this 'Part' (of `Category.MODEL`) as a 'Part'.
See :func:`Part.instances()` method for documentation.
:return: :class:`Part` with category `INSTANCE`
:raises NotFoundError: if the instance does not exist
:raises MultipleFoundError: if there are more than a single instance returned
"""
instances_list = list(self.instances())
if len(instances_list) == 1:
return instances_list[0]
elif len(instances_list) > 1:
raise MultipleFoundError("Part {} has more than a single instance. "
"Use the `Part.instances()` method".format(self.name))
else:
raise NotFoundError("Part {} has no instance".format(self.name)) | 0.007194 |
def delete_saved_search(self, keys):
""" Delete one or more saved searches by passing a list of one or more
unique search keys
"""
headers = {"Zotero-Write-Token": token()}
headers.update(self.default_headers())
req = requests.delete(
url=self.endpoint
+ "/{t}/{u}/searches".format(t=self.library_type, u=self.library_id),
headers=headers,
params={"searchKey": ",".join(keys)},
)
self.request = req
try:
req.raise_for_status()
except requests.exceptions.HTTPError:
error_handler(req)
return req.status_code | 0.004505 |
def volumes(self):
"""
Returns a `list` of all the `Volume` known to the cluster. Updates every time - no caching.
:return: a `list` of all the `Volume` known to the cluster.
:rtype: list
"""
self.connection._check_login()
response = self.connection._do_get("{}/{}".format(self.connection._api_url, "types/Volume/instances")).json()
all_volumes = []
for volume in response:
all_volumes.append(
SIO_Volume.from_dict(volume)
)
return all_volumes | 0.007092 |
def query_dns(cls, domains, records):
"""
Query DNS records for host.
:param domains: Iterable of domains to get DNS Records for
:param records: Iterable of DNS records to get from domain.
"""
results = {k: set() for k in records}
for record in records:
for domain in domains:
try:
answers = cls.resolver.query(domain, record)
for answer in answers:
# Add value to record type
results.get(record).add(answer)
except (resolver.NoAnswer, resolver.NXDOMAIN, resolver.NoNameservers):
# Type of record doesn't fit domain or no answer from ns
continue
return {k: v for k, v in results.items() if v} | 0.003619 |
def get_order(self, order_id):
'''Get an order'''
resp = self.get('/orders/{}'.format(order_id))
return Order(resp) | 0.014388 |