| text (stringlengths 78 to 104k) | score (float64, 0 to 0.18) |
|---|---|
def add_edge(self, x, y, label=None):
"""Add an edge from distribution *x* to distribution *y* with the given
*label*.
:type x: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type y: :class:`distutils2.database.InstalledDistribution` or
:class:`distutils2.database.EggInfoDistribution`
:type label: ``str`` or ``None``
"""
self.adjacency_list[x].append((y, label))
# multiple edges are allowed, so be careful
if x not in self.reverse_list[y]:
self.reverse_list[y].append(x) | 0.003082 |
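For illustration, a minimal sketch of the graph container this method assumes (forward adjacency list plus a reverse list of predecessors). The `DependencyGraph` name and the `defaultdict` backing are assumptions for this sketch, not the library's actual class:

```python
# Hypothetical stand-in for the structure add_edge() relies on; it skips
# explicit vertex registration by backing both maps with defaultdicts.
from collections import defaultdict

class DependencyGraph:
    def __init__(self):
        self.adjacency_list = defaultdict(list)  # x -> [(y, label), ...]
        self.reverse_list = defaultdict(list)    # y -> [x, ...]

    def add_edge(self, x, y, label=None):
        self.adjacency_list[x].append((y, label))
        # multiple edges are allowed, but track each predecessor only once
        if x not in self.reverse_list[y]:
            self.reverse_list[y].append(x)

g = DependencyGraph()
g.add_edge("distA", "distB", label=">=1.0")
g.add_edge("distA", "distB", label=">=1.1")
assert g.reverse_list["distB"] == ["distA"]
assert len(g.adjacency_list["distA"]) == 2
```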
def set_delegate(address=None, pubkey=None, secret=None):
"""Set delegate parameters. Call set_delegate with no arguments to clear."""
c.DELEGATE['ADDRESS'] = address
c.DELEGATE['PUBKEY'] = pubkey
c.DELEGATE['PASSPHRASE'] = secret | 0.00813 |
def service_status(hostname=None, service=None, **kwargs):
'''
Check the status of a particular service on a host in Nagios.
By default statuses are returned in a numeric format.
Parameters:
hostname
The hostname to check the status of the service in Nagios.
service
The service to check the status of in Nagios.
numeric
Set to False in order to return the status in text format
('OK' instead of 0, 'Warning' instead of 1, etc.)
:return: status: 'OK', 'Warning', 'Critical' or 'Unknown'
CLI Example:
.. code-block:: bash
salt '*' nagios_rpc.service_status hostname=webserver.domain.com service='HTTP'
salt '*' nagios_rpc.service_status hostname=webserver.domain.com service='HTTP' numeric=False
'''
if not hostname:
raise CommandExecutionError('Missing hostname parameter')
if not service:
raise CommandExecutionError('Missing service parameter')
target = 'service'
numeric = kwargs.get('numeric')
data = _status_query(target, hostname, service=service, enumerate=numeric)
ret = {'result': data['result']}
if ret['result']:
ret['status'] = data.get('json_data', {}).get('data', {}).get(target, {}).get('status',
not numeric and 'Unknown' or 2)
else:
ret['error'] = data['error']
return ret | 0.003453 |
def disconnect(self, name=None):
"""Clear internal Channel cache, allowing currently unused channels to be implictly closed.
:param str name: None, to clear the entire cache, or a name string to clear only a certain entry.
"""
if name is None:
self._channels = {}
else:
self._channels.pop(name)
if self._ctxt is not None:
self._ctxt.disconnect(name) | 0.009217 |
def _get_attribute(self, offset):
"""Determines attribute type at the offset and returns \
initialized attribute object.
Returns:
MftAttr: One of the attribute objects \
(eg. :class:`~.mft_attribute.MftAttrFilename`).
None: If the attribute type does not match any of the supported \
attribute types.
"""
attr_type = self.get_uint_le(offset)
# Attribute length is in header @ offset 0x4
length = self.get_uint_le(offset + 0x04)
data = self.get_chunk(offset, length)
return MftAttr.factory(attr_type, data) | 0.00319 |
def print_stats(correctness, confidence, name):
"""
Prints out accuracy, coverage, etc. statistics
:param correctness: ndarray
One bool per example specifying whether it was correctly classified
:param confidence: ndarray
The probability associated with each prediction
:param name: str
The name of this type of data (e.g. "clean", "MaxConfidence")
"""
accuracy = correctness.mean()
wrongness = 1 - correctness
denom1 = np.maximum(1, wrongness.sum())
ave_prob_on_mistake = (wrongness * confidence).sum() / denom1
assert ave_prob_on_mistake <= 1., ave_prob_on_mistake
denom2 = np.maximum(1, correctness.sum())
ave_prob_on_correct = (correctness * confidence).sum() / denom2
covered = confidence > 0.5
cov_half = covered.mean()
acc_half = (correctness * covered).sum() / np.maximum(1, covered.sum())
print('Accuracy on %s examples: %0.4f' % (name, accuracy))
print("Average prob on mistakes: %0.4f" % ave_prob_on_mistake)
print("Average prob on correct: %0.4f" % ave_prob_on_correct)
print("Accuracy when prob thresholded at .5: %0.4f" % acc_half)
print("Coverage when prob thresholded at .5: %0.4f" % cov_half)
success_rate = acc_half * cov_half
# Success is correctly classifying a covered example
print("Success rate at .5: %0.4f" % success_rate)
# Failure is misclassifying a covered example
failure_rate = (1. - acc_half) * cov_half
print("Failure rate at .5: %0.4f" % failure_rate)
print() | 0.016416 |
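For context, a minimal usage sketch; it assumes `numpy` is available as `np` and `print_stats` from the snippet above is in scope, and the arrays are synthetic:

```python
import numpy as np

# Synthetic predictions for ten examples: which were classified correctly,
# and the probability the model assigned to each prediction.
correctness = np.array([1, 1, 0, 1, 0, 1, 1, 0, 1, 1], dtype=bool)
confidence = np.array([0.9, 0.8, 0.7, 0.6, 0.4, 0.95, 0.55, 0.3, 0.65, 0.85])

print_stats(correctness, confidence, name="clean")
# Accuracy is 0.7; "coverage" counts examples whose confidence exceeds 0.5.
```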
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_file_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp()
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file)) | 0.00165 |
def executemany(self, query, args):
"""Execute a multi-row query.
query -- string, query to execute on server
args
Sequence of sequences or mappings, parameters to use with
query.
Returns long integer rows affected, if any.
This method improves performance on multiple-row INSERT and
REPLACE. Otherwise it is equivalent to looping over args with
execute().
"""
del self.messages[:]
db = self._get_db()
if not args: return
if isinstance(query, unicode):
query = query.encode(db.unicode_literal.charset)
m = insert_values.search(query)
if not m:
r = 0
for a in args:
r = r + self.execute(query, a)
return r
p = m.start(1)
e = m.end(1)
qv = m.group(1)
try:
q = [ qv % db.literal(a) for a in args ]
except TypeError, msg:
if msg.args[0] in ("not enough arguments for format string",
"not all arguments converted"):
self.errorhandler(self, ProgrammingError, msg.args[0])
else:
self.errorhandler(self, TypeError, msg)
except (SystemExit, KeyboardInterrupt):
raise
except:
exc, value, tb = sys.exc_info()
del tb
self.errorhandler(self, exc, value)
r = self._query('\n'.join([query[:p], ',\n'.join(q), query[e:]]))
if not self._defer_warnings: self._warning_check()
return r | 0.006143 |
def set_start_location(self, new_location):
# type: (int) -> None
'''
A method to set the location of the start of the partition.
Parameters:
new_location - The new extent the UDF partition should start at.
Returns:
Nothing.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Partition Volume Descriptor not initialized')
self.part_start_location = new_location | 0.008264 |
def phase_step(z,Ns,p_step,Nstep):
"""
Create a one sample per symbol signal containing a phase rotation
step Nsymb into the waveform.
:param z: complex baseband signal after matched filter
:param Ns: number of samples per symbol
:param p_step: size in radians of the phase step
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the phase step
Mark Wickert July 2014
"""
nn = np.arange(0,len(z[::Ns]))
theta = np.zeros(len(nn))
idx = np.where(nn >= Nstep)
theta[idx] = p_step*np.ones(len(idx))
z_rot = z[::Ns]*np.exp(1j*theta)
return z_rot | 0.00753 |
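A short usage sketch under stated assumptions (`numpy` as `np`, the `phase_step` function above in scope); the constant-envelope input signal is synthetic:

```python
import numpy as np

Ns = 8                                    # samples per symbol
z = np.ones(100 * Ns, dtype=complex)      # stand-in for a matched-filter output
z_rot = phase_step(z, Ns, p_step=np.pi / 2, Nstep=50)

# One sample per symbol comes back; the phase jumps by pi/2 at symbol 50.
assert np.allclose(np.angle(z_rot[:50]), 0.0)
assert np.allclose(np.angle(z_rot[50:]), np.pi / 2)
```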
def setData(self, index, value, role=Qt.DisplayRole):
"""Set the value to the index position depending on Qt::ItemDataRole and data type of the column
Args:
index (QtCore.QModelIndex): Index to define column and row.
value (object): new value.
role (Qt::ItemDataRole): Use this role to specify what you want to do.
Raises:
TypeError: If the value could not be converted to a known datatype.
Returns:
True if value is changed. Calls layoutChanged after update.
False if value is not different from original value.
"""
if not index.isValid() or not self.editable:
return False
if value != index.data(role):
self.layoutAboutToBeChanged.emit()
row = self._dataFrame.index[index.row()]
col = self._dataFrame.columns[index.column()]
#print 'before change: ', index.data().toUTC(), self._dataFrame.iloc[row][col]
columnDtype = self._dataFrame[col].dtype
if columnDtype == object:
pass
elif columnDtype in self._intDtypes:
dtypeInfo = numpy.iinfo(columnDtype)
if value < dtypeInfo.min:
value = dtypeInfo.min
elif value > dtypeInfo.max:
value = dtypeInfo.max
elif columnDtype in self._floatDtypes:
value = numpy.float64(value).astype(columnDtype)
elif columnDtype in self._boolDtypes:
value = numpy.bool_(value)
elif columnDtype in self._dateDtypes:
# convert the given value to a compatible datetime object.
# if the conversion could not be done, keep the original
# value.
if isinstance(value, QtCore.QDateTime):
value = value.toString(self.timestampFormat)
try:
value = pandas.Timestamp(value)
except Exception:
raise Exception("Can't convert '{0}' into a datetime".format(value))
# return False
else:
raise TypeError("try to set unhandled data type")
self._dataFrame.set_value(row, col, value)
#print 'after change: ', value, self._dataFrame.iloc[row][col]
self.layoutChanged.emit()
return True
else:
return False | 0.00321 |
def draw(self, mode="triangle_strip"):
""" Draw collection """
gl.glDepthMask(gl.GL_FALSE)
Collection.draw(self, mode)
gl.glDepthMask(gl.GL_TRUE) | 0.011236 |
def _is_collinear(self, lons, lats):
"""
Checks if first three points are collinear - in the spherical
case this corresponds to all points lying on a great circle
and, hence, all coordinate vectors being in a single plane.
"""
x, y, z = lonlat2xyz(lons[:3], lats[:3])
pts = np.column_stack([x, y, z])
collinearity = (np.linalg.det(pts.T) == 0.0)
return collinearity | 0.004545 |
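A small standalone sketch of the same determinant test; `lonlat2xyz` is not shown in the snippet, so the degree-based conversion below is an assumption:

```python
import numpy as np

def lonlat2xyz(lons, lats):
    # Assumed conversion: degrees -> points on the unit sphere.
    lons, lats = np.radians(lons), np.radians(lats)
    return (np.cos(lats) * np.cos(lons),
            np.cos(lats) * np.sin(lons),
            np.sin(lats))

# Three points on the equator lie on one great circle, i.e. in one plane.
x, y, z = lonlat2xyz(np.array([0.0, 45.0, 90.0]), np.array([0.0, 0.0, 0.0]))
pts = np.column_stack([x, y, z])
print(abs(np.linalg.det(pts.T)) < 1e-12)   # True (the method compares to 0 exactly)
```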
def alter(self, interfaces):
"""
Used to provide the ability to alter the interfaces dictionary before
it is returned from self.parse().
Required Arguments:
interfaces
The interfaces dictionary.
Returns: interfaces dict
"""
# fixup some things
for device, device_dict in interfaces.items():
if len(device_dict['inet4']) > 0:
device_dict['inet'] = device_dict['inet4'][0]
if 'inet' in device_dict and not device_dict['inet'] is None:
try:
host = socket.gethostbyaddr(device_dict['inet'])[0]
interfaces[device]['hostname'] = host
except (socket.herror, socket.gaierror):
interfaces[device]['hostname'] = None
# To be sure that hex values and similar are always consistent, we
# return everything in lowercase. For instance, Windows writes
# MACs in upper-case.
for key, device_item in device_dict.items():
if hasattr(device_item, 'lower'):
interfaces[device][key] = device_dict[key].lower()
return interfaces | 0.001629 |
def request_name(self, name):
"""Request a name, might return the name or a similar one if already
used or reserved
"""
while name in self._blacklist:
name += "_"
self._blacklist.add(name)
return name | 0.007663 |
def reparentUnions(self):
'''
Helper method for :func:`~exhale.graph.ExhaleRoot.reparentAll`. Namespaces and
classes should have the unions defined in them to be in the child list of itself
rather than floating around. Union nodes that are reparented (e.g. a union
defined in a class) will be removed from the list ``self.unions`` since the
Breathe directive for its parent (e.g. the class) will include the documentation
for the union. The consequence of this is that a union defined in a class will
**not** appear in the full api listing of Unions.
'''
# unions declared in a class will not link to the individual union page, so
# we will instead elect to remove these from the list of unions
removals = []
for u in self.unions:
parts = u.name.split("::")
if len(parts) >= 2:
# TODO: nested unions are not supported right now...
parent_name = "::".join(p for p in parts[:-1])
reparented = False
# see if the name matches any potential parents
for node in itertools.chain(self.class_like, self.namespaces):
if node.name == parent_name:
node.children.append(u)
u.parent = node
reparented = True
break
# if the union was reparented, schedule it for removal from self.unions
if reparented:
removals.append(u)
else:
# << verboseBuild
utils.verbose_log(
"The union {0} has '::' in its name, but no parent was found!".format(u.name),
utils.AnsiColors.BOLD_RED
)
# remove the unions from self.unions that were declared in class_like objects
for rm in removals:
self.unions.remove(rm) | 0.006067 |
def get_plugin_instance(plugin_class, *args, **kwargs):
"""Returns an instance of a fully initialized plugin class
Every plugin class is kept in a plugin cache, effectively making
every plugin into a singleton object.
When a plugin has a yaz.dependency decorator, it will be called
as well, before the instance is returned.
"""
assert issubclass(plugin_class, BasePlugin), type(plugin_class)
global _yaz_plugin_instance_cache
qualname = plugin_class.__qualname__
if not qualname in _yaz_plugin_instance_cache:
plugin_class = get_plugin_list()[qualname]
_yaz_plugin_instance_cache[qualname] = plugin = plugin_class(*args, **kwargs)
# find any yaz.dependency decorators, and call them when necessary
funcs = [func
for _, func
in inspect.getmembers(plugin)
if inspect.ismethod(func) and hasattr(func, "yaz_dependency_config")]
for func in funcs:
signature = inspect.signature(func)
assert all(parameter.kind is parameter.POSITIONAL_OR_KEYWORD and issubclass(parameter.annotation, BasePlugin) for parameter in signature.parameters.values()), "All parameters for {} must type hint to a BasePlugin".format(func)
func(*[get_plugin_instance(parameter.annotation)
for parameter
in signature.parameters.values()])
return _yaz_plugin_instance_cache[qualname] | 0.003418 |
def get_port_channel_detail_output_lacp_individual_agg(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_channel_detail = ET.Element("get_port_channel_detail")
config = get_port_channel_detail
output = ET.SubElement(get_port_channel_detail, "output")
lacp = ET.SubElement(output, "lacp")
individual_agg = ET.SubElement(lacp, "individual-agg")
individual_agg.text = kwargs.pop('individual_agg')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003396 |
def to_barrier_key(cls, barrier_index_key):
"""Converts a _BarrierIndex key to a _BarrierRecord key.
Args:
barrier_index_key: db.Key for a _BarrierIndex entity.
Returns:
db.Key for the corresponding _BarrierRecord entity.
"""
barrier_index_path = barrier_index_key.to_path()
# Pick out the items from the _BarrierIndex key path that we need to
# construct the _BarrierRecord key path.
(pipeline_kind, dependent_pipeline_id,
unused_kind, purpose) = barrier_index_path[-4:]
barrier_record_path = (
pipeline_kind, dependent_pipeline_id,
_BarrierRecord.kind(), purpose)
return db.Key.from_path(*barrier_record_path) | 0.001451 |
def Sigmoid(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex:
"""
Applies the sigmoid function to a vertex.
The sigmoid function is a special case of the Logistic function.
:param input_vertex: the vertex
"""
return Double(context.jvm_view().SigmoidVertex, label, cast_to_double_vertex(input_vertex)) | 0.016529 |
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False | 0.0125 |
def strip_oembeds(text, args=None):
"""
Take a block of text and strip all the embeds from it, optionally taking
a maxwidth, maxheight / resource_type
Usage:
{{ post.content|strip_embeds }}
{{ post.content|strip_embeds:"600x600xphoto" }}
{{ post.content|strip_embeds:"video" }}
"""
resource_type = width = height = None
if args:
dimensions = args.lower().split('x')
if len(dimensions) in (3, 1):
resource_type = dimensions.pop()
if len(dimensions) == 2:
width, height = map(lambda x: int(x), dimensions)
client = OEmbedConsumer()
return mark_safe(client.strip(text, width, height, resource_type)) | 0.007013 |
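The filter's argument string packs dimensions and resource type together; here is a standalone sketch of the same parsing (outside Django) for the three documented forms:

```python
for args in ("600x600xphoto", "video", None):
    resource_type = width = height = None
    if args:
        dimensions = args.lower().split('x')
        if len(dimensions) in (3, 1):
            resource_type = dimensions.pop()
        if len(dimensions) == 2:
            width, height = map(int, dimensions)
    print(args, "->", width, height, resource_type)
# 600x600xphoto -> 600 600 photo
# video         -> None None video
# None          -> None None None
```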
def validate_checkpoint_files(checkpoint_file, backup_file):
"""Checks if the given checkpoint and/or backup files are valid.
The checkpoint file is considered valid if:
* it passes all tests run by ``check_integrity``;
* it has at least one sample written to it (indicating at least one
checkpoint has happened).
The same applies to the backup file. The backup file must also have the
same number of samples as the checkpoint file, otherwise, the backup is
considered invalid.
If the checkpoint (backup) file is found to be valid, but the backup
(checkpoint) file is not valid, then the checkpoint (backup) is copied to
the backup (checkpoint). Thus, this function ensures that checkpoint and
backup files are either both valid or both invalid.
Parameters
----------
checkpoint_file : string
Name of the checkpoint file.
backup_file : string
Name of the backup file.
Returns
-------
checkpoint_valid : bool
Whether or not the checkpoint (and backup) file may be used for loading
samples.
"""
# check if checkpoint file exists and is valid
try:
check_integrity(checkpoint_file)
checkpoint_valid = True
except (ValueError, KeyError, IOError):
checkpoint_valid = False
# backup file
try:
check_integrity(backup_file)
backup_valid = True
except (ValueError, KeyError, IOError):
backup_valid = False
# check if there are any samples in the file; if not, we'll just start from
# scratch
if checkpoint_valid:
with loadfile(checkpoint_file, 'r') as fp:
try:
group = '{}/{}'.format(fp.samples_group, fp.variable_params[0])
nsamples = fp[group].size
checkpoint_valid = nsamples != 0
except KeyError:
checkpoint_valid = False
# check if there are any samples in the backup file
if backup_valid:
with loadfile(backup_file, 'r') as fp:
try:
group = '{}/{}'.format(fp.samples_group, fp.variable_params[0])
backup_nsamples = fp[group].size
backup_valid = backup_nsamples != 0
except KeyError:
backup_valid = False
# check that the checkpoint and backup have the same number of samples;
# if not, assume the checkpoint has the correct number
if checkpoint_valid and backup_valid:
backup_valid = nsamples == backup_nsamples
# decide what to do based on the files' statuses
if checkpoint_valid and not backup_valid:
# copy the checkpoint to the backup
logging.info("Backup invalid; copying checkpoint file")
shutil.copy(checkpoint_file, backup_file)
backup_valid = True
elif backup_valid and not checkpoint_valid:
logging.info("Checkpoint invalid; copying backup file")
# copy the backup to the checkpoint
shutil.copy(backup_file, checkpoint_file)
checkpoint_valid = True
return checkpoint_valid | 0.000323 |
def init_app(self, app, storage=None, cache=None, file_upload=None):
"""
Initialize the engine.
:param app: The app to use
:type app: Object
:param storage: The blog storage instance that implements the
``Storage`` class interface.
:type storage: Object
:param cache: (Optional) A Flask-Cache object to enable caching
:type cache: Object
"""
self.app = app
self.config = self.app.config
self.storage = storage or self.storage
self.file_upload = file_upload or self.file_upload
self.cache = cache or self.cache
self._register_plugins(self.app, self.config)
from .views import create_blueprint
blog_app = create_blueprint(__name__, self)
# external urls
blueprint_created.send(self.app, engine=self, blueprint=blog_app)
self.app.register_blueprint(
blog_app, url_prefix=self.config.get("BLOGGING_URL_PREFIX"))
self.app.extensions["FLASK_BLOGGING_ENGINE"] = self # duplicate
self.app.extensions["blogging"] = self
self.principal = Principal(self.app)
engine_initialised.send(self.app, engine=self)
if self.config.get("BLOGGING_ALLOW_FILEUPLOAD", True):
self.ffu = self.file_upload or FlaskFileUpload(app) | 0.001493 |
def set_interactive_policy(*, locals=None, banner=None, serve=None,
prompt_control=None):
"""Use an interactive event loop by default."""
policy = InteractiveEventLoopPolicy(
locals=locals,
banner=banner,
serve=serve,
prompt_control=prompt_control)
asyncio.set_event_loop_policy(policy) | 0.002801 |
def resolve_egg_link(path):
"""
Given a path to an .egg-link, resolve distributions
present in the referenced path.
"""
referenced_paths = non_empty_lines(path)
resolved_paths = (
os.path.join(os.path.dirname(path), ref)
for ref in referenced_paths
)
dist_groups = map(find_distributions, resolved_paths)
return next(dist_groups, ()) | 0.002597 |
def size(self):
"""
The size of this block, in bytes
"""
if self._size is None:
self._size = sum(s.len for s in self.statements if type(s) is stmt.IMark)
return self._size | 0.013453 |
def angle_between_vectors(x, y):
""" Compute the angle between vector x and y """
dp = dot_product(x, y)
if dp == 0:
return 0
xm = magnitude(x)
ym = magnitude(y)
return math.acos(dp / (xm*ym)) * (180. / math.pi) | 0.004115 |
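The helpers `dot_product` and `magnitude` are not shown in the snippet; below are plausible pure-Python stand-ins plus a quick check. Note that the early return in the function above maps a zero dot product (including orthogonal inputs) to 0 degrees, so the check avoids that case:

```python
import math

def dot_product(x, y):
    return sum(a * b for a, b in zip(x, y))

def magnitude(x):
    return math.sqrt(sum(a * a for a in x))

# 45 degrees between (1, 0) and (1, 1)
assert abs(angle_between_vectors((1, 0), (1, 1)) - 45.0) < 1e-9
```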
def to_graph_decomposition(H):
"""Returns a DirectedHypergraph object that has the same nodes (and
corresponding attributes) as the given hypergraph, except that for all
hyperedges in the given hypergraph, each node in the tail of the hyperedge
is pairwise connected to each node in the head of the hyperedge in the
new hypergraph.
Said another way, each of the original hyperedges are decomposed in the
new hypergraph into fully-connected bipartite components.
:param H: the hypergraph to decompose into a graph.
:returns: DirectedHypergraph -- the decomposed hypergraph.
:raises: TypeError -- Transformation only applicable to
directed hypergraphs
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Transformation only applicable to \
directed hypergraphs")
G = DirectedHypergraph()
nodes = [(node, H.get_node_attributes(node))
for node in H.node_iterator()]
G.add_nodes(nodes)
edges = [([tail_node], [head_node])
for hyperedge_id in H.hyperedge_id_iterator()
for tail_node in H.get_hyperedge_tail(hyperedge_id)
for head_node in H.get_hyperedge_head(hyperedge_id)]
G.add_hyperedges(edges)
return G | 0.000772 |
def depth(self):
"""Depth at grid centers (m)
:getter: Returns the points of axis ``'depth'`` if available in the
process's domains.
:type: array
:raises: :exc:`ValueError`
if no ``'depth'`` axis can be found.
"""
try:
for domname, dom in self.domains.items():
try:
thisdepth = dom.axes['depth'].points
except:
pass
return thisdepth
except:
raise ValueError('Can\'t resolve a depth axis.') | 0.006633 |
def h5ToDict(h5, readH5pyDataset=True):
""" Read a hdf5 file into a dictionary """
h = h5py.File(h5, "r")
ret = unwrapArray(h, recursive=True, readH5pyDataset=readH5pyDataset)
if readH5pyDataset: h.close()
return ret | 0.008475 |
def get_house_conn_gen_load(graph, node):
"""
Get generation capacity/ peak load of neighboring house connected to main
branch
Parameters
----------
graph : :networkx:`NetworkX Graph Obj< >`
Directed graph
node : graph node
Node of the main branch of LV grid
Returns
-------
:any:`list`
A list containing two items
# peak load of connected house branch
# generation capacity of connected generators
"""
generation = 0
peak_load = 0
for cus_1 in graph.successors(node):
for cus_2 in graph.successors(cus_1):
if not isinstance(cus_2, list):
cus_2 = [cus_2]
generation += sum([gen.capacity for gen in cus_2
if isinstance(gen, GeneratorDing0)])
peak_load += sum([load.peak_load for load in cus_2
if isinstance(load, LVLoadDing0)])
return [peak_load, generation] | 0.004073 |
def initialize(self, request, response):
"""Initialize.
1. call webapp init.
2. check request is indeed from taskqueue.
3. check the task has not been retried too many times.
4. run handler specific processing logic.
5. run error handling logic if processing failed.
Args:
request: a webapp.Request instance.
response: a webapp.Response instance.
"""
super(TaskQueueHandler, self).initialize(request, response)
# Check request is from taskqueue.
if "X-AppEngine-QueueName" not in self.request.headers:
logging.error(self.request.headers)
logging.error("Task queue handler received non-task queue request")
self.response.set_status(
403, message="Task queue handler received non-task queue request")
return
# Check task has not been retried too many times.
if self.task_retry_count() + 1 > parameters.config.TASK_MAX_ATTEMPTS:
logging.error(
"Task %s has been attempted %s times. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"],
self.task_retry_count() + 1)
self._drop_gracefully()
return
try:
self._preprocess()
self._preprocess_success = True
# pylint: disable=bare-except
except:
self._preprocess_success = False
logging.error(
"Preprocess task %s failed. Dropping it permanently.",
self.request.headers["X-AppEngine-TaskName"])
self._drop_gracefully() | 0.00939 |
def add_user(name, password=None, runas=None):
'''
Add a rabbitMQ user via rabbitmqctl user_add <user> <password>
CLI Example:
.. code-block:: bash
salt '*' rabbitmq.add_user rabbit_user password
'''
clear_pw = False
if password is None:
# Generate a random, temporary password. RabbitMQ requires one.
clear_pw = True
password = ''.join(random.SystemRandom().choice(
string.ascii_uppercase + string.digits) for x in range(15))
if runas is None and not salt.utils.platform.is_windows():
runas = salt.utils.user.get_user()
if salt.utils.platform.is_windows():
# On Windows, if the password contains a special character
# such as '|', normal execution will fail. For example:
# cmd: rabbitmq.add_user abc "asdf|def"
# stderr: 'def' is not recognized as an internal or external
# command,\r\noperable program or batch file.
# Work around this by using a shell and a quoted command.
python_shell = True
cmd = '"{0}" add_user "{1}" "{2}"'.format(
RABBITMQCTL, name, password
)
else:
python_shell = False
cmd = [RABBITMQCTL, 'add_user', name, password]
res = __salt__['cmd.run_all'](
cmd,
reset_system_locale=False,
output_loglevel='quiet',
runas=runas,
python_shell=python_shell)
if clear_pw:
# Now, Clear the random password from the account, if necessary
try:
clear_password(name, runas)
except Exception:
# Clearing the password failed. We should try to clean up
# and re-raise the error.
delete_user(name, runas)
raise
msg = 'Added'
return _format_response(res, msg) | 0.000553 |
def create_lab_meeting(self, event_type, presenters, foodie = None, locked = False):
'Presenters can be a comma-separated list of presenters.'
e = self.initialize_tagged_copy()
summary_texts = {
'Lab meeting' : 'Kortemme Lab meeting',
'Kortemme/DeGrado joint meeting' : 'DeGrado/Kortemme labs joint meeting'
}
assert(summary_texts.get(event_type))
e['extendedProperties']['shared']['event_type'] = event_type
e['extendedProperties']['shared']['Presenters'] = presenters
e['extendedProperties']['shared']['Food'] = foodie
e['extendedProperties']['shared']['Locked meeting'] = locked
print(presenters)
print([[p for p in presenters.split(',')] + [foodie]])
participants = [p.strip() for p in ([p for p in presenters.split(',')] + [foodie]) if p and p.strip()]
participants = [p for p in [self.email_map.get(p) for p in participants] if p]
participant_names = [self.username_map.get(p.strip(), p.strip()) for p in presenters.split(',') if p.strip()]
if participants:
e['extendedProperties']['shared']['ParticipantList'] = ','.join(participants)
if not e['summary']:
e['summary'] = '%s: %s' % (summary_texts[event_type], ', '.join(participant_names))
e['description'] = e['description'] or e['summary']
return e | 0.010722 |
def Wang_Chiang_Lu(m, x, rhol, rhog, mul, mug, D, roughness=0, L=1):
r'''Calculates two-phase pressure drop with the Wang, Chiang, and Lu (1997)
correlation given in [1]_ and reviewed in [2]_ and [3]_.
.. math::
\Delta P = \Delta P_{g} \phi_g^2
.. math::
\phi_g^2 = 1 + 9.397X^{0.62} + 0.564X^{2.45} \text{ for } G >= 200 kg/m^2/s
.. math::
\phi_g^2 = 1 + CX + X^2 \text{ for lower mass fluxes}
.. math::
C = 0.000004566X^{0.128}Re_{lo}^{0.938}\left(\frac{\rho_l}{\rho_g}
\right)^{-2.15}\left(\frac{\mu_l}{\mu_g}\right)^{5.1}
.. math::
X^2 = \frac{\Delta P_l}{\Delta P_g}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
x : float
Quality of fluid, [-]
rhol : float
Liquid density, [kg/m^3]
rhog : float
Gas density, [kg/m^3]
mul : float
Viscosity of liquid, [Pa*s]
mug : float
Viscosity of gas, [Pa*s]
D : float
Diameter of pipe, [m]
roughness : float, optional
Roughness of pipe for use in calculating friction factor, [m]
L : float, optional
Length of pipe, [m]
Returns
-------
dP : float
Pressure drop of the two-phase flow, [Pa]
Notes
-----
Examples
--------
>>> Wang_Chiang_Lu(m=0.6, x=0.1, rhol=915., rhog=2.67, mul=180E-6,
... mug=14E-6, D=0.05, roughness=0, L=1)
448.29981978639154
References
----------
.. [1] Wang, Chi-Chuan, Ching-Shan Chiang, and Ding-Chong Lu. "Visual
Observation of Two-Phase Flow Pattern of R-22, R-134a, and R-407C in a
6.5-Mm Smooth Tube." Experimental Thermal and Fluid Science 15, no. 4
(November 1, 1997): 395-405. doi:10.1016/S0894-1777(97)00007-1.
.. [2] Kim, Sung-Min, and Issam Mudawar. "Universal Approach to Predicting
Two-Phase Frictional Pressure Drop for Adiabatic and Condensing Mini/
Micro-Channel Flows." International Journal of Heat and Mass Transfer
55, no. 11-12 (May 2012): 3246-61.
doi:10.1016/j.ijheatmasstransfer.2012.02.047.
.. [3] Xu, Yu, Xiande Fang, Xianghui Su, Zhanru Zhou, and Weiwei Chen.
"Evaluation of Frictional Pressure Drop Correlations for Two-Phase Flow
in Pipes." Nuclear Engineering and Design, SI : CFD4NRS-3, 253 (December
2012): 86-97. doi:10.1016/j.nucengdes.2012.08.007.
'''
G_tp = m/(pi/4*D**2)
# Actual Liquid flow
v_l = m*(1-x)/rhol/(pi/4*D**2)
Re_l = Reynolds(V=v_l, rho=rhol, mu=mul, D=D)
fd_l = friction_factor(Re=Re_l, eD=roughness/D)
dP_l = fd_l*L/D*(0.5*rhol*v_l**2)
# Actual gas flow
v_g = m*x/rhog/(pi/4*D**2)
Re_g = Reynolds(V=v_g, rho=rhog, mu=mug, D=D)
fd_g = friction_factor(Re=Re_g, eD=roughness/D)
dP_g = fd_g*L/D*(0.5*rhog*v_g**2)
X = (dP_l/dP_g)**0.5
if G_tp >= 200:
phi_g2 = 1 + 9.397*X**0.62 + 0.564*X**2.45
else:
# Liquid-only flow; Re_lo is oddly needed
v_lo = m/rhol/(pi/4*D**2)
Re_lo = Reynolds(V=v_lo, rho=rhol, mu=mul, D=D)
C = 0.000004566*X**0.128*Re_lo**0.938*(rhol/rhog)**-2.15*(mul/mug)**5.1
phi_g2 = 1 + C*X + X**2
return dP_g*phi_g2 | 0.000622 |
def log(self):
""" Returns the natural logarithm of the quaternion.
(not tested)
"""
# Init
norm = self.norm()
vecNorm = self.x**2 + self.y**2 + self.z**2
tmp = self.w / norm
q = Quaternion()
# Calculate
q.w = np.log(norm)
q.x = np.log(norm) * self.x * np.arccos(tmp) / vecNorm
q.y = np.log(norm) * self.y * np.arccos(tmp) / vecNorm
q.z = np.log(norm) * self.z * np.arccos(tmp) / vecNorm
return q | 0.011194 |
def delete_node(self, node_id):
"""Removes the node identified by node_id from the graph."""
node = self.get_node(node_id)
# Remove all edges from the node
for e in node['edges']:
self.delete_edge_by_id(e)
# Remove all edges to the node
edges = [edge_id for edge_id, edge in list(self.edges.items()) if edge['vertices'][1] == node_id]
for e in edges:
self.delete_edge_by_id(e)
# Remove the node from the node list
del self.nodes[node_id]
self._num_nodes -= 1 | 0.0053 |
def _save_artifact(build, data, content_type):
"""Saves an artifact to the DB and returns it."""
sha1sum = hashlib.sha1(data).hexdigest()
artifact = models.Artifact.query.filter_by(id=sha1sum).first()
if artifact:
logging.debug('Upload already exists: artifact_id=%r', sha1sum)
else:
logging.info('Upload received: artifact_id=%r, content_type=%r',
sha1sum, content_type)
artifact = models.Artifact(
id=sha1sum,
content_type=content_type,
data=data)
_artifact_created(artifact)
artifact.owners.append(build)
return artifact | 0.007987 |
def get_handler(handler_name):
"""
Imports the module for a DOAC handler based on the string representation of the module path that is provided.
"""
from .conf import options
handlers = options.handlers
for handler in handlers:
handler_path = handler.split(".")
name = handler_path[-2]
if handler_name == name:
handler_module = __import__(".".join(handler_path[:-1]), {}, {}, str(handler_path[-1]))
return getattr(handler_module, handler_path[-1])()
return None | 0.013913 |
def lookup_thread_id(self):
"Lookup the thread id as path to comment file."
path = os.path.join(self.realm, self.topic + '.csv')
return path | 0.012121 |
def _is_lang_change(self, request):
"""Return True if the lang param is present and URL isn't exempt."""
if 'lang' not in request.GET:
return False
return not any(request.path.endswith(url) for url in self.exempt_urls) | 0.007843 |
def propagate(self, date):
"""Compute state of orbit at a given date, past or future
Args:
date (Date)
Return:
Orbit:
"""
i0, Ω0, e0, ω0, M0, n0 = self.tle
n0 *= 60 # conversion to min⁻¹
if isinstance(date, Date):
t0 = self.tle.date.datetime
tdiff = (date.datetime - t0).total_seconds() / 60.
elif isinstance(date, timedelta):
tdiff = date.total_seconds() / 60.
date = self.tle.date + date
else:
raise TypeError("Unhandled type for 'date': %s" % type(date))
bstar = self.tle.complements['bstar']
µ = self.gravity.µ_e
r_e = self.gravity.r_e
k_e = self.gravity.k_e
# retrieve initialized variables
_i = self._init
n0 = _i.n0
Mdf = M0 + _i.Mdot * n0 * tdiff
ωdf = ω0 + _i.ωdot * n0 * tdiff
Ωdf = Ω0 + _i.Ωdot * n0 * tdiff
delta_ω = bstar * _i.C3 * cos(ω0) * tdiff
delta_M = 0.
if e0 > 1e-4:
delta_M = - 2 / 3 * (_i.q0 - _i.s) ** 4 * bstar * _i.ξ ** 4 / (e0 * _i.η) * ((1 + _i.η * cos(Mdf)) ** 3 - (1 + _i.η * cos(M0)) ** 3)
Mp = (Mdf + delta_ω + delta_M) % (2 * np.pi)
ω = ωdf - delta_ω - delta_M
Ω = Ωdf - 21 * n0 * _i.k2 * _i.θ / (2 * _i.a0 ** 2 * _i.β_0 ** 2) * _i.C1 * tdiff ** 2
e = e0 - bstar * _i.C4 * tdiff - bstar * _i.C5 * (sin(Mp) - sin(M0))
if e < 1e-6:
e = 1e-6
a = _i.a0 * (1 - _i.C1 * tdiff - _i.D2 * tdiff ** 2 - _i.D3 * tdiff ** 3 - _i.D4 * tdiff ** 4) ** 2
L = Mp + ω + Ω + n0 * (3 / 2 * _i.C1 * tdiff ** 2 + (_i.D2 + 2 * _i.C1 ** 2) * tdiff ** 3 + 1 / 4 * (3 * _i.D3 + 12 * _i.C1 * _i.D2 + 10 * _i.C1 ** 3) * tdiff ** 4 + 1 / 5 * (3 * _i.D4 + 12 * _i.C1 * _i.D3 + 6 * _i.D2 ** 2 + 30 * _i.C1 ** 2 * _i.D2 + 15 * _i.C1 ** 4) * tdiff ** 5)
β = sqrt(1 - e ** 2)
n = µ / (a ** (3 / 2))
# Long-period terms
axN = e * cos(ω)
ayNL = _i.A30 * sin(i0) / (4 * _i.k2 * a * β ** 2)
tmp = (1 + _i.θ) if (1 + _i.θ) > 1.5e-12 else 1.5e-12
L_L = ayNL / 2 * axN * ((3 + 5 * _i.θ) / tmp)
L_T = L + L_L
ayN = e * sin(ω) + ayNL
# Resolving of kepler equation
U = (L_T - Ω) % (2 * np.pi)
Epω = U
for xxx in range(10):
delta_Epω = (U - ayN * cos(Epω) + axN * sin(Epω) - Epω) / (1 - ayN * sin(Epω) - axN * cos(Epω))
if abs(delta_Epω) < 1e-12:
break
Epω = Epω + delta_Epω
# Short-period terms
ecosE = axN * cos(Epω) + ayN * sin(Epω)
esinE = axN * sin(Epω) - ayN * cos(Epω)
e_L = sqrt(axN ** 2 + ayN ** 2)
p_L = a * (1 - e_L ** 2)
r = a * (1 - ecosE)
rdot = sqrt(a) / r * esinE
rfdot = sqrt(p_L) / r
cosu = a / r * (cos(Epω) - axN + ayN * esinE / (1 + sqrt(1 - e_L ** 2)))
sinu = a / r * (sin(Epω) - ayN - axN * esinE / (1 + sqrt(1 - e_L ** 2)))
u = arctan2(sinu, cosu)
Delta_r = _i.k2 / (2 * p_L) * (1 - _i.θ ** 2) * cos(2 * u)
Delta_u = - _i.k2 / (4 * p_L ** 2) * (7 * _i.θ ** 2 - 1) * sin(2 * u)
Delta_Ω = 3 * _i.k2 * _i.θ / (2 * p_L ** 2) * sin(2 * u)
Delta_i = 3 * _i.k2 * _i.θ / (2 * p_L ** 2) * sin(i0) * cos(2 * u)
Delta_rdot = - n * _i.k2 * (1 - _i.θ ** 2) * sin(2 * u) / (p_L * µ)
Delta_rfdot = _i.k2 * n * ((1 - _i.θ ** 2) * cos(2 * u) - 3 / 2 * (1 - 3 * _i.θ ** 2)) / (p_L * µ)
rk = r * (1 - 3 / 2 * _i.k2 * sqrt(1 - e_L ** 2) / (p_L ** 2) * (3 * _i.θ ** 2 - 1)) + Delta_r
uk = u + Delta_u
Ωk = Ω + Delta_Ω
ik = i0 + Delta_i
rdotk = rdot + Delta_rdot
rfdotk = rfdot + Delta_rfdot
# Vectors
vM = np.array([- sin(Ωk) * cos(ik), cos(Ωk) * cos(ik), sin(ik)])
vN = np.array([cos(Ωk), sin(Ωk), 0])
vU = vM * sin(uk) + vN * cos(uk)
vV = vM * cos(uk) - vN * sin(uk)
vR = rk * vU * r_e
vRdot = (rdotk * vU + rfdotk * vV) * (r_e * k_e / 60.)
vector = np.concatenate((vR, vRdot)) * 1000 # conversion to meters
return self.tle.__class__(date, vector, 'cartesian', 'TEME', self.__class__(), **self.tle.complements) | 0.002803 |
def dbserver(cmd, dbhostport=None,
dbpath=os.path.expanduser(config.dbserver.file)):
"""
start/stop/restart the database server, or return its status
"""
if config.dbserver.multi_user and getpass.getuser() != 'openquake':
sys.exit('oq dbserver only works in single user mode')
status = dbs.get_status()
if cmd == 'status':
print('dbserver ' + status)
elif cmd == 'stop':
if status == 'running':
pid = logs.dbcmd('getpid')
os.kill(pid, signal.SIGINT) # this is trapped by the DbServer
else:
print('dbserver already stopped')
elif cmd == 'start':
if status == 'not-running':
dbs.run_server(dbpath, dbhostport)
else:
print('dbserver already running')
elif cmd == 'restart':
if status == 'running':
pid = logs.dbcmd('getpid')
os.kill(pid, signal.SIGINT)
dbs.run_server(dbpath, dbhostport) | 0.001017 |
def _parse_parameter_locks(optimizer, meta_parameters, parameter_locks):
"""Synchronize meta_parameters and locked_values.
The union of these two sets will have all necessary parameters.
locked_values will have the parameters specified in parameter_locks.
"""
# WARNING: meta_parameters is modified inline
locked_values = {}
if parameter_locks:
for name in parameter_locks:
# Store the current optimizer value
# and remove it from our dictionary of parameters to optimize
locked_values[name] = getattr(optimizer, name)
meta_parameters.pop(name)
return locked_values | 0.001531 |
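A minimal illustration of the split between optimized and locked parameters; the stand-in optimizer and the parameter names are hypothetical:

```python
class FakeOptimizer:
    learning_rate = 0.1
    momentum = 0.9

meta_parameters = {'learning_rate': (0.001, 1.0), 'momentum': (0.0, 0.99)}
locked = _parse_parameter_locks(FakeOptimizer(), meta_parameters,
                                parameter_locks=['momentum'])
print(locked)            # {'momentum': 0.9}
print(meta_parameters)   # {'learning_rate': (0.001, 1.0)}  (modified in place)
```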
def purge_scenario(scenario_id, **kwargs):
"""
Purge (delete) a scenario.
"""
_check_can_edit_scenario(scenario_id, kwargs['user_id'])
user_id = kwargs.get('user_id')
scenario_i = _get_scenario(scenario_id, user_id)
db.DBSession.delete(scenario_i)
db.DBSession.flush()
return 'OK' | 0.003058 |
def delete_communication_channel_id(self, id, user_id):
"""
Delete a communication channel.
Delete an existing communication channel.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("DELETE /api/v1/users/{user_id}/communication_channels/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/users/{user_id}/communication_channels/{id}".format(**path), data=data, params=params, single_item=True) | 0.005362 |
def run(self,
horizon: int,
initial_state: Optional[StateTensor] = None) -> SimulationOutput:
'''Builds the MDP graph and simulates in batch the trajectories
with given `horizon`. Returns the non-fluents, states, actions, interms
and rewards. Fluents and non-fluents are returned in factored form.
Note:
All output arrays have shape: (batch_size, horizon, fluent_shape).
Except initial state that has shape: (batch_size, fluent_shape).
Args:
horizon (int): The number of timesteps in the simulation.
initial_state (Optional[Sequence[tf.Tensor]]): The initial state tensors.
Returns:
Tuple[NonFluentsArray, StatesArray, ActionsArray, IntermsArray, np.array]: Simulation output tuple.
'''
trajectory = self.trajectory(horizon, initial_state)
with tf.Session(graph=self.graph) as sess:
sess.run(tf.global_variables_initializer())
non_fluents = sess.run(self._non_fluents)
initial_state, states, actions, interms, rewards = sess.run(trajectory)
# non-fluents
non_fluent_ordering = self._cell._compiler.rddl.domain.non_fluent_ordering
non_fluents = tuple(zip(non_fluent_ordering, non_fluents))
# states
state_fluent_ordering = self._cell._compiler.rddl.domain.state_fluent_ordering
states = tuple(zip(state_fluent_ordering, states))
# interms
interm_fluent_ordering = self._cell._compiler.rddl.domain.interm_fluent_ordering
interms = tuple(zip(interm_fluent_ordering, interms))
# actions
action_fluent_ordering = self._cell._compiler.rddl.domain.action_fluent_ordering
actions = tuple(zip(action_fluent_ordering, actions))
# rewards
rewards = np.squeeze(rewards)
outputs = (non_fluents, initial_state, states, actions, interms, rewards)
return outputs | 0.006073 |
def create(self, data):
"""
Create a new SyncListItemInstance
:param dict data: The data
:returns: Newly created SyncListItemInstance
:rtype: twilio.rest.preview.sync.service.sync_list.sync_list_item.SyncListItemInstance
"""
data = values.of({'Data': serialize.object(data), })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SyncListItemInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
list_sid=self._solution['list_sid'],
) | 0.004601 |
def get_sections(self, section_name):
"""
Return the list of sections stored in self.timers() given `section_name`
A fake section is returned if the timer does not have section_name.
"""
sections = []
for timer in self.timers():
for sect in timer.sections:
if sect.name == section_name:
sections.append(sect)
break
else:
sections.append(AbinitTimerSection.fake())
return sections | 0.005639 |
def account_for_stripped_whitespace(spaces_keys,
removed_spaces,
replacement_types,
len_reportnums,
journals_matches,
replacement_index):
"""To build a processed (MARC XML) reference line in which the
recognised citations such as standardised periodical TITLEs and
REPORT-NUMBERs have been marked up, it is necessary to read from
the reference line BEFORE all punctuation was stripped and it was
made into upper-case. The indices of the cited items in this
'original line', however, will be different to those in the
'working-line', in which punctuation and multiple-spaces were
stripped out. For example, the following reading-line:
[26] E. Witten and S.-T. Yau, hep-th/9910245.
...becomes (after punctuation and multiple white-space stripping):
[26] E WITTEN AND S T YAU HEP TH/9910245
It can be seen that the report-number citation (hep-th/9910245) is
at a different index in the two strings. When refextract searches
for this citation, it uses the 2nd string (i.e. that which is
capitalised and has no punctuation). When it builds the MARC XML
representation of the reference line, however, it needs to read from
the first string. It must therefore consider the whitespace,
punctuation, etc that has been removed, in order to get the correct
index for the cited item. This function accounts for the stripped
characters before a given TITLE or REPORT-NUMBER index.
@param spaces_keys: (list) - the indices at which spaces were
removed from the reference line.
@param removed_spaces: (dictionary) - keyed by the indices at which
spaces were removed from the line, the values are the number of
spaces actually removed from that position.
So, for example, "3 spaces were removed from position 25 in
the line."
@param replacement_types: (dictionary) - at each 'replacement_index'
in the line, the type of replacement to make (title or reportnumber).
@param len_reportnums: (dictionary) - the lengths of the REPORT-
NUMBERs matched at the various indices in the line.
@param len_titles: (dictionary) - the lengths of the various
TITLEs matched at the various indices in the line.
@param replacement_index: (integer) - the index in the working line
of the identified TITLE or REPORT-NUMBER citation.
@return: (tuple) containing 2 elements:
+ the true replacement index of a replacement in
the reading line;
+ any extras to add into the replacement index;
"""
extras = 0
true_replacement_index = replacement_index
spare_replacement_index = replacement_index
for space in spaces_keys:
if space < true_replacement_index:
# There were spaces stripped before the current replacement
# Add the number of spaces removed from this location to the
# current replacement index:
true_replacement_index += removed_spaces[space]
spare_replacement_index += removed_spaces[space]
elif space >= spare_replacement_index and \
replacement_types[replacement_index] == u"journal" and \
space < (spare_replacement_index +
len(journals_matches[replacement_index])):
# A periodical title is being replaced. Account for multi-spaces
# that may have been stripped from the title before its
# recognition:
spare_replacement_index += removed_spaces[space]
extras += removed_spaces[space]
elif space >= spare_replacement_index and \
replacement_types[replacement_index] == u"reportnumber" and \
space < (spare_replacement_index +
len_reportnums[replacement_index]):
# An institutional preprint report-number is being replaced.
# Account for multi-spaces that may have been stripped from it
# before its recognition:
spare_replacement_index += removed_spaces[space]
extras += removed_spaces[space]
# return the new values for replacement indices with stripped
# whitespace accounted for:
return true_replacement_index, extras | 0.000221 |
def _query_wrap(fun, *args, **kwargs):
"""Wait until at least QUERY_WAIT_TIME seconds have passed
since the last invocation of this function, then call the given
function with the given arguments.
"""
with _query_lock:
global _last_query_time
since_last_query = time.time() - _last_query_time
if since_last_query < QUERY_WAIT_TIME:
time.sleep(QUERY_WAIT_TIME - since_last_query)
_last_query_time = time.time()
return fun(*args, **kwargs) | 0.001965 |
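The wrapper leans on module-level state that the snippet does not show; a self-contained sketch of the assumed setup and a throttled call follows (the names mirror those used in the body, but the values are assumptions):

```python
import threading
import time

QUERY_WAIT_TIME = 1.0              # assumed: minimum seconds between queries
_query_lock = threading.Lock()     # assumed: shared lock guarding the timestamp
_last_query_time = 0.0             # assumed: time of the previous query

# Any callable can be throttled this way, e.g. a web API request function.
_query_wrap(print, "first call")   # runs immediately
_query_wrap(print, "second call")  # sleeps until one second has elapsed
```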
def load_existing_json():
"""
Look for an existing json under :meth:`logger.get_logger_dir()` named "stats.json",
and return the loaded list of statistics if found. Returns None otherwise.
"""
dir = logger.get_logger_dir()
fname = os.path.join(dir, JSONWriter.FILENAME)
if tf.gfile.Exists(fname):
with open(fname) as f:
stats = json.load(f)
assert isinstance(stats, list), type(stats)
return stats
return None | 0.007505 |
def _upgrade_genome_resources(galaxy_dir, base_url):
"""Retrieve latest version of genome resource YAML configuration files.
"""
import requests
for dbkey, ref_file in genome.get_builds(galaxy_dir):
# Check for a remote genome resources file
remote_url = base_url % dbkey
requests.packages.urllib3.disable_warnings()
r = requests.get(remote_url, verify=False)
if r.status_code == requests.codes.ok:
local_file = os.path.join(os.path.dirname(ref_file), os.path.basename(remote_url))
if os.path.exists(local_file):
with open(local_file) as in_handle:
local_config = yaml.safe_load(in_handle)
remote_config = yaml.safe_load(r.text)
needs_update = remote_config["version"] > local_config.get("version", 0)
if needs_update:
shutil.move(local_file, local_file + ".old%s" % local_config.get("version", 0))
else:
needs_update = True
if needs_update:
print("Updating %s genome resources configuration" % dbkey)
with open(local_file, "w") as out_handle:
out_handle.write(r.text) | 0.003215 |
def create_method(self):
"""
Build the estimator method or function.
Returns
-------
:return : string
The built method as string.
"""
n_indents = 0 if self.target_language in ['c', 'go'] else 1
method_type = 'separated.{}.method'.format(self.prefix)
method_temp = self.temp(method_type, n_indents=n_indents,
skipping=True)
return method_temp.format(**self.__dict__) | 0.004082 |
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(),
category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']):
"""Reformat a dataframe in etpinard's format for use in plot functions and sklearn models"""
possible_categories = ['Africa', 'Americas', 'Asia', 'Europe',
'Oceania'] if possible_categories is None else possible_categories
df.columns = clean_columns(df.columns)
df = pd.read_csv(df) if isinstance(df, str) else df
columns = clean_columns(list(columns))
df2 = pd.DataFrame(columns=columns)
df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories])
columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns])
for col, category_cols in columns:
df2[col] = np.concatenate([df[label].values for label in category_cols])
return df2 | 0.007805 |
def fill_in_table(self, table, worksheet, flags):
'''
Fills in any rows with missing right hand side data with empty cells.
'''
max_row = 0
min_row = sys.maxint
for row in table:
if len(row) > max_row:
max_row = len(row)
if len(row) < min_row:
min_row = len(row)
if max_row != min_row:
for row in table:
if len(row) < max_row:
row.extend([None]*(max_row-len(row))) | 0.003817 |
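The padding step in isolation, on plain data with builtins only (the original's `sys.maxint` suggests Python 2; the worksheet argument plays no role in the padding itself):

```python
# Padding a ragged table so every row has the same number of cells.
table = [[1, 2, 3], [4], [5, 6]]
max_row = max(len(row) for row in table)
for row in table:
    if len(row) < max_row:
        row.extend([None] * (max_row - len(row)))
print(table)   # [[1, 2, 3], [4, None, None], [5, 6, None]]
```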
def min(a, axis=None):
"""
Request the minimum of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
Parameters
----------
a : Array object
The object whose minimum is to be found.
axis : None, or int, or iterable of ints
Axis or axes along which the operation is performed. The default
(axis=None) is to perform the operation over all the dimensions of the
input array. The axis may be negative, in which case it counts from
the last to the first axis. If axis is a tuple of ints, the operation
is performed over multiple axes.
Returns
-------
out : Array
The Array representing the requested mean.
"""
axes = _normalise_axis(axis, a)
assert axes is not None and len(axes) == 1
return _Aggregation(a, axes[0],
_MinStreamsHandler, _MinMaskedStreamsHandler,
a.dtype, {}) | 0.001027 |
def create_form(self, label_columns=None, inc_columns=None,
description_columns=None, validators_columns=None,
extra_fields=None, filter_rel_fields=None):
"""
Converts a model to a form given
:param label_columns:
A dictionary with the column's labels.
:param inc_columns:
A list with the columns to include
:param description_columns:
A dictionary with a description for cols.
:param validators_columns:
A dictionary with WTForms validators ex::
validators={'personal_email':EmailValidator}
:param extra_fields:
A dictionary containing column names and a WTForm
Form fields to be added to the form, these fields do not
exist on the model itself ex::
extra_fields={'some_col':BooleanField('Some Col', default=False)}
:param filter_rel_fields:
A filter to be applied on relationships
"""
label_columns = label_columns or {}
inc_columns = inc_columns or []
description_columns = description_columns or {}
validators_columns = validators_columns or {}
extra_fields = extra_fields or {}
form_props = {}
for col_name in inc_columns:
if col_name in extra_fields:
form_props[col_name] = extra_fields.get(col_name)
else:
self._convert_col(col_name, self._get_label(col_name, label_columns),
self._get_description(col_name, description_columns),
self._get_validators(col_name, validators_columns),
filter_rel_fields, form_props)
return type('DynamicForm', (DynamicForm,), form_props) | 0.004188 |
def print_validation_errors(result):
""" Accepts validation result object and prints report (in red)"""
click.echo(red('\nValidation failed:'))
click.echo(red('-' * 40))
messages = result.get_messages()
for property in messages.keys():
click.echo(yellow(property + ':'))
for error in messages[property]:
click.echo(red('* ' + error))
click.echo('') | 0.002475 |
def notify(correlation_id, components, args = None):
"""
Notifies multiple components.
To be notified components must implement [[INotifiable]] interface.
If they don't the call to this method has no effect.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param components: a list of components that are to be notified.
:param args: notification arguments.
"""
if components == None:
return
args = args if args != None else Parameters()
for component in components:
Notifier.notify_one(correlation_id, component, args) | 0.010355 |
def _main(self, client, fileobj, bucket, key, extra_args):
"""
:param client: The client to use when calling PutObject
:param fileobj: The file to upload.
:param bucket: The name of the bucket to upload to
:param key: The name of the key to upload to
:param extra_args: A dictionary of any extra arguments that may be
used in the upload.
"""
with fileobj as body:
client.put_object(Bucket=bucket, Key=key, Body=body, **extra_args) | 0.003861 |
def listFileArray(self):
"""
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset,
non-wildcarded block_name or non-wildcarded lfn list is required.
The combination of a non-wildcarded dataset or block_name with an wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided run only run_num=single-run-number is allowed
* When lfn list is present, no run or lumi list is allowed.
* When run_num =1 is present, logical_file_name should be present too.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run , run ranges, and run list. Possible format are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. Max length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections. Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file, when=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
ret = []
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy("files", data, True)
if 'sumOverLumi' in data and data['sumOverLumi'] ==1:
if ('logical_file_name' in data and isinstance(data['logical_file_name'], list)) \
or ('run_num' in data and isinstance(data['run_num'], list)):
dbsExceptionHandler("dbsException-invalid-input",
"When sumOverLumi=1, no input can be a list becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
if 'lumi_list' in data and data['lumi_list']:
if 'sumOverLumi' in data and data['sumOverLumi'] ==1:
dbsExceptionHandler("dbsException-invalid-input",
"When lumi_list is given, sumOverLumi must set to 0 becaue nesting of WITH clause within WITH clause not supported yet by Oracle.", self.logger.exception)
data['lumi_list'] = self.dbsUtils2.decodeLumiIntervals(data['lumi_list'])
if 'run_num' not in data.keys() or not data['run_num'] or data['run_num'] ==-1 :
dbsExceptionHandler("dbsException-invalid-input",
"When lumi_list is given, require a single run_num.", self.logger.exception)
#check if run_num =1 w/o lfn
if ('logical_file_name' not in data or not data['logical_file_name']) and 'run_num' in data:
if isinstance(data['run_num'], list):
if 1 in data['run_num'] or '1' in data['run_num']:
raise dbsExceptionHandler("dbsException-invalid-input",
'files API does not support run_num=1 without logical_file_name.', self.logger.exception)
else:
if data['run_num'] == 1 or data['run_num'] == '1':
raise dbsExceptionHandler("dbsException-invalid-input",
'files API does not support run_num=1 without logical_file_name.', self.logger.exception)
#Because CMSWEB has a 300 second response time, we have to limit the array size to make sure that
#the API can finish within 300 seconds. See github issues #465 for the test results.
# YG May-20-2015
max_array_size = 1000
if ( 'run_num' in data.keys() and isinstance(data['run_num'], list) and len(data['run_num'])>max_array_size)\
or ('lumi_list' in data.keys() and isinstance(data['lumi_list'], list) and len(data['lumi_list'])>max_array_size)\
or ('logical_file_name' in data.keys() and isinstance(data['logical_file_name'], list) and len(data['logical_file_name'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listFileArray is %s." %max_array_size, self.logger.exception)
#
ret = self.dbsFile.listFiles(input_body=data)
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listFileArray. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
for item in ret:
yield item | 0.010044 |
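A minimal sketch of a request body this handler accepts; the field names follow the docstring above and the values are illustrative only:

payload = {
    "logical_file_name": ["/store/example/file1.root", "/store/example/file2.root"],
    "detail": True,
    "validFileOnly": 1,
}
# Note: adding sumOverLumi=1 to this payload would be rejected above,
# because logical_file_name is given as a list.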
def launch(self, args=None):
"""
This method triggers the parsing of arguments.
"""
self.options = self.parse_args(args)
if self.options.saveinputmeta:
# save original input options
self.save_input_meta()
if self.options.inputmeta:
# read new options from JSON file
self.options = self.get_options_from_file(self.options.inputmeta)
self.run(self.options)
# if required save meta data for the output after running the plugin app
if self.options.saveoutputmeta:
self.save_output_meta() | 0.004878 |
def run_simulation(
t, y0=None, volume=1.0, model=None, solver='ode',
is_netfree=False, species_list=None, without_reset=False,
return_type='matplotlib', opt_args=(), opt_kwargs=None,
structures=None, observers=(), progressbar=0, rndseed=None,
factory=None, ## deprecated
**kwargs):
"""Run a simulation with the given model and plot the result on IPython
notebook with matplotlib.
Parameters
----------
t : array or Real
A sequence of time points for which to solve for 'm'.
y0 : dict
Initial condition.
volume : Real or Real3, optional
A size of the simulation volume.
Keyword 'v' is a shortcut for specifying 'volume'.
model : Model, optional
Keyword 'm' is a shortcut for specifying 'model'.
solver : str, tuple or Factory, optional
Solver type. Choose one from 'ode', 'gillespie', 'spatiocyte', 'meso',
'bd' and 'egfrd'. Default is 'ode'.
When tuple is given, the first value must be str as explained above.
All the rest is used as arguments for the corresponding factory class.
Keyword 's' is a shortcut for specifying 'solver'.
species_list : list of str, optional
A list of names of Species observed. If None, log all.
Default is None.
return_type : str, optional
Choose a type of return value from 'array', 'observer',
'matplotlib', 'nyaplot', 'world', 'dataframe', 'none' or None.
If None or 'none', return and plot nothing. Default is 'matplotlib'.
'dataframe' requires numpy and pandas libraries.
Keyword 'r' is a shortcut for specifying 'return_type'.
opt_args: list, tuple or dict, optional
Arguments for plotting. If return_type suggests no plotting, just ignored.
opt_kwargs: dict, optional
Arguments for plotting. If return_type suggests no plotting or
opt_args is a list or tuple, just ignored.
i.e.) viz.plot_number_observer(obs, *opt_args, **opt_kwargs)
is_netfree: bool, optional
Whether the model is netfree or not. When a model is given as an
argument, just ignored. Default is False.
structures : list or dict, optional
A dictionary which gives pairs of a name and shape of structures.
Not fully supported yet.
observers : Observer or list, optional
A list of extra observer references.
progressbar : float, optional
A timeout for a progress bar in seconds.
When the value is not more than 0, show nothing.
Default is 0.
rndseed : int, optional
A random seed for a simulation.
This argument will be ignored when 'solver' is given NOT as a string.
Returns
-------
value : list, TimingNumberObserver, World or None
Return a value suggested by ``return_type``.
When ``return_type`` is 'array', return a time course data.
When ``return_type`` is 'observer', return an observer.
When ``return_type`` is 'world', return the last state of ``World``.
Return nothing if else.
"""
y0 = y0 or {}
opt_kwargs = opt_kwargs or {}
structures = structures or {}
for key, value in kwargs.items():
if key == 'r':
return_type = value
elif key == 'v':
volume = value
elif key == 's':
solver = value
elif key == 'm':
model = value
else:
raise ValueError(
"An unknown keyword argument was given [{}={}]".format(key, value))
import ecell4_base
if unit.HAS_PINT:
if isinstance(t, unit._Quantity):
if unit.STRICT and not unit.check_dimensionality(t, '[time]'):
raise ValueError("Cannot convert [t] from '{}' ({}) to '[time]'".format(t.dimensionality, t.u))
t = t.to_base_units().magnitude
if isinstance(volume, unit._Quantity):
if unit.STRICT:
if isinstance(volume.magnitude, ecell4_base.core.Real3) and not unit.check_dimensionality(volume, '[length]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[length]'".format(
volume.dimensionality, volume.u))
elif not unit.check_dimensionality(volume, '[volume]'):
raise ValueError("Cannot convert [volume] from '{}' ({}) to '[volume]'".format(
volume.dimensionality, volume.u))
volume = volume.to_base_units().magnitude
if not isinstance(solver, str) and isinstance(solver, collections.Iterable):
solver = [
value.to_base_units().magnitude if isinstance(value, unit._Quantity) else value
for value in solver]
if factory is not None:
# f = factory #XXX: will be deprecated in the future. just use solver
raise ValueError(
"Argument 'factory' is no longer available. Use 'solver' instead.")
elif isinstance(solver, str):
f = get_factory(solver)
elif isinstance(solver, collections.Iterable):
f = get_factory(*solver)
else:
f = solver
if rndseed is not None:
f = f.rng(ecell4_base.core.GSLRandomNumberGenerator(rndseed))
if model is None:
model = get_model(is_netfree, without_reset)
w = f.world(volume)
edge_lengths = w.edge_lengths()
if unit.HAS_PINT:
y0 = y0.copy()
for key, value in y0.items():
if isinstance(value, unit._Quantity):
if not unit.STRICT:
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[substance]'):
y0[key] = value.to_base_units().magnitude
elif unit.check_dimensionality(value, '[concentration]'):
volume = w.volume() if not isinstance(w, ecell4_base.spatiocyte.SpatiocyteWorld) else w.actual_volume()
y0[key] = value.to_base_units().magnitude * volume
else:
raise ValueError(
"Cannot convert a quantity for [{}] from '{}' ({}) to '[substance]'".format(
key, value.dimensionality, value.u))
if not isinstance(w, ecell4_base.ode.ODEWorld):
w.bind_to(model)
for (name, shape) in (structures.items() if isinstance(structures, dict) else structures):
if isinstance(shape, str):
w.add_structure(ecell4_base.core.Species(name), get_shape(shape))
elif isinstance(shape, collections.Iterable):
w.add_structure(ecell4_base.core.Species(name), get_shape(*shape))
else:
w.add_structure(ecell4_base.core.Species(name), shape)
if isinstance(w, ecell4_base.ode.ODEWorld):
# w.bind_to(model) # stop binding for ode
for serial, n in y0.items():
w.set_value(ecell4_base.core.Species(serial), n)
else:
# w.bind_to(model)
for serial, n in y0.items():
w.add_molecules(ecell4_base.core.Species(serial), n)
if not isinstance(t, collections.Iterable):
t = [float(t) * i / 100 for i in range(101)]
if species_list is not None:
obs = ecell4_base.core.TimingNumberObserver(t, species_list)
else:
obs = ecell4_base.core.TimingNumberObserver(t)
sim = f.simulator(w, model)
# sim = f.simulator(w)
if not isinstance(observers, collections.Iterable):
observers = (observers, )
if return_type not in ('world', 'none', None):
observers = (obs, ) + tuple(observers)
if progressbar > 0:
from .progressbar import progressbar as pb
pb(sim, timeout=progressbar, flush=True).run(t[-1], observers)
else:
sim.run(t[-1], observers)
if return_type in ('matplotlib', 'm'):
if isinstance(opt_args, (list, tuple)):
viz.plot_number_observer(obs, *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
viz.plot_number_observer(obs, **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ('nyaplot', 'n'):
if isinstance(opt_args, (list, tuple)):
viz.plot_number_observer_with_nya(obs, *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
viz.plot_number_observer_with_nya(obs, **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ('observer', 'o'):
return obs
elif return_type in ('array', 'a'):
return obs.data()
elif return_type in ('dataframe', 'd'):
import pandas
import numpy
data = numpy.array(obs.data()).T
return pandas.concat([
pandas.DataFrame(dict(Time=data[0], Value=data[i + 1],
Species=sp.serial(), **opt_kwargs))
for i, sp in enumerate(obs.targets())])
elif return_type in ('world', 'w'):
return sim.world()
elif return_type is None or return_type in ('none', ):
return
else:
raise ValueError(
            'An invalid value for "return_type" was given [{}].'.format(str(return_type))
+ 'Use "none" if you need nothing to be returned.') | 0.001584 |
def shrink_wrap(self):
"""Tightly bound the current text respecting current padding."""
self.frame.size = (self.text_size[0] + self.padding[0] * 2,
self.text_size[1] + self.padding[1] * 2) | 0.008621 |
def getKwargs(self, args, values={}, get=Get()):
"""Gets necessary data from user input.
:args: Dictionary of arguments supplied in command line.
:values: Default values dictionary, supplied for editing.
:get: Object used to get values from user input.
:returns: A dictionary containing data gathered from user input.
"""
kwargs = dict()
for field in ['name', 'priority', 'comment', 'parent']:
fvalue = args.get(field) or get.get(field, values.get(field))
if fvalue is not None:
kwargs[field] = fvalue
return kwargs | 0.00318 |
def learnObject(self,
objectDescription,
randomLocation=False,
useNoise=False,
noisyTrainingTime=1):
"""
Train the network to recognize the specified object. Move the sensor to one of
its features and activate a random location representation in the location
layer. Move the sensor over the object, updating the location representation
through path integration. At each point on the object, form reciprocal
    connections between the representation of the location and the representation
of the sensory input.
@param objectDescription (dict)
For example:
{"name": "Object 1",
"features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"},
{"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]}
@return locationsAreUnique (bool)
True if this object was assigned a unique set of locations. False if a
location on this object has the same location representation as another
location somewhere else.
"""
self.reset()
self.column.activateRandomLocation()
locationsAreUnique = True
if randomLocation or useNoise:
numIters = noisyTrainingTime
else:
numIters = 1
for i in xrange(numIters):
for iFeature, feature in enumerate(objectDescription["features"]):
self._move(feature, randomLocation=randomLocation, useNoise=useNoise)
featureSDR = self.features[feature["name"]]
self._sense(featureSDR, learn=True, waitForSettle=False)
locationRepresentation = self.column.getSensoryAssociatedLocationRepresentation()
self.locationRepresentations[(objectDescription["name"],
iFeature)].append(locationRepresentation)
self.inputRepresentations[(objectDescription["name"],
iFeature, feature["name"])] = (
self.column.L4.getWinnerCells())
locationTuple = tuple(locationRepresentation)
locationsAreUnique = (locationsAreUnique and
locationTuple not in self.representationSet)
self.representationSet.add(tuple(locationRepresentation))
self.learnedObjects.append(objectDescription)
return locationsAreUnique | 0.005582 |
def get_datasets(dataset_ids,**kwargs):
"""
Get a single dataset, by ID
"""
user_id = int(kwargs.get('user_id'))
datasets = []
if len(dataset_ids) == 0:
return []
try:
dataset_rs = db.DBSession.query(Dataset.id,
Dataset.type,
Dataset.unit_id,
Dataset.name,
Dataset.hidden,
Dataset.cr_date,
Dataset.created_by,
DatasetOwner.user_id,
null().label('metadata'),
                                 case([(and_(Dataset.hidden=='Y', DatasetOwner.user_id == None), None)],
else_=Dataset.value).label('value')).filter(
Dataset.id.in_(dataset_ids)).outerjoin(DatasetOwner,
and_(DatasetOwner.dataset_id==Dataset.id,
DatasetOwner.user_id==user_id)).all()
#convert the value row into a string as it is returned as a binary
for dataset_row in dataset_rs:
dataset_dict = dataset_row._asdict()
if dataset_row.value is not None:
dataset_dict['value'] = str(dataset_row.value)
if dataset_row.hidden == 'N' or (dataset_row.hidden == 'Y' and dataset_row.user_id is not None):
metadata = db.DBSession.query(Metadata).filter(Metadata.dataset_id == dataset_row.id).all()
dataset_dict['metadata'] = metadata
else:
dataset_dict['metadata'] = []
datasets.append(namedtuple('Dataset', dataset_dict.keys())(**dataset_dict))
except NoResultFound:
raise ResourceNotFoundError("Datasets not found.")
return datasets | 0.012783 |
def _twosComplement(x, bits=16):
"""Calculate the two's complement of an integer.
Then also negative values can be represented by an upper range of positive values.
See https://en.wikipedia.org/wiki/Two%27s_complement
Args:
* x (int): input integer.
* bits (int): number of bits, must be > 0.
Returns:
An int, that represents the two's complement of the input.
Example for bits=8:
==== =======
x returns
==== =======
0 0
1 1
127 127
-128 128
-127 129
-1 255
==== =======
"""
_checkInt(bits, minvalue=0, description='number of bits')
_checkInt(x, description='input')
upperlimit = 2 ** (bits - 1) - 1
lowerlimit = -2 ** (bits - 1)
if x > upperlimit or x < lowerlimit:
raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \
.format(x, lowerlimit, upperlimit, bits))
    # Calculate the two's complement
if x >= 0:
return x
return x + 2 ** bits | 0.004647 |
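A standalone sketch of the same arithmetic; the real function above additionally validates its inputs via _checkInt:

def twos_complement(x, bits=16):
    # negative values map onto the upper half of the unsigned range
    return x if x >= 0 else x + 2 ** bits

# matches the bits=8 table in the docstring above
assert twos_complement(0, bits=8) == 0
assert twos_complement(127, bits=8) == 127
assert twos_complement(-128, bits=8) == 128
assert twos_complement(-1, bits=8) == 255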
def get_default_config(self):
"""
Returns default collector settings.
"""
config = super(FilesCollector, self).get_default_config()
config.update({
'path': '.',
'dir': '/tmp/diamond',
'delete': False,
})
return config | 0.006472 |
def cancelar_ultima_venda(self, chave_cfe, dados_cancelamento):
"""Sobrepõe :meth:`~satcfe.base.FuncoesSAT.cancelar_ultima_venda`.
:return: Uma resposta SAT especializada em ``CancelarUltimaVenda``.
:rtype: satcfe.resposta.cancelarultimavenda.RespostaCancelarUltimaVenda
"""
retorno = super(ClienteSATLocal, self).\
cancelar_ultima_venda(chave_cfe, dados_cancelamento)
return RespostaCancelarUltimaVenda.analisar(retorno) | 0.006173 |
def read_thrift(file_obj, ttype):
"""Read a thrift structure from the given fo."""
from thrift.transport.TTransport import TFileObjectTransport, TBufferedTransport
starting_pos = file_obj.tell()
# set up the protocol chain
ft = TFileObjectTransport(file_obj)
bufsize = 2 ** 16
# for accelerated reading ensure that we wrap this so that the CReadable transport can be used.
bt = TBufferedTransport(ft, bufsize)
pin = TCompactProtocol(bt)
# read out type
obj = ttype()
obj.read(pin)
    # The read will actually overshoot due to the buffering that thrift does. Seek backwards to the correct spot.
buffer_pos = bt.cstringio_buf.tell()
ending_pos = file_obj.tell()
blocks = ((ending_pos - starting_pos) // bufsize) - 1
if blocks < 0:
blocks = 0
file_obj.seek(starting_pos + blocks * bufsize + buffer_pos)
return obj | 0.004469 |
def cache(handle=lambda *args, **kwargs: None, args=UNDEFINED, kwargs=UNDEFINED, ignore=UNDEFINED, call_stack=UNDEFINED, callback=UNDEFINED, subsequent_rvalue=UNDEFINED):
"""
Store a call descriptor
:param lambda handle: Any callable will work here. The method to cache.
:param tuple args: The arguments to the method.
:param dict kwargs: The keyword arguments to the method.
:param tuple(list(int), list(str)) ignore: A tuple of arguments to ignore. The first element should be a list of positional arguments. The second should be a list of keys for keyword arguments.
:param caliendo.hooks.CallStack call_stack: The stack of calls thus far for this patch.
    :param function callback: The callback function to execute each time there is a cache hit for 'handle' (the actual mechanism is more complicated, but this is what it boils down to)
    :param mixed subsequent_rvalue: If passed, this will be the return value each time this method is run, regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out.
:returns: The value of handle(*args, **kwargs)
"""
if args == UNDEFINED:
args = tuple()
if kwargs == UNDEFINED:
kwargs = {}
if not USE_CALIENDO:
return handle(*args, **kwargs)
filtered_args = ignore.filter_args(args) if ignore is not UNDEFINED else args
    filtered_kwargs = ignore.filter_kwargs(kwargs) if ignore is not UNDEFINED else kwargs
trace_string = util.get_stack(handle.__name__)
call_hash = get_hash(filtered_args, trace_string, filtered_kwargs, ignore)
cd = call_descriptor.fetch(call_hash)
modify_or_replace = 'no'
util.set_current_hash(call_hash)
if config.CALIENDO_PROMPT:
display_name = ("(test %s): " % caliendo.util.current_test) if caliendo.util.current_test else ''
if hasattr(handle, '__module__') and hasattr(handle, '__name__'):
display_name += "%s.%s" % (handle.__module__, handle.__name__)
else:
display_name += handle
if cd:
modify_or_replace = prompt.should_modify_or_replace_cached(display_name)
if not cd or modify_or_replace == 'replace':
returnval = handle(*args, **kwargs)
elif cd and modify_or_replace == 'modify':
returnval = prompt.modify_cached_value(cd.returnval,
calling_method=display_name,
calling_test='')
if cd and subsequent_rvalue != UNDEFINED:
return subsequent_rvalue
elif subsequent_rvalue != UNDEFINED:
original_rvalue = returnval
returnval = subsequent_rvalue
if not cd or modify_or_replace != 'no':
if isinstance(handle, types.MethodType):
filtered_args = list(filtered_args)
filtered_args[0] = util.serialize_item(filtered_args[0])
filtered_args = tuple(filtered_args)
cd = call_descriptor.CallDescriptor( hash = call_hash,
stack = trace_string,
method = handle.__name__,
returnval = returnval,
args = filtered_args,
kwargs = filtered_kwargs )
cd.save()
util.set_last_hash(cd.hash)
if call_stack != UNDEFINED:
call_stack.add(cd)
if callback != UNDEFINED:
call_stack.add_hook(Hook(call_descriptor_hash=cd.hash,
callback=callback))
if subsequent_rvalue == UNDEFINED:
return cd.returnval
else:
return original_rvalue | 0.009021 |
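A hedged usage sketch: wrapping a slow call so that repeated invocations with the same arguments can be served from the stored call descriptor. fetch_report is a made-up function and caliendo's cache store is assumed to be configured:

def fetch_report(account_id, month):
    # stands in for a slow network or database call
    return {'account': account_id, 'month': month}

result = cache(fetch_report, args=('acct-42', '2015-06'))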
def p_scalar_group(self, p):
"""
scalar_group : SCALAR
| scalar_group SCALAR
"""
if len(p) == 2:
p[0] = (str(p[1]),)
if len(p) == 3:
p[0] = p[1] + (str(p[2]),)
if len(p) == 4:
p[0] = p[1] + (str(p[3]),) | 0.006349 |
def init(driverName=None, debug=False):
'''
Constructs a new TTS engine instance or reuses the existing instance for
the driver name.
@param driverName: Name of the platform specific driver to use. If
None, selects the default driver for the operating system.
@type: str
@param debug: Debugging output enabled or not
@type debug: bool
@return: Engine instance
@rtype: L{engine.Engine}
'''
try:
eng = _activeEngines[driverName]
except KeyError:
eng = Engine(driverName, debug)
_activeEngines[driverName] = eng
return eng | 0.001656 |
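A usage sketch that assumes the pyttsx-style Engine returned here exposes say() and runAndWait(); those methods are not shown in this snippet:

engine = init()              # constructs or reuses the default driver's engine
engine.say('Hello there')    # assumed Engine API
engine.runAndWait()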
def reset_dirty_flags(self):
"""Set all marked_dirty flags of the state machine to false."""
for sm_id, sm in self.state_machines.items():
sm.marked_dirty = False | 0.010526 |
def get_box(self, box_key = None, sort_by = None):
'''Gets a list of one/all box objects. Performs a single GET.
To go deeper individual boxes need to be polled for their contents.
This is a directory for what we could ask for.
Args:
box_key key for the target box (default: None i.e. ALL)
sort_by in desc order by 'creationTimestamp' or 'lastUpdatedTimestamp'
returns (status code for the GET request, dict of box or a list thereof)
'''
uri = '/'.join([
self.api_uri,
self.boxes_suffix
])
if box_key:
uri = '/'.join([
uri,
box_key
])
if sort_by:
if sort_by in ['creationTimestamp', 'lastUpdatedTimestamp']:
uri += self.sort_by_postfix + sort_by
else:
return requests.codes.bad_request, {'success' : 'False',
                                         'error': 'sortBy needs to be \'creationTimestamp\' or \'lastUpdatedTimestamp\''}
return self._req('get', uri) | 0.04 |
def infer_call_result(self, caller, context):
"""
The boundnode of the regular context with a function called
on ``object.__new__`` will be of type ``object``,
which is incorrect for the argument in general.
If no context is given the ``object.__new__`` call argument will
        be correctly inferred, except when inside a call that requires
the additional context (such as a classmethod) of the boundnode
to determine which class the method was called from
"""
# If we're unbound method __new__ of builtin object, the result is an
# instance of the class given as first argument.
if (
self._proxied.name == "__new__"
and self._proxied.parent.frame().qname() == "%s.object" % BUILTINS
):
if caller.args:
node_context = context.extra_context.get(caller.args[0])
infer = caller.args[0].infer(context=node_context)
else:
infer = []
return (Instance(x) if x is not util.Uninferable else x for x in infer)
return self._proxied.infer_call_result(caller, context) | 0.002571 |
def make_grid(rect, cells={}, num_rows=0, num_cols=0, padding=None,
inner_padding=None, outer_padding=None, row_heights={}, col_widths={},
default_row_height='expand', default_col_width='expand'):
"""
Return rectangles for each cell in the specified grid. The rectangles are
returned in a dictionary where the keys are (row, col) tuples.
"""
grid = Grid(
bounding_rect=rect,
min_cell_rects=cells,
num_rows=num_rows,
num_cols=num_cols,
padding=padding,
inner_padding=inner_padding,
outer_padding=outer_padding,
row_heights=row_heights,
col_widths=col_widths,
default_row_height=default_row_height,
default_col_width=default_col_width,
)
return grid.make_cells() | 0.005959 |
def ltcube(self, **kwargs):
""" return the name of a livetime cube file
"""
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
localpath = NameFactory.ltcube_format.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | 0.004545 |
def main_browse():
"""Entry point for command line use for browsing a JobArchive """
parser = argparse.ArgumentParser(usage="job_archive.py [options]",
description="Browse a job archive")
parser.add_argument('--jobs', action='store', dest='job_archive_table',
type=str, default='job_archive_temp2.fits', help="Job archive file")
parser.add_argument('--files', action='store', dest='file_archive_table',
type=str, default='file_archive_temp2.fits', help="File archive file")
parser.add_argument('--base', action='store', dest='base_path',
type=str, default=os.path.abspath('.'), help="File archive base path")
args = parser.parse_args(sys.argv[1:])
job_ar = JobArchive.build_archive(**args.__dict__)
job_ar.table.pprint() | 0.00463 |
def _cast_to_type(self, value):
""" Raise error if the value is not a dict """
if not isinstance(value, dict):
self.fail('invalid', value=value)
return value | 0.010363 |
def pagerank(graph, damping_factor=0.85, max_iterations=100, min_delta=0.00001):
"""
Compute and return the PageRank in an directed graph.
@type graph: digraph
@param graph: Digraph.
@type damping_factor: number
@param damping_factor: PageRank dumping factor.
@type max_iterations: number
@param max_iterations: Maximum number of iterations.
@type min_delta: number
@param min_delta: Smallest variation required to have a new iteration.
@rtype: Dict
@return: Dict containing all the nodes PageRank.
"""
nodes = graph.nodes()
graph_size = len(nodes)
if graph_size == 0:
return {}
min_value = (1.0-damping_factor)/graph_size #value for nodes without inbound links
    # initialize the page rank dict with 1/N for all nodes
pagerank = dict.fromkeys(nodes, 1.0/graph_size)
for i in range(max_iterations):
        diff = 0 # total difference compared to last iteration
# computes each node PageRank based on inbound links
for node in nodes:
rank = min_value
for referring_page in graph.incidents(node):
rank += damping_factor * pagerank[referring_page] / len(graph.neighbors(referring_page))
diff += abs(pagerank[node] - rank)
pagerank[node] = rank
#stop if PageRank has converged
if diff < min_delta:
break
return pagerank | 0.014696 |
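A minimal usage sketch with an in-memory stand-in for the digraph interface (nodes/incidents/neighbors) that pagerank relies on; TinyDigraph is illustrative, not the pygraph class:

class TinyDigraph:
    def __init__(self, edges):
        self._edges = edges                    # list of (src, dst) pairs
    def nodes(self):
        return sorted({n for edge in self._edges for n in edge})
    def neighbors(self, node):                 # outbound links
        return [dst for src, dst in self._edges if src == node]
    def incidents(self, node):                 # inbound links
        return [src for src, dst in self._edges if dst == node]

graph = TinyDigraph([("a", "b"), ("b", "c"), ("c", "a"), ("a", "c")])
print({node: round(rank, 3) for node, rank in pagerank(graph).items()})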
def process_paper(model_name, pmid):
"""Process a paper with the given pubmed identifier
Parameters
----------
model_name : str
The directory for the INDRA machine
pmid : str
The PMID to process.
Returns
-------
rp : ReachProcessor
A ReachProcessor containing the extracted INDRA Statements
in rp.statements.
txt_format : str
A string representing the format of the text
"""
json_directory = os.path.join(model_name, 'jsons')
json_path = os.path.join(json_directory, 'PMID%s.json' % pmid)
if pmid.startswith('api') or pmid.startswith('PMID'):
logger.warning('Invalid PMID: %s' % pmid)
# If the paper has been read, use the json output file
if os.path.exists(json_path):
rp = reach.process_json_file(json_path, citation=pmid)
txt_format = 'existing_json'
# If the paper has not been read, download the text and read
else:
try:
txt, txt_format = get_full_text(pmid, 'pmid')
except Exception:
return None, None
if txt_format == 'pmc_oa_xml':
rp = reach.process_nxml_str(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'elsevier_xml':
# Extract the raw text from the Elsevier XML
txt = elsevier_client.extract_text(txt)
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
elif txt_format == 'abstract':
rp = reach.process_text(txt, citation=pmid, offline=True,
output_fname=json_path)
else:
rp = None
if rp is not None:
check_pmids(rp.statements)
return rp, txt_format | 0.000546 |
def _end_of_century(self):
"""
Reset the date to the last day of the century
and the time to 23:59:59.999999.
:rtype: DateTime
"""
year = self.year - 1 - (self.year - 1) % YEARS_PER_CENTURY + YEARS_PER_CENTURY
return self.set(year, 12, 31, 23, 59, 59, 999999) | 0.009464 |
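A standalone sketch of the year arithmetic used above, assuming YEARS_PER_CENTURY is 100:

YEARS_PER_CENTURY = 100
for year in (1901, 1987, 2000, 2001):
    end = year - 1 - (year - 1) % YEARS_PER_CENTURY + YEARS_PER_CENTURY
    print(year, '->', end)   # 1901 -> 2000, 1987 -> 2000, 2000 -> 2000, 2001 -> 2100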
def is_stable(self,species):
'''
        This routine accepts input formatted like 'He-3' and checks against
        the stable_el list whether it occurs there. If it does, the routine
returns True, otherwise False.
Notes
-----
this method is designed to work with an se instance from
nugridse.py. In order to make it work with ppn.py some
additional work is required.
FH, April 20, 2013.
'''
element_name_of_iso = species.split('-')[0]
try:
a_of_iso = int(species.split('-')[1])
        except ValueError: # if the species name contains, in addition to the
                           # mass number, some letters (e.g. for an isomer), then
# we assume it is unstable. This is not correct but
# related to the fact that in nugridse.py we do not
# identify species properly by the three numbers A, Z
# and isomeric_state. We should do that!!!!!!
a_of_iso = 999
idp_of_element_in_stable_names = self.stable_names.index(element_name_of_iso)
if a_of_iso in self.stable_el[idp_of_element_in_stable_names][1:]:
return True
else:
return False | 0.014307 |
def _set_dscp_ttl_mode(self, v, load=False):
"""
Setter method for dscp_ttl_mode, mapped from YANG variable /interface/tunnel/dscp_ttl_mode (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_dscp_ttl_mode is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_dscp_ttl_mode() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pipe': {'value': 2}, u'uniform': {'value': 1}},), is_leaf=True, yang_name="dscp-ttl-mode", rest_name="dscp-ttl-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel dscp ttl mode', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """dscp_ttl_mode must be of a type compatible with enumeration""",
'defined-type': "brocade-gre-vxlan:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'pipe': {'value': 2}, u'uniform': {'value': 1}},), is_leaf=True, yang_name="dscp-ttl-mode", rest_name="dscp-ttl-mode", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Tunnel dscp ttl mode', u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-gre-vxlan', defining_module='brocade-gre-vxlan', yang_type='enumeration', is_config=True)""",
})
self.__dscp_ttl_mode = t
if hasattr(self, '_set'):
self._set() | 0.004764 |
def get_go2sectiontxt(self):
"""Return a dict with actual header and user GO IDs as keys and their sections as values."""
go2txt = {}
_get_secs = self.hdrobj.get_sections
hdrgo2sectxt = {h:" ".join(_get_secs(h)) for h in self.get_hdrgos()}
usrgo2hdrgo = self.get_usrgo2hdrgo()
for goid, ntgo in self.go2nt.items():
hdrgo = ntgo.GO if ntgo.is_hdrgo else usrgo2hdrgo[ntgo.GO]
go2txt[goid] = hdrgo2sectxt[hdrgo]
return go2txt | 0.007968 |
def addToTimeInv(self,*params):
'''
Adds any number of parameters to time_inv for this instance.
Parameters
----------
params : string
Any number of strings naming attributes to be added to time_inv
Returns
-------
None
'''
for param in params:
if param not in self.time_inv:
self.time_inv.append(param) | 0.007059 |
def clinvar_submission_lines(submission_objs, submission_header):
"""Create the lines to include in a Clinvar submission csv file from a list of submission objects and a custom document header
Args:
submission_objs(list): a list of objects (variants or casedata) to include in a csv file
submission_header(dict) : as in constants CLINVAR_HEADER and CASEDATA_HEADER, but with required fields only
Returns:
        submission_lines(list) a list of strings, each string represents a line of the clinvar csv file to be downloaded
"""
submission_lines = []
for submission_obj in submission_objs: # Loop over the submission objects. Each of these is a line
csv_line = []
for header_key, header_value in submission_header.items(): # header_keys are the same keys as in submission_objs
if header_key in submission_obj: # The field is filled in for this variant/casedata object
csv_line.append('"'+submission_obj.get(header_key)+'"')
            else: # Empty field for this variant/casedata object
csv_line.append('""')
submission_lines.append(','.join(csv_line))
return submission_lines | 0.009836 |
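A usage sketch with made-up header and variant dictionaries; the real CLINVAR_HEADER constant has many more columns than the two shown here:

submission_header = {'local_id': 'Local ID', 'chromosome': 'Chromosome'}
submission_objs = [
    {'local_id': 'var_1', 'chromosome': '7'},
    {'local_id': 'var_2'},                     # missing field becomes an empty cell
]
print(clinvar_submission_lines(submission_objs, submission_header))
# ['"var_1","7"', '"var_2",""']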
def get_feedback_from_submission(self, submission, only_feedback=False, show_everything=False, translation=gettext.NullTranslations()):
"""
        Get the feedback of a submission. If only_feedback is True, returns only a dictionary with the parsed feedback fields ("text" and "problems").
        Else, returns the full submission with those fields parsed in place.
If show_everything is True, feedback normally hidden is shown.
"""
if only_feedback:
submission = {"text": submission.get("text", None), "problems": dict(submission.get("problems", {}))}
if "text" in submission:
submission["text"] = ParsableText(submission["text"], submission["response_type"], show_everything, translation).parse()
if "problems" in submission:
for problem in submission["problems"]:
if isinstance(submission["problems"][problem], str): # fallback for old-style submissions
submission["problems"][problem] = (submission.get('result', 'crash'), ParsableText(submission["problems"][problem],
submission["response_type"],
show_everything, translation).parse())
else: # new-style submission
submission["problems"][problem] = (submission["problems"][problem][0], ParsableText(submission["problems"][problem][1],
submission["response_type"],
show_everything, translation).parse())
return submission | 0.007127 |
def extend(self, collection):
""" L.extend(iterable) -- extend list by appending elements from the iterable """
if type(collection) is list:
if self._col_dict is None and self._col_list is None:
self._col_list = collection
else:
self._col_list += collection
self._sync_list_to_dict(collection)
elif type(collection) is dict:
if self._col_dict is None and self._col_list is None:
self._col_dict = collection
self._col_list = collection.values()
else:
for key, value in collection.items():
self._set_key(key, value)
else:
raise NotImplementedError() | 0.003995 |
def retention_policy_add(database,
name,
duration,
replication,
default=False,
user=None,
password=None,
host=None,
port=None):
'''
Add a retention policy.
database
The database to operate on.
name
Name of the policy to modify.
duration
How long InfluxDB keeps the data.
replication
How many copies of the data are stored in the cluster.
default
Whether this policy should be the default or not. Default is False.
CLI Example:
.. code-block:: bash
salt '*' influxdb.retention_policy_add metrics default 1d 1
'''
client = _client(user=user, password=password, host=host, port=port)
client.create_retention_policy(name, duration, replication, database, default)
return True | 0.002043 |
def _commonWordStart(self, words):
"""Get common start of all words.
i.e. for ['blablaxxx', 'blablayyy', 'blazzz'] common start is 'bla'
"""
if not words:
return ''
length = 0
firstWord = words[0]
otherWords = words[1:]
for index, char in enumerate(firstWord):
if not all([word[index] == char for word in otherWords]):
break
length = index + 1
return firstWord[:length] | 0.00404 |
def exists(device=''):
'''
Check to see if the partition exists
CLI Example:
.. code-block:: bash
salt '*' partition.exists /dev/sdb1
'''
if os.path.exists(device):
dev = os.stat(device).st_mode
if stat.S_ISBLK(dev):
return True
return False | 0.003226 |
def parallel(func, arr:Collection, max_workers:int=None):
"Call `func` on every element of `arr` in parallel using `max_workers`."
max_workers = ifnone(max_workers, defaults.cpus)
if max_workers<2: results = [func(o,i) for i,o in progress_bar(enumerate(arr), total=len(arr))]
else:
with ProcessPoolExecutor(max_workers=max_workers) as ex:
futures = [ex.submit(func,o,i) for i,o in enumerate(arr)]
results = []
for f in progress_bar(concurrent.futures.as_completed(futures), total=len(arr)): results.append(f.result())
if any([o is not None for o in results]): return results | 0.025078 |
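A usage sketch: func must accept (value, index) and, because workers run in separate processes, must be a picklable top-level function; note that results are gathered in completion order, not input order:

def square(value, index):
    return value * value

results = parallel(square, list(range(10)), max_workers=4)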
def p_identlist(self, t):
'''identlist : IDENT
| NOT IDENT
| IDENT AND identlist
| NOT IDENT AND identlist
'''
if len(t)==5 :
#print(t[1],t[2],t[3],t[4])
t[0] = t[1]+t[2]+t[3]+t[4]
elif len(t)==4 :
#print(t[1],t[2],t[3])
t[0] = t[1]+t[2]+t[3]
elif len(t)==3 :
#print(t[1],t[2])
t[0] = t[1]+t[2]
elif len(t)==2 :
#print(t[0],t[1])
t[0]=t[1]
else:
print("Syntax error at '",str(t),"'") | 0.052727 |
def _openssl_key_iv(passphrase, salt):
"""
Returns a (key, iv) tuple that can be used in AES symmetric encryption
from a *passphrase* (a byte or unicode string) and *salt* (a byte array).
"""
def _openssl_kdf(req):
if hasattr(passphrase, 'encode'):
passwd = passphrase.encode('ascii', 'ignore')
else:
passwd = passphrase
prev = b''
while req > 0:
digest = hashes.Hash(hashes.MD5(), backend=default_backend())
digest.update(prev + passwd + salt)
prev = digest.finalize()
req -= IV_BLOCK_SIZE
yield prev
assert passphrase is not None
assert salt is not None
# AES key: 32 bytes, IV: 16 bytes
mat = b''.join([x for x in _openssl_kdf(32 + IV_BLOCK_SIZE)])
return mat[0:32], mat[32:32 + IV_BLOCK_SIZE] | 0.001174 |
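A standalone sketch of the same EVP_BytesToKey-style MD5 derivation using only hashlib; the function above relies on the cryptography package and an IV_BLOCK_SIZE assumed to be 16:

import hashlib
import os

def openssl_key_iv(passphrase: bytes, salt: bytes, key_len=32, iv_len=16):
    material, prev = b'', b''
    while len(material) < key_len + iv_len:
        prev = hashlib.md5(prev + passphrase + salt).digest()
        material += prev
    return material[:key_len], material[key_len:key_len + iv_len]

key, iv = openssl_key_iv(b'secret passphrase', os.urandom(8))
assert len(key) == 32 and len(iv) == 16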
def _update_structure_lines(self):
'''ATOM and HETATM lines may be altered by function calls. When this happens, this function should be called to keep self.structure_lines up to date.'''
structure_lines = []
atom_chain_order = []
chain_atoms = {}
for line in self.lines:
linetype = line[0:6]
if linetype == 'ATOM ' or linetype == 'HETATM' or linetype == 'TER ':
chain_id = line[21]
self.residue_types.add(line[17:20].strip())
if missing_chain_ids.get(self.pdb_id):
chain_id = missing_chain_ids[self.pdb_id]
structure_lines.append(line)
if (chain_id not in atom_chain_order) and (chain_id != ' '):
atom_chain_order.append(chain_id)
if linetype == 'ATOM ':
atom_type = line[12:16].strip()
if atom_type:
chain_atoms[chain_id] = chain_atoms.get(chain_id, set())
chain_atoms[chain_id].add(atom_type)
if linetype == 'ENDMDL':
colortext.warning("ENDMDL detected: Breaking out early. We do not currently handle NMR structures properly.")
break
self.structure_lines = structure_lines
self.atom_chain_order = atom_chain_order
self.chain_atoms = chain_atoms | 0.004258 |