def _results(self, product, path, cls=Results, **kwargs):
"""Returns _results for the specified API path with the specified **kwargs parameters"""
if product != 'account-information' and self.rate_limit and not self.limits_set and not self.limits:
self._rate_limit()
uri = '/'.join(('{0}://api.domaintools.com'.format('https' if self.https else 'http'), path.lstrip('/')))
parameters = self.default_parameters.copy()
parameters['api_username'] = self.username
if self.https:
parameters['api_key'] = self.key
else:
parameters['timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
parameters['signature'] = hmac(self.key.encode('utf8'), ''.join([self.username, parameters['timestamp'],
path]).encode('utf8'),
digestmod=sha1).hexdigest()
parameters.update(dict((key, str(value).lower() if value in (True, False) else value) for
key, value in kwargs.items() if value is not None))
return cls(self, product, uri, **parameters)
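# Usage sketch (illustrative, not from the original source): how the HMAC-SHA1
# request signature above is assembled when HTTPS is disabled. The username,
# key, and path values below are hypothetical placeholders.
from datetime import datetime
from hashlib import sha1
from hmac import new as hmac

username = 'example_user'          # hypothetical
key = 'example_api_key'            # hypothetical
path = '/v1/example.com/whois'     # hypothetical
timestamp = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
signature = hmac(key.encode('utf8'),
                 ''.join([username, timestamp, path]).encode('utf8'),
                 digestmod=sha1).hexdigest()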
def get_file_properties(
client, fileshare, prefix, timeout=None, snapshot=None):
# type: (azure.storage.file.FileService, str, str, int, str) ->
# azure.storage.file.models.File
"""Get file properties
:param FileService client: file service client
:param str fileshare: file share name
:param str prefix: path prefix
:param int timeout: timeout
:param str snapshot: snapshot
:rtype: azure.storage.file.models.File
:return: file properties
"""
dirname, fname, ss = parse_file_path(prefix)
if ss is not None:
if snapshot is not None:
raise RuntimeError(
'snapshot specified as {} but parsed {} from prefix {}'.format(
snapshot, ss, prefix))
else:
snapshot = ss
try:
return client.get_file_properties(
share_name=fileshare,
directory_name=dirname,
file_name=fname,
timeout=timeout,
snapshot=snapshot,
)
except azure.common.AzureMissingResourceHttpError:
return None
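# Usage sketch (assumes `client` is an azure.storage.file.FileService instance
# and that parse_file_path() splits the prefix into directory, file name and an
# optional snapshot; the share and path names are hypothetical):
props = get_file_properties(client, 'myshare', 'logs/app.log', timeout=30)
if props is None:
    print('file not found on share')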
def _get_ann_labels_data(self, order_ann, bins_ann):
"""Generate ColumnDataSource dictionary for annular labels.
"""
if self.yticks is None:
return dict(x=[], y=[], text=[], angle=[])
mapping = self._compute_tick_mapping("radius", order_ann, bins_ann)
values = [(label, radius[0]) for label, radius in mapping.items()]
labels, radius = zip(*values)
radius = np.array(radius)
y_coord = np.sin(np.deg2rad(self.yrotation)) * radius + self.max_radius
x_coord = np.cos(np.deg2rad(self.yrotation)) * radius + self.max_radius
return dict(x=x_coord,
y=y_coord,
text=labels,
angle=[0]*len(labels))
def is_valid_file(parser, arg):
"""verify the validity of the given file. Never trust the End-User"""
if not os.path.exists(arg):
parser.error("File %s not found" % arg)
else:
return arg
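# Usage sketch (assumption: is_valid_file is wired into argparse through the
# `type=` hook, which is the usual pattern for this helper):
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    type=lambda arg: is_valid_file(parser, arg),
                    help='path to an existing configuration file')
# parser.parse_args(['--config', 'settings.ini'])  # errors out if the file is missing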
def _alpha_eff(self, r_eff, n_sersic, k_eff):
"""
deflection angle at r_eff
:param r_eff: half-light (effective) radius of the Sersic profile
:param n_sersic: Sersic index
:param k_eff: profile normalization (convergence at r_eff)
:return: deflection angle at r_eff
"""
b = self.b_n(n_sersic)
alpha_eff = n_sersic * r_eff * k_eff * b**(-2*n_sersic) * np.exp(b) * special.gamma(2*n_sersic)
return -alpha_eff
def split_taf(txt: str) -> [str]: # type: ignore
"""
Splits a TAF report into each distinct time period
"""
lines = []
split = txt.split()
last_index = 0
for i, item in enumerate(split):
if starts_new_line(item) and i != 0 and not split[i - 1].startswith('PROB'):
lines.append(' '.join(split[last_index:i]))
last_index = i
lines.append(' '.join(split[last_index:]))
return lines
def intersect(self, other):
"""Constructs an unminimized DFA recognizing
the intersection of the languages of two given DFAs.
Args:
other (DFA): The other DFA that will be used
for the intersect operation
Returns:
DFA: The resulting DFA
"""
operation = bool.__and__
self.cross_product(other, operation)
return self
def download(self, id, attid): # pylint: disable=invalid-name,redefined-builtin
"""Download a device's attachment.
:param id: Device ID as an int.
:param attid: Attachment ID as an int.
:rtype: tuple `(io.BytesIO, 'filename')`
"""
resp = self.service.get_id(self._base(id), attid, params={'format': 'download'}, stream=True)
b = io.BytesIO()
stream.stream_response_to_file(resp, path=b)
resp.close()
b.seek(0)
return (b, self.service.filename(resp))
def editProtocol(self, clusterProtocolObj):
"""
Updates the Cluster Protocol. This will cause the cluster to be
restarted with updated protocol configuration.
"""
if not isinstance(clusterProtocolObj, ClusterProtocol):
raise AttributeError("Invalid Input, must be a ClusterProtocol Object")
url = self._url + "/editProtocol"
params = {
"f" : "json",
"tcpClusterPort" : str(clusterProtocolObj.value['tcpClusterPort'])
}
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
def results_class_wise_metrics(self):
"""Class-wise metrics
Returns
-------
dict
results in a dictionary format
"""
results = {}
for scene_id, scene_label in enumerate(self.scene_label_list):
if scene_label not in results:
results[scene_label] = {}
results[scene_label]['count'] = {}
results[scene_label]['count']['Ncorr'] = self.scene_wise[scene_label]['Ncorr']
results[scene_label]['count']['Nref'] = self.scene_wise[scene_label]['Nref']
results[scene_label]['count']['Nsys'] = self.scene_wise[scene_label]['Nsys']
results[scene_label]['accuracy'] = {
'accuracy': metric.accuracy_corr(
Ncorr=self.scene_wise[scene_label]['Ncorr'],
N=self.scene_wise[scene_label]['Nref']
)
}
return results
def get_objective_query_session(self, proxy):
"""Gets the ``OsidSession`` associated with the objective query service.
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveQuerySession``
:rtype: ``osid.learning.ObjectiveQuerySession``
:raise: ``NullArgument`` -- ``proxy`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``Unimplemented`` -- ``supports_objective_query()`` is ``false``
*compliance: optional -- This method must be implemented if ``supports_objective_query()`` is ``true``.*
"""
if not self.supports_objective_query():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise OperationFailed()
proxy = self._convert_proxy(proxy)
try:
session = sessions.ObjectiveQuerySession(proxy=proxy, runtime=self._runtime)
except AttributeError:
raise OperationFailed()
return session
def get(self, reg, ty):
"""
Load a value from a machine register into a VEX temporary register.
All values must be loaded out of registers before they can be used with operations, etc
and stored back into them when the instruction is over. See Put().
:param reg: Register number as an integer, or register string name
:param ty: The Type to use.
:return: A VexValue of the gotten value.
"""
offset = self.lookup_register(self.irsb_c.irsb.arch, reg)
if offset == self.irsb_c.irsb.arch.ip_offset:
return self.constant(self.addr, ty)
rdt = self.irsb_c.rdreg(offset, ty)
return VexValue(self.irsb_c, rdt)
def random(length: int = 8, chars: str = digits + ascii_lowercase) -> Iterator[str]:
"""
Yields random strings.
Not unique, but has around 1 in a million chance of collision (with the default 8
character length). e.g. 'fubui5e6'
Args:
length: Length of the random string.
chars: The characters to randomly choose from.
"""
while True:
yield "".join([choice(chars) for _ in range(length)])
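# Usage sketch: `random` above is an infinite generator, so values are pulled
# with next() or itertools.islice rather than by calling it once.
from itertools import islice

gen = random()                  # default: 8 chars drawn from digits + lowercase
first = next(gen)               # e.g. 'fubui5e6'
batch = list(islice(gen, 5))    # five more random strings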
def aimport_module(self, module_name):
"""Import a module, and mark it reloadable
Returns
-------
top_module : module
The imported module if it is top-level, or the top-level
top_name : str
Name of top_module
"""
self.mark_module_reloadable(module_name)
import_module(module_name)
top_name = module_name.split('.')[0]
top_module = sys.modules[top_name]
return top_module, top_name
def create_class(request):
"""Create new class
POST parameters (JSON):
name:
Human readable name of class
code (optional):
unique code of class used for joining to class
"""
if request.method == 'GET':
return render(request, 'classes_create.html', {}, help_text=create_class.__doc__)
if request.method == 'POST':
if not request.user.is_authenticated() or not hasattr(request.user, "userprofile"):
return render_json(request, {
'error': _('User is not logged in.'),
'error_type': 'user_unauthorized'
}, template='classes_create.html', status=401)
data = json_body(request.body.decode("utf-8"))
if 'code' in data and Class.objects.filter(code=data['code']).exists():
return render_json(request, {
'error': _('A class with this code already exists.'),
'error_type': 'class_with_code_exists'
}, template='classes_create.html', status=400)
if 'name' not in data or not data['name']:
return render_json(request, {'error': _('Class name is missing.'), 'error_type': 'missing_class_name'},
template='classes_create.html', status=400)
cls = Class(name=data['name'], owner=request.user.userprofile)
if 'code' in data:
cls.code = data['code']
cls.save()
return render_json(request, cls.to_json(), template='classes_create.html', status=201)
else:
return HttpResponseBadRequest("method {} is not allowed".format(request.method))
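# Illustrative POST payload for create_class (the values are hypothetical;
# "code" is optional and must be unique across classes):
payload = {
    "name": "Algebra 101",
    "code": "alg-101",
}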
def run(self, result):
"""Run tests in suite inside of suite fixtures.
"""
# proxy the result for myself
log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
#import pdb
#pdb.set_trace()
if self.resultProxy:
result, orig = self.resultProxy(result, self), result
else:
result, orig = result, result
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
self.error_context = 'setup'
result.addError(self, self._exc_info())
return
try:
for test in self._tests:
if result.shouldStop:
log.debug("stopping")
break
# each nose.case.Test will create its own result proxy
# so the cases need the original result, to avoid proxy
# chains
test(orig)
finally:
self.has_run = True
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
self.error_context = 'teardown'
result.addError(self, self._exc_info())
def update_email_asset(self, asset_id, name, asset_type):
"""
Updates an Email Asset
Args:
name: The name provided to the email asset
asset_type: The type provided to the email asset
asset_id: The id of the email asset to update
Returns:
"""
self.update_asset('EMAIL', asset_id, name, asset_type)
def parse_requests_response(response, **kwargs):
"""Build a ContentDisposition from a requests (PyPI) response.
"""
return parse_headers(
response.headers.get('content-disposition'), response.url, **kwargs)
def delete_collection_namespaced_daemon_set(self, namespace, **kwargs):
"""
delete collection of DaemonSet
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_collection_namespaced_daemon_set(namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_collection_namespaced_daemon_set_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_daemon_set_with_http_info(namespace, **kwargs)
return data
def average_true_range_percent(close_data, period):
"""
Average True Range Percent.
Formula:
ATRP = (ATR / CLOSE) * 100
"""
catch_errors.check_for_period_error(close_data, period)
atrp = (atr(close_data, period) / np.array(close_data)) * 100
return atrp
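# Worked sketch of the ATRP formula itself; the ATR values below are made up,
# in the real function they come from the library's atr() helper.
import numpy as np

close_data = np.array([100.0, 102.0, 101.0, 103.0])
atr_values = np.array([1.5, 1.6, 1.4, 1.7])   # hypothetical ATR series
atrp = (atr_values / close_data) * 100        # -> [1.5, 1.57, 1.39, 1.65] (approx.)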
def on_capacity(self, connection, command, query_kwargs, response,
capacity):
""" Hook that runs in response to a 'returned capacity' event """
now = time.time()
args = (connection, command, query_kwargs, response, capacity)
# Check total against the total_cap
self._wait(args, now, self.total_cap, self._total_consumed,
capacity.total)
# Increment table consumed capacity & check it
if capacity.tablename in self.table_caps:
table_cap = self.table_caps[capacity.tablename]
else:
table_cap = self.default_cap
consumed_history = self.get_consumed(capacity.tablename)
if capacity.table_capacity is not None:
self._wait(args, now, table_cap, consumed_history,
capacity.table_capacity)
# The local index consumed capacity also counts against the table
if capacity.local_index_capacity is not None:
for consumed in six.itervalues(capacity.local_index_capacity):
self._wait(args, now, table_cap, consumed_history, consumed)
# Increment global indexes
# check global indexes against the table+index cap or default
gic = capacity.global_index_capacity
if gic is not None:
for index_name, consumed in six.iteritems(gic):
full_name = capacity.tablename + ':' + index_name
if index_name in table_cap:
index_cap = table_cap[index_name]
elif full_name in self.table_caps:
index_cap = self.table_caps[full_name]
else:
# If there's no specified capacity for the index,
# use the cap on the table
index_cap = table_cap
consumed_history = self.get_consumed(full_name)
self._wait(args, now, index_cap, consumed_history, consumed)
def get_structure_with_only_magnetic_atoms(self, make_primitive=True):
"""
Returns a Structure with only magnetic atoms present.
:return: Structure
"""
sites = [site for site in self.structure if abs(site.properties["magmom"]) > 0]
structure = Structure.from_sites(sites)
if make_primitive:
structure = structure.get_primitive_structure(use_site_props=True)
return structure
def run(command, verbose=False):
"""
Run a shell command. Capture the stdout and stderr as a single stream.
Capture the status code.
If verbose=True, then print command and the output to the terminal as it
comes in.
"""
def do_nothing(*args, **kwargs):
return None
v_print = print if verbose else do_nothing
p = subprocess.Popen(
command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
)
v_print("run:", command)
def log_and_yield(line):
if six.PY2:
# If not unicode, try to decode it first
if isinstance(line, str):
line = line.decode('utf8', 'replace')
v_print(line)
return line
output = ''.join(map(log_and_yield, p.stdout))
status_code = p.wait()
return CommandResult(command, output, status_code)
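# Usage sketch (assumes CommandResult is a simple container holding the three
# values passed above; the command is arbitrary):
result = run("echo hello && exit 3", verbose=True)
# result.command     -> "echo hello && exit 3"
# result.output      -> combined stdout/stderr text ("hello\n" plus anything on stderr)
# result.status_code -> 3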
def main(argv=None):
"""script main.
parses command line options in sys.argv, unless *argv* is given.
"""
if argv is None:
argv = sys.argv
# setup command line parser
parser = U.OptionParser(version="%prog version: $Id$",
usage=usage,
description=globals()["__doc__"])
group = U.OptionGroup(parser, "dedup-specific options")
group.add_option("--output-stats", dest="stats", type="string",
default=False,
help="Specify location to output stats")
parser.add_option_group(group)
# add common options (-h/--help, ...) and parse command line
(options, args) = U.Start(parser, argv=argv)
U.validateSamOptions(options, group=False)
if options.random_seed:
np.random.seed(options.random_seed)
if options.stdin != sys.stdin:
in_name = options.stdin.name
options.stdin.close()
else:
raise ValueError("Input on standard in not currently supported")
if options.stdout != sys.stdout:
if options.no_sort_output:
out_name = options.stdout.name
else:
out_name = U.getTempFilename(dir=options.tmpdir)
sorted_out_name = options.stdout.name
options.stdout.close()
else:
if options.no_sort_output:
out_name = "-"
else:
out_name = U.getTempFilename(dir=options.tmpdir)
sorted_out_name = "-"
if not options.no_sort_output: # need to determine the output format for sort
if options.out_sam:
sort_format = "sam"
else:
sort_format = "bam"
if options.in_sam:
in_mode = "r"
else:
in_mode = "rb"
if options.out_sam:
out_mode = "wh"
else:
out_mode = "wb"
if options.stats and options.ignore_umi:
raise ValueError("'--output-stats' and '--ignore-umi' options"
" cannot be used together")
infile = pysam.Samfile(in_name, in_mode)
outfile = pysam.Samfile(out_name, out_mode, template=infile)
if options.paired:
outfile = sam_methods.TwoPassPairWriter(infile, outfile)
nInput, nOutput, input_reads, output_reads = 0, 0, 0, 0
if options.detection_method:
bam_features = detect_bam_features(infile.filename)
if not bam_features[options.detection_method]:
if sum(bam_features.values()) == 0:
raise ValueError(
"There are no bam tags available to detect multimapping. "
"Do not set --multimapping-detection-method")
else:
raise ValueError(
"The chosen method of detection for multimapping (%s) "
"will not work with this bam. Multimapping can be detected"
" for this bam using any of the following: %s" % (
options.detection_method, ",".join(
[x for x in bam_features if bam_features[x]])))
gene_tag = options.gene_tag
metacontig2contig = None
if options.chrom:
inreads = infile.fetch(reference=options.chrom)
else:
if options.per_contig and options.gene_transcript_map:
metacontig2contig = sam_methods.getMetaContig2contig(
infile, options.gene_transcript_map)
metatag = "MC"
inreads = sam_methods.metafetcher(infile, metacontig2contig, metatag)
gene_tag = metatag
else:
inreads = infile.fetch()
# set up ReadCluster functor with methods specific to
# specified options.method
processor = network.ReadDeduplicator(options.method)
bundle_iterator = sam_methods.get_bundles(
options,
metacontig_contig=metacontig2contig)
if options.stats:
# set up arrays to hold stats data
stats_pre_df_dict = {"UMI": [], "counts": []}
stats_post_df_dict = {"UMI": [], "counts": []}
pre_cluster_stats = []
post_cluster_stats = []
pre_cluster_stats_null = []
post_cluster_stats_null = []
topology_counts = collections.Counter()
node_counts = collections.Counter()
read_gn = umi_methods.random_read_generator(
infile.filename, chrom=options.chrom,
barcode_getter=bundle_iterator.barcode_getter)
for bundle, key, status in bundle_iterator(inreads):
nInput += sum([bundle[umi]["count"] for umi in bundle])
while nOutput >= output_reads + 100000:
output_reads += 100000
U.info("Written out %i reads" % output_reads)
while nInput >= input_reads + 1000000:
input_reads += 1000000
U.info("Parsed %i input reads" % input_reads)
if options.stats:
# generate pre-dedup stats
average_distance = umi_methods.get_average_umi_distance(bundle.keys())
pre_cluster_stats.append(average_distance)
cluster_size = len(bundle)
random_umis = read_gn.getUmis(cluster_size)
average_distance_null = umi_methods.get_average_umi_distance(random_umis)
pre_cluster_stats_null.append(average_distance_null)
if options.ignore_umi:
for umi in bundle:
nOutput += 1
outfile.write(bundle[umi]["read"])
else:
# dedup using umis and write out deduped bam
reads, umis, umi_counts = processor(
bundle=bundle,
threshold=options.threshold)
for read in reads:
outfile.write(read)
nOutput += 1
if options.stats:
# collect pre-dedup stats
stats_pre_df_dict['UMI'].extend(bundle)
stats_pre_df_dict['counts'].extend(
[bundle[UMI]['count'] for UMI in bundle])
# collect post-dedup stats
post_cluster_umis = [bundle_iterator.barcode_getter(x)[0] for x in reads]
stats_post_df_dict['UMI'].extend(umis)
stats_post_df_dict['counts'].extend(umi_counts)
average_distance = umi_methods.get_average_umi_distance(post_cluster_umis)
post_cluster_stats.append(average_distance)
cluster_size = len(post_cluster_umis)
random_umis = read_gn.getUmis(cluster_size)
average_distance_null = umi_methods.get_average_umi_distance(random_umis)
post_cluster_stats_null.append(average_distance_null)
outfile.close()
if not options.no_sort_output:
# sort the output
pysam.sort("-o", sorted_out_name, "-O", sort_format, out_name)
os.unlink(out_name) # delete the tempfile
if options.stats:
# generate the stats dataframe
stats_pre_df = pd.DataFrame(stats_pre_df_dict)
stats_post_df = pd.DataFrame(stats_post_df_dict)
# tally the counts per umi per position
pre_counts = collections.Counter(stats_pre_df["counts"])
post_counts = collections.Counter(stats_post_df["counts"])
counts_index = list(set(pre_counts.keys()).union(set(post_counts.keys())))
counts_index.sort()
with U.openFile(options.stats + "_per_umi_per_position.tsv", "w") as outf:
outf.write("counts\tinstances_pre\tinstances_post\n")
for count in counts_index:
values = (count, pre_counts[count], post_counts[count])
outf.write("\t".join(map(str, values)) + "\n")
# aggregate stats pre/post per UMI
agg_pre_df = aggregateStatsDF(stats_pre_df)
agg_post_df = aggregateStatsDF(stats_post_df)
agg_df = pd.merge(agg_pre_df, agg_post_df, how='left',
left_index=True, right_index=True,
sort=True, suffixes=["_pre", "_post"])
# TS - if count value not observed either pre/post-dedup,
# merge will leave an empty cell and the column will be cast as a float
# see http://pandas.pydata.org/pandas-docs/dev/missing_data.html
# --> Missing data casting rules and indexing
# so, back fill with zeros and convert back to int
agg_df = agg_df.fillna(0).astype(int)
agg_df.index = [x.decode() for x in agg_df.index]
agg_df.index.name = 'UMI'
agg_df.to_csv(options.stats + "_per_umi.tsv", sep="\t")
# bin distances into integer bins
max_ed = int(max(map(max, [pre_cluster_stats,
post_cluster_stats,
pre_cluster_stats_null,
post_cluster_stats_null])))
cluster_bins = range(-1, int(max_ed) + 2)
def bin_clusters(cluster_list, bins=cluster_bins):
''' take list of floats and return bins'''
return np.digitize(cluster_list, bins, right=True)
def tallyCounts(binned_cluster, max_edit_distance):
''' tally counts per bin '''
return np.bincount(binned_cluster,
minlength=max_edit_distance + 3)
pre_cluster_binned = bin_clusters(pre_cluster_stats)
post_cluster_binned = bin_clusters(post_cluster_stats)
pre_cluster_null_binned = bin_clusters(pre_cluster_stats_null)
post_cluster_null_binned = bin_clusters(post_cluster_stats_null)
edit_distance_df = pd.DataFrame(
{"unique": tallyCounts(pre_cluster_binned, max_ed),
"unique_null": tallyCounts(pre_cluster_null_binned, max_ed),
options.method: tallyCounts(post_cluster_binned, max_ed),
"%s_null" % options.method: tallyCounts(post_cluster_null_binned, max_ed),
"edit_distance": cluster_bins},
columns=["unique", "unique_null", options.method,
"%s_null" % options.method, "edit_distance"])
# TS - set lowest bin (-1) to "Single_UMI"
edit_distance_df['edit_distance'][0] = "Single_UMI"
edit_distance_df.to_csv(options.stats + "_edit_distance.tsv",
index=False, sep="\t")
# write footer and output benchmark information.
U.info(
"Reads: %s" % ", ".join(["%s: %s" % (x[0], x[1]) for x in
bundle_iterator.read_events.most_common()]))
U.info("Number of reads out: %i" % nOutput)
if not options.ignore_umi: # otherwise processor has not been used
U.info("Total number of positions deduplicated: %i" %
processor.UMIClusterer.positions)
if processor.UMIClusterer.positions > 0:
U.info("Mean number of unique UMIs per position: %.2f" %
(float(processor.UMIClusterer.total_umis_per_position) /
processor.UMIClusterer.positions))
U.info("Max. number of unique UMIs per position: %i" %
processor.UMIClusterer.max_umis_per_position)
else:
U.warn("The BAM did not contain any valid "
"reads/read pairs for deduplication")
U.Stop()
def _get_xk(self, yk):
'''Compute approximate solution from initial guess and approximate
solution of the preconditioned linear system.'''
if yk is not None:
return self.x0 + self.linear_system.Mr * yk
return self.x0
def copy_to_clipboard(self):
"""
Copies selected items to clipboard.
"""
tree = self.treeview
# get the selected item:
selection = tree.selection()
if selection:
self.filter_remove(remember=True)
root = ET.Element('selection')
for item in selection:
node = self.tree_node_to_xml('', item)
root.append(node)
# python2 issue
try:
text = ET.tostring(root, encoding='unicode')
except LookupError:
text = ET.tostring(root, encoding='UTF-8')
tree.clipboard_clear()
tree.clipboard_append(text)
self.filter_restore()
def create_307_response(self):
"""
Creates a 307 "Temporary Redirect" response including a HTTP Warning
header with code 299 that contains the user message received during
processing the request.
"""
request = get_current_request()
msg_mb = UserMessageMember(self.message)
coll = request.root['_messages']
coll.add(msg_mb)
# Figure out the new location URL.
qs = self.__get_new_query_string(request.query_string,
self.message.slug)
resubmit_url = "%s?%s" % (request.path_url, qs)
headers = [('Warning', '299 %s' % self.message.text),
# ('Content-Type', cnt_type),
]
http_exc = HttpWarningResubmit(location=resubmit_url,
detail=self.message.text,
headers=headers)
return request.get_response(http_exc)
def image_function(f='sin(5*x)*cos(5*y)', xmin=-1, xmax=1, ymin=-1, ymax=1, xsteps=100, ysteps=100, p='x,y', g=None, **kwargs):
"""
Plots a 2-d function over the specified range
Parameters
----------
f='sin(5*x)*cos(5*y)'
Takes two inputs and returns one value. Can also
be a string function such as sin(x*y)
xmin=-1, xmax=1, ymin=-1, ymax=1
Range over which to generate/plot the data
xsteps=100, ysteps=100
How many points to plot on the specified range
p='x,y'
If using strings for functions, this is a string of parameters.
g=None
Optional additional globals. Try g=globals()!
See spinmob.plot.image.data() for additional optional keyword arguments.
"""
default_kwargs = dict(clabel=str(f), xlabel='x', ylabel='y')
default_kwargs.update(kwargs)
# aggregate globals
if not g: g = {}
for k in list(globals().keys()):
if k not in g: g[k] = globals()[k]
if type(f) == str:
f = eval('lambda ' + p + ': ' + f, g)
# generate the grid x and y coordinates
xones = _n.linspace(1,1,ysteps)
x = _n.linspace(xmin, xmax, xsteps)
xgrid = _n.outer(xones, x)
yones = _n.linspace(1,1,xsteps)
y = _n.linspace(ymin, ymax, ysteps)
ygrid = _n.outer(y, yones)
# now get the z-grid
try:
# try it the fast numpy way. Add 0 to assure dimensions
zgrid = f(xgrid, ygrid) + xgrid*0.0
except:
print("Notice: function is not rocking hardcore. Generating grid the slow way...")
# manually loop over the data to generate the z-grid
zgrid = []
for ny in range(0, len(y)):
zgrid.append([])
for nx in range(0, len(x)):
zgrid[ny].append(f(x[nx], y[ny]))
zgrid = _n.array(zgrid)
# now plot!
image_data(zgrid.transpose(), x, y, **default_kwargs)
def scan_in_memory(node, env, path=()):
"""
"Scans" a Node.FS.Dir for its in-memory entries.
"""
try:
entries = node.entries
except AttributeError:
# It's not a Node.FS.Dir (or doesn't look enough like one for
# our purposes), which can happen if a target list containing
# mixed Node types (Dirs and Files, for example) has a Dir as
# the first entry.
return []
entry_list = sorted(filter(do_not_scan, list(entries.keys())))
return [entries[n] for n in entry_list]
def cell_normalize(data):
"""
Returns the data where the expression is normalized so that the total
count per cell is equal.
"""
if sparse.issparse(data):
data = sparse.csc_matrix(data.astype(float))
# normalize in-place
sparse_cell_normalize(data.data,
data.indices,
data.indptr,
data.shape[1],
data.shape[0])
return data
data_norm = data.astype(float)
total_umis = []
for i in range(data.shape[1]):
di = data_norm[:,i]
total_umis.append(di.sum())
di /= total_umis[i]
med = np.median(total_umis)
data_norm *= med
return data_norm
def move_item(self, token, item_id, src_folder_id, dest_folder_id):
"""
Move an item from the source folder to the destination folder.
:param token: A valid token for the user in question.
:type token: string
:param item_id: The id of the item to be moved
:type item_id: int | long
:param src_folder_id: The id of source folder where the item is located
:type src_folder_id: int | long
:param dest_folder_id: The id of destination folder where the item is
moved to
:type dest_folder_id: int | long
:returns: Dictionary containing the details of the moved item
:rtype: dict
"""
parameters = dict()
parameters['token'] = token
parameters['id'] = item_id
parameters['srcfolderid'] = src_folder_id
parameters['dstfolderid'] = dest_folder_id
response = self.request('midas.item.move', parameters)
return response
def find_by_id(self, team, params={}, **options):
"""Returns the full record for a single team.
Parameters
----------
team : {Id} Globally unique identifier for the team.
[params] : {Object} Parameters for the request
"""
path = "/teams/%s" % (team)
return self.client.get(path, params, **options)
def get_watchlist_ttl(self, item: str) -> int:
"""
Get the amount of time a specific item will remain on the watchlist.
:param str item: The item to get the TTL for on the watchlist
:return: Time in seconds. Returns None for a non-existing element
:rtype: int
"""
assert item is not None
item = self._encode_item(item)
return self.__get_ttl(self.__redis_conf['watchlist_template'].format(item))
def _clean_key_type(key_name, escape_char=ESCAPE_SEQ):
"""Removes type specifier returning detected type and
a key name without type specifier.
:param str key_name: A key name containing type postfix.
:rtype: tuple[type|None, str]
:returns: Type definition and cleaned key name.
"""
for i in (2, 1):
if len(key_name) < i:
return None, key_name
type_v = key_name[-i:]
if type_v in _KEY_SPLIT:
if len(key_name) <= i:
return _KEY_SPLIT[type_v], ''
esc_cnt = 0
for pos in range(-i - 1, -len(key_name) - 1, -1):
if key_name[pos] == escape_char:
esc_cnt += 1
else:
break
if esc_cnt % 2 == 0:
return _KEY_SPLIT[type_v], key_name[:-i]
else:
return None, key_name
return None, key_name
def fill_auth_list(self, auth_provider, name, groups, auth_list=None, permissive=None):
'''
Returns a list of authorisation matchers that a user is eligible for.
This list is a combination of the provided personal matchers plus the
matchers of any group the user is in.
'''
if auth_list is None:
auth_list = []
if permissive is None:
permissive = self.opts.get('permissive_acl')
name_matched = False
for match in auth_provider:
if match == '*' and not permissive:
continue
if match.endswith('%'):
if match.rstrip('%') in groups:
auth_list.extend(auth_provider[match])
else:
if salt.utils.stringutils.expr_match(match, name):
name_matched = True
auth_list.extend(auth_provider[match])
if not permissive and not name_matched and '*' in auth_provider:
auth_list.extend(auth_provider['*'])
return auth_list
def randomizer_bin_und(R, alpha, seed=None):
'''
This function randomizes a binary undirected network, while preserving
the degree distribution. The function directly searches for rewirable
edge pairs (rather than trying to rewire edge pairs at random), and
hence avoids long loops and works especially well in dense matrices.
Parameters
----------
R : NxN np.ndarray
binary undirected connection matrix
alpha : float
fraction of edges to rewire
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
'''
rng = get_rng(seed)
R = binarize(R, copy=True) # binarize
if not np.all(R == R.T):
raise BCTParamError(
'randomizer_bin_und only takes undirected matrices')
ax = len(R)
nr_poss_edges = (np.dot(ax, ax) - ax) / 2 # find maximum possible edges
savediag = np.diag(R)
np.fill_diagonal(R, np.inf) # replace diagonal with high value
# if there are more edges than non-edges, invert the matrix to reduce
# computation time. "invert" means swap meaning of 0 and 1, not matrix
# inversion
i, j = np.where(np.triu(R, 1))
k = len(i)
if k > nr_poss_edges / 2:
swap = True
R = np.logical_not(R)
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
else:
swap = False
# exclude fully connected nodes
fullnodes = np.where((np.sum(np.triu(R, 1), axis=0) +
np.sum(np.triu(R, 1), axis=1).T) == (ax - 1))
if np.size(fullnodes):
R[fullnodes, :] = 0
R[:, fullnodes] = 0
np.fill_diagonal(R, np.inf)
i, j = np.where(np.triu(R, 1))
k = len(i)
if k == 0 or k >= (nr_poss_edges - 1):
raise BCTParamError("No possible randomization")
for it in range(k):
if rng.random_sample() > alpha:
continue # rewire alpha% of edges
a = i[it]
b = j[it] # it is the chosen edge from a<->b
alliholes, = np.where(R[:, a] == 0) # find where each end can connect
alljholes, = np.where(R[:, b] == 0)
# we can only use edges with connection to neither node
i_intersect = np.intersect1d(alliholes, alljholes)
# find which of these nodes are connected
ii, jj = np.where(R[np.ix_(i_intersect, i_intersect)])
# if there is an edge to switch
if np.size(ii):
# choose one randomly
nummates = np.size(ii)
mate = rng.randint(nummates)
# randomly orient the second edge
if rng.random_sample() > .5:
c = i_intersect[ii[mate]]
d = i_intersect[jj[mate]]
else:
d = i_intersect[ii[mate]]
c = i_intersect[jj[mate]]
# swap the edges
R[a, b] = 0
R[c, d] = 0
R[b, a] = 0
R[d, c] = 0
R[a, c] = 1
R[b, d] = 1
R[c, a] = 1
R[d, b] = 1
# update the edge index (this is inefficient)
for m in range(k):
if i[m] == d and j[m] == c:
i.setflags(write=True)
j.setflags(write=True)
i[it] = c
j[m] = b
elif i[m] == c and j[m] == d:
i.setflags(write=True)
j.setflags(write=True)
j[it] = c
i[m] = b
# restore fullnodes
if np.size(fullnodes):
R[fullnodes, :] = 1
R[:, fullnodes] = 1
# restore inversion
if swap:
R = np.logical_not(R)
# restore diagonal
np.fill_diagonal(R, 0)
R += savediag
return np.array(R, dtype=int)
def single(self, trigger_id, full=False):
"""
Get an existing (full) trigger definition.
:param trigger_id: Trigger definition id to be retrieved.
:param full: Fetch the full definition, default is False.
:return: Trigger or FullTrigger depending on the full parameter value.
"""
if full:
returned_dict = self._get(self._service_url(['triggers', 'trigger', trigger_id]))
return FullTrigger(returned_dict)
else:
returned_dict = self._get(self._service_url(['triggers', trigger_id]))
return Trigger(returned_dict)
def _check_infinite_flows(self, steps, flows=None):
"""
Recursively loop through the flow_config and check if there are any cycles.
:param steps: Set of step definitions to loop through
:param flows: Flows already visited.
:return: None
"""
if flows is None:
flows = []
for step in steps.values():
if "flow" in step:
flow = step["flow"]
if flow == "None":
continue
if flow in flows:
raise FlowInfiniteLoopError(
"Infinite flows detected with flow {}".format(flow)
)
flows.append(flow)
flow_config = self.project_config.get_flow(flow)
self._check_infinite_flows(flow_config.steps, flows)
def get_is_authorized(request, pid):
"""MNAuthorization.isAuthorized(did, action) -> Boolean."""
if 'action' not in request.GET:
raise d1_common.types.exceptions.InvalidRequest(
0, 'Missing required parameter. required="action"'
)
# Convert action string to action level. Raises InvalidRequest if the
# action string is not valid.
level = d1_gmn.app.auth.action_to_level(request.GET['action'])
d1_gmn.app.auth.assert_allowed(request, level, pid)
return d1_gmn.app.views.util.http_response_with_boolean_true_type()
def refresh(self, id_or_uri, timeout=-1):
"""
The Refresh action reclaims the top-of-rack switches in a logical switch.
Args:
id_or_uri:
Can be either the Logical Switch ID or URI
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: The Logical Switch
"""
uri = self._client.build_uri(id_or_uri) + "/refresh"
return self._client.update_with_zero_body(uri, timeout=timeout)
def _rgetattr(obj, key):
"""Recursive getattr for handling dots in keys."""
for k in key.split("."):
obj = getattr(obj, k)
return obj
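# Usage sketch: dotted attribute access on nested objects.
from types import SimpleNamespace

obj = SimpleNamespace(db=SimpleNamespace(host='localhost'))
assert _rgetattr(obj, 'db.host') == 'localhost'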
def dt_from_rfc8601(date_str):
"""Convert 8601 (ISO) date string to datetime object.
Handles "Z" and milliseconds transparently.
:param date_str: Date string.
:type date_str: ``string``
:return: Date time.
:rtype: :class:`datetime.datetime`
"""
# Normalize string and adjust for milliseconds. Note that Python 2.6+ has
# ".%f" format, but we're going for Python 2.5, so truncate the portion.
date_str = date_str.rstrip('Z').split('.')[0]
# Format string. (2010-04-13T14:02:48.000Z)
fmt = "%Y-%m-%dT%H:%M:%S"
# Python 2.6+: Could format and handle milliseconds.
# if date_str.find('.') >= 0:
# fmt += ".%f"
return datetime.strptime(date_str, fmt)
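# Usage sketch (milliseconds and the trailing "Z" are dropped, per the code above):
dt = dt_from_rfc8601("2010-04-13T14:02:48.000Z")
# -> datetime.datetime(2010, 4, 13, 14, 2, 48)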
def _complete_values(self, symbol = ""):
"""Compiles a list of possible symbols that can hold a value in
place. These consist of local vars, global vars, and functions."""
result = {}
#Also add the subroutines from the module and its dependencies.
moddict = self._generic_filter_execs(self.context.module)
self._cond_update(result, moddict, symbol)
self._cond_update(result, self.context.module.interfaces, symbol)
for depend in self.context.module.dependencies:
if depend in self.context.module.parent.modules:
#We don't want to display executables that are part of an interface, or that are embedded in
#a derived type, since those will be called through the type or interface
filtdict = self._generic_filter_execs(self.context.module.parent.modules[depend])
self._cond_update(result, filtdict, symbol)
self._cond_update(result, self.context.module.parent.modules[depend].interfaces, symbol)
#Add all the local vars if we are in an executable
if (isinstance(self.context.element, Function) or
isinstance(self.context.element, Subroutine)):
self._cond_update(result, self.element.members, symbol)
#Next add the global variables from the module
if self.context.module is not None:
self._cond_update(result, self.context.module.members, symbol)
#Next add user defined functions to the mix
for execkey in self.context.module.executables:
iexec = self.context.module.executables[execkey]
if isinstance(iexec, Function) and self._symbol_in(symbol, iexec.name):
result[iexec.name] = iexec
#Finally add the builtin functions to the mix. We need to add support
#for these in a separate file so we have their call signatures.
if symbol == "":
#Use the abbreviated list of most common fortran builtins
self._cond_update(result, cache.common_builtin, symbol)
else:
#we can use the full list as there will probably not be that
#many left over.
self._cond_update(result, cache.builtin, symbol)
return result
def number_to_string(n, alphabet):
"""
Given an non-negative integer ``n``, convert it to a string composed of
the given ``alphabet`` mapping, where the position of each element in
``alphabet`` is its radix value.
Examples::
>>> number_to_string(12345678, '01')
'101111000110000101001110'
>>> number_to_string(12345678, 'ab')
'babbbbaaabbaaaababaabbba'
>>> number_to_string(12345678, string.ascii_letters + string.digits)
'ZXP0'
>>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine '])
'one two three four five '
"""
result = ''
base = len(alphabet)
current = int(n)
if current < 0:
raise ValueError("invalid n (must be non-negative): %s" % n)
while current:
result = alphabet[current % base] + result
current = current // base
return result
def list_from_json(source_list_json):
"""
Deserialise all the items in source_list from json
"""
result = []
if source_list_json == [] or source_list_json is None:
return result
for list_item in source_list_json:
item = json.loads(list_item)
try:
if item['class_name'] == 'Departure':
temp = Departure()
elif item['class_name'] == 'Disruption':
temp = Disruption()
elif item['class_name'] == 'Station':
temp = Station()
elif item['class_name'] == 'Trip':
temp = Trip()
elif item['class_name'] == 'TripRemark':
temp = TripRemark()
elif item['class_name'] == 'TripStop':
temp = TripStop()
elif item['class_name'] == 'TripSubpart':
temp = TripSubpart()
else:
print('Unrecognised Class ' + item['class_name'] + ', skipping')
continue
temp.from_json(list_item)
result.append(temp)
except KeyError:
print('Unrecognised item with no class_name, skipping')
continue
return result
def install_handler(self, app):
"""Install logging handler."""
# Configure python logging
if app.config['LOGGING_CONSOLE_PYWARNINGS']:
self.capture_pywarnings(logging.StreamHandler())
if app.config['LOGGING_CONSOLE_LEVEL'] is not None:
for h in app.logger.handlers:
h.setLevel(app.config['LOGGING_CONSOLE_LEVEL'])
# Add request_id to log record
app.logger.addFilter(add_request_id_filter)
def t(root, children=None, debug=False, root_id=None):
"Create (DGParented)Tree from a root (str) and a list of (str, list) tuples."
if isinstance(root, Tree):
if children is None:
return root
return root.__class__(root, children, root_id)
elif isinstance(root, basestring):
root = debug_root_label(root, debug, root_id)
# Beware: (DGParented)Tree is a subclass of list!
if isinstance(children, Tree):
child_trees = [children]
elif isinstance(children, list):
child_trees = []
for child in children:
if isinstance(child, Tree):
child_trees.append(child)
elif isinstance(child, list):
child_trees.extend(child)
elif isinstance(child, tuple):
child_trees.append(t(*child))
elif isinstance(child, basestring):
child_trees.append(child)
else:
raise NotImplementedError
elif isinstance(children, basestring):
# this tree does only have one child, a leaf node
# TODO: this is a workaround for the following problem:
# Tree('foo', [Tree('bar', [])]) != Tree('foo', ['bar'])
child_trees = [Tree(children, [])]
else:
# this tree only consists of one leaf node
assert children is None
child_trees = []
return DGParentedTree(root, child_trees, root_id)
else:
raise NotImplementedError
def get_cf_files(path, queue):
"""Get rule files in a directory and put them in a queue"""
for root, _, files in os.walk(os.path.abspath(path)):
if not files:
continue
for filename in files:
fullname = os.path.join(root, filename)
if os.path.isfile(fullname) and (fullname.endswith('.cf') or
fullname.endswith('.post')):
queue.put(fullname)
def get_image(self):
"""Get the image currently being displayed.
Returns
-------
image : `~ginga.AstroImage.AstroImage` or `~ginga.RGBImage.RGBImage`
Image object.
"""
if self._imgobj is not None:
# quick optimization
return self._imgobj.get_image()
canvas_img = self.get_canvas_image()
return canvas_img.get_image()
def img2img_transformer_tiny():
"""Tiny params."""
hparams = img2img_transformer2d_base()
hparams.num_hidden_layers = 2
hparams.hidden_size = 128
hparams.batch_size = 4
hparams.max_length = 128
hparams.attention_key_channels = hparams.attention_value_channels = 0
hparams.filter_size = 128
hparams.num_heads = 1
hparams.pos = "timing"
return hparams
def rpcproxy(spec):
"""Decorator to enable this class to proxy RPC client calls
The decorated class constructor takes two additional arguments,
`context=` is required to be a :class:`~p4p.client.thread.Context`.
`format`= can be a string, tuple, or dictionary and is applied
to PV name strings given to :py:func:`rpcall`.
Other arguments are passed to the user class constructor. ::
@rpcproxy
class MyProxy(object):
@rpccall("%s:add")
def add(lhs='d', rhs='d'):
pass
ctxt = Context('pva')
proxy = MyProxy(context=ctxt, format="tst:") # evaluates "%s:add"%"tst:"
The decorated class will be a sub-class of the provided class and :class:`RPCProxyBase`.
"""
# inject our ctor first so we don't have to worry about super() non-sense.
def _proxyinit(self, context=None, format={}, **kws):
assert context is not None, context
self.context = context
self.format = format
spec.__init__(self, **kws)
obj = {'__init__': _proxyinit}
for K, V in inspect.getmembers(spec, lambda M: hasattr(M, '_call_PV')):
obj[K] = _wrapMethod(K, V)
return type(spec.__name__, (RPCProxyBase, spec), obj)
def resolve_weak_types(storage, debug=False):
"""Reslove weak type rules W1 - W3.
See: http://unicode.org/reports/tr9/#Resolving_Weak_Types
"""
for run in storage['runs']:
prev_strong = prev_type = run['sor']
start, length = run['start'], run['length']
chars = storage['chars'][start:start+length]
for _ch in chars:
# W1. Examine each nonspacing mark (NSM) in the level run, and
# change the type of the NSM to the type of the previous character.
# If the NSM is at the start of the level run, it will get the type
# of sor.
bidi_type = _ch['type']
if bidi_type == 'NSM':
_ch['type'] = bidi_type = prev_type
# W2. Search backward from each instance of a European number until
# the first strong type (R, L, AL, or sor) is found. If an AL is
# found, change the type of the European number to Arabic number.
if bidi_type == 'EN' and prev_strong == 'AL':
_ch['type'] = 'AN'
# update prev_strong if needed
if bidi_type in ('R', 'L', 'AL'):
prev_strong = bidi_type
prev_type = _ch['type']
# W3. Change all ALs to R
for _ch in chars:
if _ch['type'] == 'AL':
_ch['type'] = 'R'
# W4. A single European separator between two European numbers changes
# to a European number. A single common separator between two numbers of
# the same type changes to that type.
for idx in range(1, len(chars) - 1):
bidi_type = chars[idx]['type']
prev_type = chars[idx-1]['type']
next_type = chars[idx+1]['type']
if bidi_type == 'ES' and (prev_type == next_type == 'EN'):
chars[idx]['type'] = 'EN'
if bidi_type == 'CS' and prev_type == next_type and \
prev_type in ('AN', 'EN'):
chars[idx]['type'] = prev_type
# W5. A sequence of European terminators adjacent to European numbers
# changes to all European numbers.
for idx in range(len(chars)):
if chars[idx]['type'] == 'EN':
for et_idx in range(idx-1, -1, -1):
if chars[et_idx]['type'] == 'ET':
chars[et_idx]['type'] = 'EN'
else:
break
for et_idx in range(idx+1, len(chars)):
if chars[et_idx]['type'] == 'ET':
chars[et_idx]['type'] = 'EN'
else:
break
# W6. Otherwise, separators and terminators change to Other Neutral.
for _ch in chars:
if _ch['type'] in ('ET', 'ES', 'CS'):
_ch['type'] = 'ON'
# W7. Search backward from each instance of a European number until the
# first strong type (R, L, or sor) is found. If an L is found, then
# change the type of the European number to L.
prev_strong = run['sor']
for _ch in chars:
if _ch['type'] == 'EN' and prev_strong == 'L':
_ch['type'] = 'L'
if _ch['type'] in ('L', 'R'):
prev_strong = _ch['type']
if debug:
debug_storage(storage, runs=True)
def verify_upload(self):
"""
Confirm that the last upload was successful.
Raises TusUploadFailed exception if the upload was not successful.
"""
if self.request.status_code == 204:
return True
else:
raise TusUploadFailed('', self.request.status_code, self.request.response_content)
def copy_artifact(src_path: str, artifact_hash: str, conf: Config):
"""Copy the artifact at `src_path` with hash `artifact_hash` to artifacts
cache dir.
If an artifact already exists at that location, it is assumed to be
identical (since it's based on hash), and the copy is skipped.
TODO: pruning policy to limit cache size.
"""
cache_dir = conf.get_artifacts_cache_dir()
if not isdir(cache_dir):
makedirs(cache_dir)
cached_artifact_path = join(cache_dir, artifact_hash)
if isfile(cached_artifact_path) or isdir(cached_artifact_path):
logger.debug('Skipping copy of existing cached artifact {} -> {}',
src_path, cached_artifact_path)
return
abs_src_path = join(conf.project_root, src_path)
logger.debug('Caching artifact {} under {}',
abs_src_path, cached_artifact_path)
shutil.copy(abs_src_path, cached_artifact_path)
def set_user_agent_component(self, key, value, sanitize=True):
"""Add or replace new user-agent component strings.
Given strings are formatted along the format agreed upon by Mollie and implementers:
- key and values are separated by a forward slash ("/").
- multiple key/values are separated by a space.
- keys are camel-cased, and cannot contain spaces.
- values cannot contain spaces.
Note: When you set sanitize=False you need to make sure the formatting is correct yourself.
"""
if sanitize:
key = ''.join(_x.capitalize() for _x in re.findall(r'\S+', key))
if re.search(r'\s+', value):
value = '_'.join(re.findall(r'\S+', value))
self.user_agent_components[key] = value
def split_url(url):
"""
Split the given URL ``base#anchor`` into ``(base, anchor)``,
or ``(base, None)`` if no anchor is present.
In case there are two or more ``#`` characters,
return only the first two tokens: ``a#b#c => (a, b)``.
:param string url: the url
:rtype: tuple of (str, str or None)
"""
if url is None:
return (None, None)
array = url.split("#")
if len(array) == 1:
array.append(None)
return tuple(array[0:2])
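# Usage sketch, following directly from the code above:
assert split_url("page.html#section2") == ("page.html", "section2")
assert split_url("page.html") == ("page.html", None)
assert split_url("a#b#c") == ("a", "b")
assert split_url(None) == (None, None)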
def get_logging_level(debug):
"""Returns logging level based on boolean"""
level = logging.INFO
if debug:
level = logging.DEBUG
return level
def done(self, result, noraise=False):
"""This method is called when a task has finished executing.
Subclass can override this method if desired, but should call
superclass method at the end.
"""
# [??] Should this be in a critical section?
# Has done() already been called on this task?
if self.ev_done.is_set():
# ??
if isinstance(self.result, Exception) and (not noraise):
raise self.result
return self.result
# calculate running time and other finalization
self.endtime = time.time()
try:
self.totaltime = self.endtime - self.starttime
except AttributeError:
# task was not initialized properly
self.totaltime = 0.0
self.result = result
# Release thread waiters
self.ev_done.set()
# Perform callbacks for event-style waiters
self.make_callback('resolved', self.result)
# If the result is an exception, then our final act is to raise
# it in the caller, unless the caller explicitly suppressed that
if isinstance(result, Exception) and (not noraise):
raise result
return result
def quote_value(value):
"""
Convert a value to the equivalent MySQL literal.
Mostly delegates directly to the mysql lib, but some exceptions exist.
"""
try:
if value is None:
return SQL_NULL
elif isinstance(value, SQL):
return quote_sql(value.template, value.param)
elif is_text(value):
return SQL("'" + "".join(ESCAPE_DCT.get(c, c) for c in value) + "'")
elif is_data(value):
return quote_value(json_encode(value))
elif is_number(value):
return SQL(text_type(value))
elif isinstance(value, datetime):
return SQL("str_to_date('" + value.strftime("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
elif isinstance(value, Date):
return SQL("str_to_date('" + value.format("%Y%m%d%H%M%S.%f") + "', '%Y%m%d%H%i%s.%f')")
elif hasattr(value, '__iter__'):
return quote_value(json_encode(value))
else:
return quote_value(text_type(value))
except Exception as e:
Log.error("problem quoting SQL {{value}}", value=repr(value), cause=e)
def recall_score(gold, pred, pos_label=1, ignore_in_gold=[], ignore_in_pred=[]):
"""
Calculate recall for a single class.
Args:
gold: A 1d array-like of gold labels
pred: A 1d array-like of predicted labels (assuming abstain = 0)
ignore_in_gold: A list of labels for which elements having that gold
label will be ignored.
ignore_in_pred: A list of labels for which elements having that pred
label will be ignored.
pos_label: The class label to treat as positive for recall
Returns:
rec: The (float) recall score
"""
gold, pred = _preprocess(gold, pred, ignore_in_gold, ignore_in_pred)
positives = np.where(pred == pos_label, 1, 0).astype(bool)
trues = np.where(gold == pos_label, 1, 0).astype(bool)
TP = np.sum(positives * trues)
FN = np.sum(np.logical_not(positives) * trues)
if TP or FN:
rec = TP / (TP + FN)
else:
rec = 0
return rec
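# Usage sketch with plain numpy arrays (made-up labels; assumes the default
# empty ignore lists leave the arrays unchanged in _preprocess):
import numpy as np

gold = np.array([1, 1, 2, 1, 2, 1])
pred = np.array([1, 2, 2, 1, 1, 0])   # 0 = abstain
# recall_score(gold, pred, pos_label=1) -> TP=2, FN=2 -> 0.5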
def view_decorator(function_decorator):
"""Convert a function based decorator into a class based decorator usable
on class based Views.
Can't subclass the `View` as it breaks inheritance (super in particular),
so we monkey-patch instead.
Based on http://stackoverflow.com/a/8429311
"""
def simple_decorator(View):
View.dispatch = method_decorator(function_decorator)(View.dispatch)
return View
return simple_decorator
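# Usage sketch (assumes a standard Django setup; MyView is hypothetical):
from django.contrib.auth.decorators import login_required
from django.views.generic import View

@view_decorator(login_required)
class MyView(View):
    def get(self, request):
        ...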
def send_request(self, request):
"""
Add itself to the observing list
:param request: the request
:return: the request unmodified
"""
if request.observe == 0:
# Observe request
host, port = request.destination
key_token = hash(str(host) + str(port) + str(request.token))
self._relations[key_token] = ObserveItem(time.time(), None, True, None)
return request
def tomorrow(hour=None, minute=None):
"""
Gives the ``datetime.datetime`` object corresponding to tomorrow. The
default value for optional parameters is the current value of hour and
minute. I.e: when called without specifying values for parameters, the
resulting object will refer to the time = now + 24 hours; when called with
only hour specified, the resulting object will refer to tomorrow at the
specified hour and at the current minute.
:param hour: the hour for tomorrow, in the format *0-23* (defaults to
``None``)
:type hour: int
:param minute: the minute for tomorrow, in the format *0-59* (defaults to
``None``)
:type minute: int
:returns: a ``datetime.datetime`` object
:raises: *ValueError* when hour or minute have bad values
"""
if hour is None:
hour = datetime.now().hour
if minute is None:
minute = datetime.now().minute
tomorrow_date = date.today() + timedelta(days=1)
return datetime(tomorrow_date.year, tomorrow_date.month, tomorrow_date.day,
hour, minute, 0)
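# Usage sketch:
same_time_tomorrow = tomorrow()     # now + 24 hours (current hour and minute)
morning_meeting = tomorrow(9, 30)   # tomorrow at 09:30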
def splitlines(self, keepends=False):
"""
B.splitlines([keepends]) -> list of lines
Return a list of the lines in B, breaking at line boundaries.
Line breaks are not included in the resulting list unless keepends
is given and true.
"""
# Py2 str.splitlines() takes keepends as an optional parameter,
# not as a keyword argument as in Python 3 bytes.
parts = super(newbytes, self).splitlines(keepends)
return [newbytes(part) for part in parts] | 0.003824 |
def distance(self, y, measure=None):
"""
Compute a pairwise distance measure between all rows of two numeric H2OFrames.
:param H2OFrame y: Frame containing queries (small)
        :param str measure: A string indicating what distance measure to use. Must be one of:
- ``"l1"``: Absolute distance (L1-norm, >=0)
- ``"l2"``: Euclidean distance (L2-norm, >=0)
- ``"cosine"``: Cosine similarity (-1...1)
- ``"cosine_sq"``: Squared Cosine similarity (0...1)
:examples:
        >>> iris_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/iris/iris.csv"))
        >>> references = iris_h2o[10:150, 0:4]
>>> queries = iris_h2o[0:10,0:4]
>>> A = references.distance(queries, "l1")
>>> B = references.distance(queries, "l2")
>>> C = references.distance(queries, "cosine")
>>> D = references.distance(queries, "cosine_sq")
>>> E = queries.distance(references, "l1")
>>> (E.transpose() == A).all()
:returns: An H2OFrame of the matrix containing pairwise distance / similarity between the
rows of this frame (N x p) and ``y`` (M x p), with dimensions (N x M).
"""
assert_is_type(y, H2OFrame)
if measure is None: measure = "l2"
return H2OFrame._expr(expr=ExprNode("distance", self, y, measure))._frame() | 0.006969 |
def correct_peaks(sig, peak_inds, search_radius, smooth_window_size,
peak_dir='compare'):
"""
    Adjust a set of detected peaks to coincide with local signal maxima.
Parameters
----------
sig : numpy array
The 1d signal array
peak_inds : np array
Array of the original peak indices
    search_radius : int
The radius within which the original peaks may be shifted.
smooth_window_size : int
The window size of the moving average filter applied on the
signal. Peak distance is calculated on the difference between
the original and smoothed signal.
peak_dir : str, optional
The expected peak direction: 'up' or 'down', 'both', or
'compare'.
- If 'up', the peaks will be shifted to local maxima
- If 'down', the peaks will be shifted to local minima
- If 'both', the peaks will be shifted to local maxima of the
rectified signal
- If 'compare', the function will try both 'up' and 'down'
options, and choose the direction that gives the largest mean
distance from the smoothed signal.
Returns
-------
corrected_peak_inds : numpy array
Array of the corrected peak indices
Examples
--------
"""
sig_len = sig.shape[0]
n_peaks = len(peak_inds)
# Subtract the smoothed signal from the original
sig = sig - smooth(sig=sig, window_size=smooth_window_size)
# Shift peaks to local maxima
if peak_dir == 'up':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
elif peak_dir == 'down':
shifted_peak_inds = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
elif peak_dir == 'both':
shifted_peak_inds = shift_peaks(sig=np.abs(sig),
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
else:
shifted_peak_inds_up = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=True)
shifted_peak_inds_down = shift_peaks(sig=sig,
peak_inds=peak_inds,
search_radius=search_radius,
peak_up=False)
# Choose the direction with the biggest deviation
up_dist = np.mean(np.abs(sig[shifted_peak_inds_up]))
down_dist = np.mean(np.abs(sig[shifted_peak_inds_down]))
if up_dist >= down_dist:
shifted_peak_inds = shifted_peak_inds_up
else:
shifted_peak_inds = shifted_peak_inds_down
return shifted_peak_inds | 0.000629 |
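An illustrative numpy-only sketch of the move-to-local-maximum step for a single peak; this is not the library's shift_peaks helper, just the same idea on a toy signal.

import numpy as np

sig = np.array([0., 1., 3., 2., 5., 1., 0.])   # toy signal
peak_ind, search_radius = 2, 2                  # original (approximate) peak and radius
lo = max(0, peak_ind - search_radius)
hi = min(len(sig), peak_ind + search_radius + 1)
corrected_ind = lo + np.argmax(sig[lo:hi])      # -> 4, the nearby local maximum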
def call(self, name, options=None, o=None):
"""
Call another command.
:param name: The command name
:type name: str
:param options: The options
:type options: list or None
:param o: The output
:type o: cleo.outputs.output.Output
"""
if options is None:
options = []
command = self.get_application().find(name)
options = [('command', command.get_name())] + options
return command.run(ListInput(options), o) | 0.003795 |
def handle(self,
t_input: inference.TranslatorInput,
t_output: inference.TranslatorOutput,
t_walltime: float = 0.):
"""
:param t_input: Translator input.
:param t_output: Translator output.
:param t_walltime: Total walltime for translation.
"""
self.stream.write("input=%s\toutput=%s\tinput_tokens=%d\toutput_tokens=%d\ttranslation_time=%0.4f\n" %
(" ".join(t_input.tokens),
t_output.translation,
len(t_input.tokens),
len(t_output.tokens),
t_walltime))
self.stream.flush() | 0.008487 |
def __calculate_score(self, index_point, index_cluster):
"""!
@brief Calculates Silhouette score for the specific object defined by index_point.
@param[in] index_point (uint): Index point from input data for which Silhouette score should be calculated.
@param[in] index_cluster (uint): Index cluster to which the point belongs to.
@return (float) Silhouette score for the object.
"""
difference = self.__calculate_dataset_difference(index_point)
a_score = self.__calculate_within_cluster_score(index_cluster, difference)
b_score = self.__caclulate_optimal_neighbor_cluster_score(index_cluster, difference)
return (b_score - a_score) / max(a_score, b_score) | 0.009259 |
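The return expression is the standard silhouette formula s = (b - a) / max(a, b); a worked example with assumed within-cluster and neighbour-cluster scores.

a_score = 0.5   # assumed mean distance to points in the same cluster
b_score = 2.0   # assumed mean distance to points in the best neighbouring cluster
s = (b_score - a_score) / max(a_score, b_score)   # 0.75, i.e. a well-separated point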
def pos(self, p_x=None, y=None, z=None):
"""Set/Get actor position."""
if p_x is None:
return np.array(self.GetPosition())
if z is None: # assume p_x is of the form (x,y,z)
self.SetPosition(p_x)
else:
self.SetPosition(p_x, y, z)
if self.trail:
self.updateTrail()
return self | 0.005391 |
def Ntubes_Phadkeb(DBundle, Do, pitch, Ntp, angle=30):
r'''Using tabulated values and correction factors for number of passes,
the highly accurate method of [1]_ is used to obtain the tube count
of a given tube bundle outer diameter for a given tube size and pitch.
Parameters
----------
DBundle : float
Outer diameter of tube bundle, [m]
Do : float
Tube outer diameter, [m]
pitch : float
Pitch; distance between two orthogonal tube centers, [m]
Ntp : int
Number of tube passes, [-]
angle : float, optional
The angle the tubes are positioned; 30, 45, 60 or 90, [degrees]
Returns
-------
Nt : int
Total number of tubes that fit in the heat exchanger, [-]
Notes
-----
For single-pass cases, the result is exact, and no tubes need to be removed
for any reason. For 4, 6, 8 pass arrangements, a number of tubes must be
    removed to accommodate pass partition plates. The following assumptions
are involved with that:
* The pass partition plate is where a row of tubes would have been.
Only one or two rows are assumed affected.
* The thickness of partition plate is < 70% of the tube outer diameter.
* The distance between the centerline of the partition plate and the
centerline of the nearest row of tubes is equal to the pitch.
This function will fail when there are more than 100,000 tubes.
[1]_ tabulated values up to approximately 3,000 tubes derived with
    number theory. The sequences of integers were identified in the
On-Line Encyclopedia of Integer Sequences (OEIS), and formulas listed in
    it were used to generate more coefficients to allow up to 100,000 tubes.
The integer sequences are A003136, A038590, A001481, and A057961. The
generation of coefficients for A038590 is very slow, but the rest are
reasonably fast.
The number of tubes that fit generally does not increase one-by-one, but by
several.
>>> Ntubes_Phadkeb(DBundle=1.007, Do=.028, pitch=.036, Ntp=2, angle=45.)
558
>>> Ntubes_Phadkeb(DBundle=1.008, Do=.028, pitch=.036, Ntp=2, angle=45.)
574
Because a pass partition needs to be installed in multiple tube pass
shells, more tubes fit in an exchanger the fewer passes are used.
>>> Ntubes_Phadkeb(DBundle=1.008, Do=.028, pitch=.036, Ntp=1, angle=45.)
593
Examples
--------
>>> Ntubes_Phadkeb(DBundle=1.200-.008*2, Do=.028, pitch=.036, Ntp=2, angle=45.)
782
References
----------
.. [1] Phadke, P. S., Determining tube counts for shell and tube
exchangers, Chem. Eng., September, 91, 65-68 (1984).
'''
if DBundle <= Do*Ntp:
return 0
if Ntp == 6:
e = 0.265
elif Ntp == 8:
e = 0.404
else:
e = 0.
r = 0.5*(DBundle - Do)/pitch
s = r*r
Ns, Nr = floor(s), floor(r)
# If Ns is between two numbers, take the smaller one
# C1 is the number of tubes for a single pass arrangement.
if angle == 30 or angle == 60:
i = np.searchsorted(triangular_Ns, Ns, side='right')
C1 = int(triangular_C1s[i-1])
elif angle == 45 or angle == 90:
i = np.searchsorted(square_Ns, Ns, side='right')
C1 = int(square_C1s[i-1])
Cx = 2*Nr + 1.
# triangular and rotated triangular
if (angle == 30 or angle == 60):
w = 2*r/3**0.5
Nw = floor(w)
if Nw % 2 == 0:
Cy = 3*Nw
else:
Cy = 3*Nw + 1
if Ntp == 2:
if angle == 30 :
C2 = C1 - Cx
else:
C2 = C1 - Cy - 1
else: # 4 passes, or 8; this value is needed
C4 = C1 - Cx - Cy
if (angle == 30 or angle == 60) and (Ntp == 6 or Ntp == 8):
if angle == 30: # triangular
v = 2*e*r/3**0.5 + 0.5
Nv = floor(v)
u = 3**0.5*Nv/2.
if Nv % 2 == 0:
z = (s-u*u)**0.5
else:
z = (s-u*u)**0.5 - 0.5
Nz = floor(z)
if Ntp == 6:
C6 = C1 - Cy - 4*Nz - 1
else:
C8 = C4 - 4*Nz
else: # rotated triangular
v = 2.*e*r
Nv = floor(v)
u1 = 0.5*Nv
z = (s - u1*u1)**0.5
w1 = 2*z/2**0.5
# w1 = 2**2**0.5 # WRONG
u2 = 0.5*(Nv + 1)
zs = (s-u2*u2)**0.5
w2 = 2.*zs/3**0.5
if Nv%2 == 0:
z1 = 0.5*w1
z2 = 0.5*(w2+1)
else:
z1 = 0.5*(w1+1)
z2 = 0.5*w2
Nz1 = floor(z1)
Nz2 = floor(z2)
if Ntp == 6:
C6 = C1 - Cx - 4.*(Nz1 + Nz2)
else: # 8
C8 = C4 - 4.*(Nz1 + Nz2)
if (angle == 45 or angle == 90):
if angle == 90:
Cy = Cx - 1.
# eq 6 or 8 for c2 or c4
if Ntp == 2:
C2 = C1 - Cx
else: # 4 passes, or 8; this value is needed
C4 = C1 - Cx - Cy
else: # rotated square
w = r/2**0.5
Nw = floor(w)
Cx = 2.*Nw + 1
Cy = Cx - 1
if Ntp == 2:
C2 = C1 - Cx
else: # 4 passes, or 8; this value is needed
C4 = C1 - Cx - Cy
if (angle == 45 or angle == 90) and (Ntp == 6 or Ntp == 8):
if angle == 90:
v = e*r + 0.5
Nv = floor(v)
z = (s - Nv*Nv)**0.5
Nz = floor(z)
if Ntp == 6:
C6 = C1 - Cy - 4*Nz - 1
else:
C8 = C4 - 4*Nz
else:
w = r/2**0.5
Nw = floor(w)
Cx = 2*Nw + 1
v = 2**0.5*e*r
Nv = floor(v)
u1 = Nv/2**0.5
z = (s-u1*u1)**0.5
w1 = 2**0.5*z
u2 = (Nv + 1)/2**0.5
zs = (s-u2*u2)**0.5
w2 = 2**0.5*zs
# if Nv is odd, 21a and 22a. If even, 21b and 22b. Nz1, Nz2
if Nv %2 == 0:
z1 = 0.5*w1
z2 = 0.5*(w2 + 1)
else:
z1 = 0.5*(w1 + 1)
z2 = 0.5*w2
Nz1 = floor(z1)
Nz2 = floor(z2)
if Ntp == 6:
C6 = C1 - Cx - 4*(Nz1 + Nz2)
else: # 8
C8 = C4 - 4*(Nz1 + Nz2)
if Ntp == 1:
ans = C1
elif Ntp == 2:
ans = C2
elif Ntp == 4:
ans = C4
elif Ntp == 6:
ans = C6
elif Ntp == 8:
ans = C8
else:
raise Exception('Only 1, 2, 4, 6, or 8 tube passes are supported')
ans = int(ans)
# In some cases, a negative number would be returned by these formulas
if ans < 0:
ans = 0 # pragma: no cover
return ans | 0.003758 |
def binarize_percent(netin, level, sign='pos', axis='time'):
"""
    Binarizes a network proportionally. When axis='time', the top values of each edge time series are kept; when axis='graphlet', the top values within each graphlet are kept.
Parameters
----------
netin : array or dict
network (graphlet or contact representation),
level : float
Percent to keep (expressed as decimal, e.g. 0.1 = top 10%)
sign : str, default='pos'
States the sign of the thresholding. Can be 'pos', 'neg' or 'both'. If "neg", only negative values are thresholded and vice versa.
axis : str, default='time'
Specify which dimension thresholding is applied against. Can be 'time' (takes top % for each edge time-series) or 'graphlet' (takes top % for each graphlet)
Returns
-------
netout : array or dict (depending on input)
Binarized network
"""
netin, netinfo = process_input(netin, ['C', 'G', 'TO'])
# Set diagonal to 0
netin = set_diagonal(netin, 0)
if axis == 'graphlet' and netinfo['nettype'][-1] == 'u':
triu = np.triu_indices(netinfo['netshape'][0], k=1)
netin = netin[triu[0], triu[1], :]
netin = netin.transpose()
if sign == 'both':
net_sorted = np.argsort(np.abs(netin), axis=-1)
elif sign == 'pos':
net_sorted = np.argsort(netin, axis=-1)
elif sign == 'neg':
net_sorted = np.argsort(-1*netin, axis=-1)
else:
raise ValueError('Unknown value for parameter: sign')
# Predefine
netout = np.zeros(netinfo['netshape'])
if axis == 'time':
        # These for loops can probably be removed for speed
for i in range(netinfo['netshape'][0]):
for j in range(netinfo['netshape'][1]):
netout[i, j, net_sorted[i, j, -
int(round(net_sorted.shape[-1])*level):]] = 1
elif axis == 'graphlet':
netout_tmp = np.zeros(netin.shape)
for i in range(netout_tmp.shape[0]):
netout_tmp[i, net_sorted[i, -
int(round(net_sorted.shape[-1])*level):]] = 1
netout_tmp = netout_tmp.transpose()
netout[triu[0], triu[1], :] = netout_tmp
netout[triu[1], triu[0], :] = netout_tmp
netout = set_diagonal(netout, 0)
# If input is contact, output contact
if netinfo['inputtype'] == 'C':
netinfo['nettype'] = 'b' + netinfo['nettype'][1]
netout = graphlet2contact(netout, netinfo)
netout.pop('inputtype')
netout.pop('values')
netout['diagonal'] = 0
return netout | 0.002304 |
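A stripped-down sketch of the top-percent rule used when axis='time', applied to a single edge time series; the values and level are made up.

import numpy as np

ts = np.array([0.1, 0.9, 0.4, 0.7])   # one edge time series
level = 0.25                           # keep the top 25% of time points
order = np.argsort(ts)
binarized = np.zeros_like(ts)
binarized[order[-int(round(len(ts) * level)):]] = 1   # only the 0.9 entry survives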
def begin(self, service_endpoint):
"""Create an AuthRequest object for the specified
service_endpoint. This method will create an association if
necessary."""
if self.store is None:
assoc = None
else:
assoc = self._getAssociation(service_endpoint)
request = AuthRequest(service_endpoint, assoc)
request.return_to_args[self.openid1_nonce_query_arg_name] = mkNonce()
if request.message.isOpenID1():
request.return_to_args[self.openid1_return_to_identifier_name] = \
request.endpoint.claimed_id
return request | 0.00316 |
def scan_videopath(videopath, callback, recursive=False):
"""
Scan the videopath string for video files.
:param videopath: Path object
:param callback: Instance of ProgressCallback
:param recursive: True if the scanning should happen recursive
:return: tuple with list of videos and list of subtitles (videos have matched subtitles)
"""
log.debug('scan_videopath(videopath="{videopath}", recursive={recursive})'.format(
videopath=videopath, recursive=recursive))
if not videopath.exists():
log.debug('"{videopath}" does not exist'.format(videopath=videopath))
raise IllegalPathException(path=videopath)
if videopath.is_dir():
log.debug('"{videopath}" is a directory'.format(videopath=videopath))
return __scan_folder(videopath, callback=callback, recursive=recursive)
elif videopath.is_file():
log.debug('"{videopath}" is a file'.format(videopath=videopath))
videopath_dir = videopath.parent
[all_subs, _] = filter_files_extensions(videopath_dir.iterdir(), [SUBTITLES_EXT, VIDEOS_EXT])
[_, video] = filter_files_extensions([videopath], [SUBTITLES_EXT, VIDEOS_EXT])
sub_videos = [all_subs, video]
path_subvideos = {videopath_dir: sub_videos}
return merge_path_subvideo(path_subvideos, callback)
else:
log.debug('"{videopath}" is of unknown type'.format(videopath=videopath))
return [], [] | 0.004147 |
def striptags(self):
r"""Unescape markup into an unicode string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
        >>> Markup("Main &raquo; <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape() | 0.004938 |
def _set_siteinfo(self):
"""
capture API sitematrix data in data attribute
"""
data = self._load_response('siteinfo').get('query')
mostviewed = data.get('mostviewed')
self.data['mostviewed'] = []
for item in mostviewed[1:]:
if item['ns'] == 0:
self.data['mostviewed'].append(item)
general = data.get('general')
self.params.update({'title': general.get('sitename')})
self.params.update({'lang': general.get('lang')})
self.data['site'] = general.get('wikiid')
info = {}
for item in general:
ginfo = general.get(item)
if ginfo:
info[item] = ginfo
self.data['info'] = info
siteviews = data.get('siteviews')
if siteviews:
values = [x for x in siteviews.values() if x]
if values:
self.data['siteviews'] = int(sum(values) / len(values))
else:
self.data['siteviews'] = 0
stats = data.get('statistics')
for item in stats:
self.data[item] = stats[item] | 0.001754 |
def compose(self, to, subject, text):
"""Login required. Sends POST to send a message to a user. Returns True or raises :class:`exceptions.UnexpectedResponse` if non-"truthy" value in response.
URL: ``http://www.reddit.com/api/compose/``
:param to: username or :class`things.Account` of user to send to
:param subject: subject of message
:param text: message body text
"""
if isinstance(to, Account):
to = to.name
data = dict(to=to, subject=subject, text=text)
j = self.post('api', 'compose', data=data)
return assert_truthy(j) | 0.007825 |
def list(self, request, *args, **kwargs):
"""
To get a list of price list items, run **GET** against */api/merged-price-list-items/*
as authenticated user.
If service is not specified default price list items are displayed.
Otherwise service specific price list items are displayed.
In this case rendered object contains {"is_manually_input": true}
In order to specify service pass query parameters:
- service_type (Azure, OpenStack etc.)
- service_uuid
Example URL: http://example.com/api/merged-price-list-items/?service_type=Azure&service_uuid=cb658b491f3644a092dd223e894319be
"""
return super(MergedPriceListItemViewSet, self).list(request, *args, **kwargs) | 0.006579 |
def parse_theta2_report (self, fh):
""" Parse the final THetA2 log file. """
parsed_data = {}
for l in fh:
if l.startswith('#'):
continue
else:
s = l.split("\t")
purities = s[1].split(',')
parsed_data['proportion_germline'] = float(purities[0]) * 100.0
for i, v in enumerate(purities[1:]):
if i <= 5:
parsed_data['proportion_tumour_{}'.format(i+1)] = float(v) * 100.0
else:
parsed_data['proportion_tumour_gt5'] = (float(v) * 100.0) + parsed_data.get('proportion_tumour_gt5', 0)
break
return parsed_data | 0.008086 |
def get_login_button_url(self, button_color=None, caption_color=None, button_size=None):
"""Return URL for image used for RunKeeper Login button.
@param button_color: Button color. Either 'blue', 'grey' or 'black'.
Default: 'blue'.
@param caption_color: Button text color. Either 'white' or 'black'.
Default: 'white'
@param button_size: Button width in pixels. Either 200, 300 or 600.
Default: 200
@return: URL for Login Button Image.
"""
        if button_color not in settings.LOGIN_BUTTON_COLORS:
            button_color = settings.LOGIN_BUTTON_COLORS[0]
        if caption_color not in settings.LOGIN_BUTTON_CAPTION_COLORS:
            caption_color = settings.LOGIN_BUTTON_CAPTION_COLORS[0]
        if button_size in settings.LOGIN_BUTTON_SIZES:
button_size = settings.LOGIN_BUTTON_SIZES[button_size]
else:
button_size = settings.LOGIN_BUTTON_SIZES['None']
return settings.LOGIN_BUTTON_URL % (button_color,
caption_color,
button_size) | 0.008013 |
def get_annotations(self, min_rho=None):
'''
Get the list of annotations found.
:param min_rho: if set, only get entities with a rho-score (confidence) higher than this.
'''
return (a for a in self.annotations if min_rho is None or a.score > min_rho) | 0.013793 |
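A hedged usage sketch, assuming `response` is an instance of this class (e.g. an entity-linking response whose annotations carry a rho score):

# for annotation in response.get_annotations(min_rho=0.3):
#     print(annotation)   # only annotations scoring above 0.3 are yielded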
def stationary_coefficients(self, j, coeff_type='ma'):
"""
Wold representation moving average or VAR coefficients for the
steady state Kalman filter.
Parameters
----------
j : int
The lag length
coeff_type : string, either 'ma' or 'var' (default='ma')
            The type of coefficient sequence to compute. Either 'ma' for
moving average or 'var' for VAR.
"""
# == simplify notation == #
A, G = self.ss.A, self.ss.G
K_infinity = self.K_infinity
# == compute and return coefficients == #
coeffs = []
i = 1
if coeff_type == 'ma':
coeffs.append(np.identity(self.ss.k))
P_mat = A
P = np.identity(self.ss.n) # Create a copy
elif coeff_type == 'var':
coeffs.append(dot(G, K_infinity))
P_mat = A - dot(K_infinity, G)
P = np.copy(P_mat) # Create a copy
else:
raise ValueError("Unknown coefficient type")
while i <= j:
coeffs.append(dot(dot(G, P), K_infinity))
P = dot(P, P_mat)
i += 1
return coeffs | 0.001679 |
def notifyPop(self, queue, length = 1):
'''
Internal notify for sub-queues been poped
:returns: List of any events generated by this pop
'''
self.totalSize = self.totalSize - length
ret1 = []
ret2 = []
if self.isWaited and self.canAppend():
self.isWaited = False
ret1.append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret2.append(QueueIsEmptyEvent(self))
if self.parent is not None:
pr = self.parent.notifyPop(self, length)
ret1 += pr[0]
ret2 += pr[1]
newblocked = not self.canPop()
if newblocked != self.blocked:
self.blocked = newblocked
if self.parent is not None:
self.parent.notifyBlock(self, newblocked)
return (ret1, ret2) | 0.007701 |
def actuator_on(self, service_location_id, actuator_id, duration=None):
"""
Turn actuator on
Parameters
----------
service_location_id : int
actuator_id : int
duration : int, optional
            300, 900, 1800 or 3600, specifying the time in seconds the actuator
should be turned on. Any other value results in turning on for an
undetermined period of time.
Returns
-------
requests.Response
"""
return self._actuator_on_off(
on_off='on', service_location_id=service_location_id,
actuator_id=actuator_id, duration=duration) | 0.00299 |
def overfit(self, tau=None, plot=True, clobber=False, w=9, **kwargs):
r"""
Compute the masked & unmasked overfitting metrics for the light curve.
This routine injects a transit model given by `tau` at every cadence
in the light curve and recovers the transit depth when (1) leaving
the transit unmasked and (2) masking the transit prior to performing
regression.
:param tau: A function or callable that accepts two arguments, \
`time` and `t0`, and returns an array corresponding to a \
zero-mean, unit depth transit model centered at \
`t0` and evaluated at `time`. \
The easiest way to provide this is to use an instance of \
:py:class:`everest.transit.TransitShape`. Default is \
:py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \
            with solar-like limb darkening and a duration of 0.1 days.
:param bool plot: Plot the results as a PDF? Default :py:obj:`True`
:param bool clobber: Overwrite the results if present? Default \
:py:obj:`False`
:param int w: The size of the masking window in cadences for \
computing the masked overfitting metric. Default `9` \
(about 4.5 hours for `K2` long cadence).
:returns: An instance of `everest.basecamp.Overfitting`.
"""
fname = os.path.join(self.dir, self.name + '_overfit.npz')
figname = os.path.join(self.dir, self.name)
# Compute
if not os.path.exists(fname) or clobber:
# Baseline
med = np.nanmedian(self.fraw)
# Default transit model
if tau is None:
tau = TransitShape(dur=0.1)
# The overfitting metrics
O1 = [None for brkpt in self.breakpoints]
O2 = [None for brkpt in self.breakpoints]
O3 = [None for brkpt in self.breakpoints]
O4 = [None for brkpt in self.breakpoints]
O5 = [None for brkpt in self.breakpoints]
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
# Masks for current chunk
m = self.get_masked_chunk(b, pad=False)
time = self.time[m]
ferr = self.fraw_err[m] / med
y = self.fraw[m] / med - 1
# The metrics we're computing here
O1[b] = np.zeros(len(y)) * np.nan
O2[b] = np.zeros(len(y)) * np.nan
O3[b] = np.zeros(len(y)) * np.nan
O4[b] = np.zeros(len(y)) * np.nan
O5[b] = np.zeros(len(y)) * np.nan
# Compute the astrophysical covariance and its inverse
log.info("Computing the covariance...")
if self.kernel == 'Basic':
wh, am, ta = self.kernel_params
wh /= med
am /= med
kernel_params = [wh, am, ta]
elif self.kernel == 'QuasiPeriodic':
wh, am, ga, pe = self.kernel_params
wh /= med
am /= med
kernel_params = [wh, am, ga, pe]
K = GetCovariance(self.kernel, kernel_params, time, ferr)
Kinv = cho_solve((cholesky(K), False), np.eye(len(time)))
# Loop over all orders
log.info("Computing some large matrices...")
X = [None for n in range(self.pld_order)]
XL = [None for n in range(self.pld_order)]
XLX = [None for n in range(self.pld_order)]
for n in range(self.pld_order):
if (self.lam_idx >= n) and (self.lam[b][n] is not None):
X[n] = self.X(n, m, **kwargs)
XL[n] = (self.lam[b][n] / med ** 2) * X[n]
XLX[n] = np.dot(XL[n], X[n].T)
X = np.hstack(X)
XL = np.hstack(XL)
XLX = np.sum(XLX, axis=0)
# The full covariance
C = XLX + K
# The unmasked linear problem
log.info("Solving the unmasked linear problem...")
m = np.dot(XLX, np.linalg.solve(C, y))
m -= np.nanmedian(m)
f = y - m
R = np.linalg.solve(C, XLX.T).T
# The masked linear problem
log.info("Solving the masked linear problem...")
A = MaskSolve(C, y, w=w)
# Now loop through and compute the metric
log.info("Computing the overfitting metrics...")
for n in prange(len(y)):
#
# *** Unmasked overfitting metric ***
#
# Evaluate the sparse transit model
TAU = tau(time, t0=time[n])
i = np.where(TAU < 0)[0]
TAU = TAU.reshape(-1, 1)
# Fast sparse algebra
AA = np.dot(np.dot(TAU[i].T, Kinv[i, :][:, i]), TAU[i])
BB = np.dot(TAU[i].T, Kinv[i, :])
CC = TAU - np.dot(R[:, i], TAU[i])
O1[b][n] = AA
O2[b][n] = np.dot(BB, CC)
O3[b][n] = np.dot(BB, f)
O4[b][n] = np.dot(BB, y)
#
# *** Masked overfitting metric ***
#
# The current mask and mask centerpoint
mask = np.arange(n, n + w)
j = n + (w + 1) // 2 - 1
if j >= len(y) - w:
continue
# The regularized design matrix
# This is the same as
# XLmX[:, n - 1] = \
# np.dot(XL, np.delete(X, mask, axis=0).T)[:, n - 1]
if n == 0:
XLmX = np.dot(XL, np.delete(X, mask, axis=0).T)
else:
XLmX[:, n - 1] = np.dot(XL, X[n - 1, :].T)
# The linear solution to this step
m = np.dot(XLmX, A[n])
# Evaluate the sparse transit model
TAU = tau(time, t0=time[j])
i = np.where(TAU < 0)[0]
TAU = TAU[i].reshape(-1, 1)
# Dot the transit model in
den = np.dot(np.dot(TAU.T, Kinv[i, :][:, i]), TAU)
num = np.dot(TAU.T, Kinv[i, :])
# Compute the overfitting metric
# Divide this number by a depth
# to get the overfitting for that
# particular depth.
O5[b][j] = -np.dot(num, y - m) / den
# Save!
np.savez(fname, O1=O1, O2=O2, O3=O3, O4=O4, O5=O5)
else:
data = np.load(fname)
O1 = data['O1']
O2 = data['O2']
O3 = data['O3']
O4 = data['O4']
O5 = data['O5']
# Plot
if plot and (clobber or not os.path.exists(figname + '_overfit.pdf')):
log.info("Plotting the overfitting metrics...")
# Masked time array
time = self.apply_mask(self.time)
# Plot the final corrected light curve
ovr = OVERFIT()
self.plot_info(ovr)
# Loop over the two metrics
for kind, axes, axesh in zip(['unmasked', 'masked'],
[ovr.axes1, ovr.axes2],
[ovr.axes1h, ovr.axes2h]):
# Loop over three depths
for depth, ax, axh in zip([0.01, 0.001, 0.0001], axes, axesh):
# Get the metric
if kind == 'unmasked':
metric = 1 - (np.hstack(O2) +
np.hstack(O3) / depth) / np.hstack(O1)
color = 'r'
elif kind == 'masked':
metric = np.hstack(O5) / depth
color = 'b'
else:
raise ValueError("Invalid metric.")
# Median and median absolute deviation
med = np.nanmedian(metric)
mad = np.nanmedian(np.abs(metric - med))
# Plot the metric as a function of time
ax.plot(time, metric, 'k.', alpha=0.5, ms=2)
ax.plot(time, metric, 'k-', alpha=0.1, lw=0.5)
ylim = (-0.2, 1.0)
ax.margins(0, None)
ax.axhline(0, color='k', lw=1, alpha=0.5)
ax.set_ylim(*ylim)
if kind == 'masked' and depth == 0.0001:
ax.set_xlabel('Time (days)', fontsize=14)
else:
ax.set_xticklabels([])
# Plot the histogram
rng = (max(ylim[0], np.nanmin(metric)),
min(ylim[1], np.nanmax(metric)))
axh.hist(metric, bins=30, range=rng,
orientation="horizontal",
histtype="step", fill=False, color='k')
axh.axhline(med, color=color, ls='-', lw=1)
axh.axhspan(med - mad, med + mad, color=color, alpha=0.1)
axh.axhline(0, color='k', lw=1, alpha=0.5)
axh.yaxis.tick_right()
axh.set_ylim(*ax.get_ylim())
axh.set_xticklabels([])
bbox = dict(fc="w", ec="1", alpha=0.5)
info = r"$\mathrm{med}=%.3f$" % med + \
"\n" + r"$\mathrm{mad}=%.3f$" % mad
axh.annotate(info, xy=(0.1, 0.925),
xycoords='axes fraction',
ha="left", va="top", bbox=bbox, color=color)
bbox = dict(fc="w", ec="1", alpha=0.95)
ax.annotate("%s overfitting metric" % kind,
xy=(1-0.035, 0.92),
xycoords='axes fraction',
ha='right', va='top',
bbox=bbox, color=color)
pl.figtext(0.025, 0.77, "depth = 0.01", rotation=90,
ha='left', va='center', fontsize=18)
pl.figtext(0.025, 0.48, "depth = 0.001", rotation=90,
ha='left', va='center', fontsize=18)
pl.figtext(0.025, 0.19, "depth = 0.0001", rotation=90,
ha='left', va='center', fontsize=18)
ovr.fig.savefig(figname + '_overfit.pdf')
log.info("Saved plot to %s_overfit.pdf" % figname)
pl.close()
return Overfitting(O1, O2, O3, O4, O5, figname + '_overfit.pdf') | 0.00018 |
def run(self, scenario):
"""Run the algorithm, utilizing a classifier set to choose the
most appropriate action for each situation produced by the
scenario. Improve the situation/action mapping on each reward
cycle to maximize reward. Return the classifier set that was
created.
Usage:
scenario = MUXProblem()
model = algorithm.run(scenario)
Arguments:
scenario: A Scenario instance.
Return:
A new classifier set, trained on the given scenario.
"""
assert isinstance(scenario, scenarios.Scenario)
model = self.new_model(scenario)
model.run(scenario, learn=True)
return model | 0.002747 |
def __presence_unavailable(self,stanza):
"""Process an unavailable presence from a MUC room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Presence`
:return: `True` if the stanza was properly recognized as generated by
one of the managed rooms, `False` otherwise.
:returntype: `bool`"""
fr=stanza.get_from()
key=fr.bare().as_unicode()
rs=self.rooms.get(key)
if not rs:
return False
rs.process_unavailable_presence(MucPresence(stanza))
return True | 0.009917 |
async def create(
cls, fabric: Union[Fabric, int], vid: int, *,
name: str = None, description: str = None, mtu: int = None,
relay_vlan: Union[Vlan, int] = None, dhcp_on: bool = False,
primary_rack: Union[RackController, str] = None,
secondary_rack: Union[RackController, str] = None,
space: Union[Space, int] = None):
"""
Create a `Vlan` in MAAS.
:param fabric: Fabric to create the VLAN on.
:type fabric: `Fabric` or `int`
:param vid: VID for the VLAN.
:type vid: `int`
:param name: The name of the VLAN (optional).
:type name: `str`
:param description: A description of the VLAN (optional).
:type description: `str`
:param mtu: The MTU for VLAN (optional, default of 1500 will be used).
:type mtu: `int`
:param relay_vlan: VLAN to relay this VLAN through.
:type relay_vlan: `Vlan` or `int`
:param dhcp_on: True turns the DHCP on, false keeps the DHCP off. True
requires that `primary_rack` is also set.
:type dhcp_on: `bool`
        :param primary_rack: Primary rack controller to run the DHCP
service on.
:type primary_rack: `RackController` or `int`
        :param secondary_rack: Secondary rack controller to run the DHCP
service on. This will enable HA operation of the DHCP service.
:type secondary_rack: `RackController` or `int`
:returns: The created VLAN.
:rtype: `Vlan`
"""
params = {}
if isinstance(fabric, int):
params['fabric_id'] = fabric
elif isinstance(fabric, Fabric):
params['fabric_id'] = fabric.id
else:
raise TypeError(
"fabric must be Fabric or int, not %s" % (
type(fabric).__class__))
params['vid'] = vid
if name is not None:
params['name'] = name
if description is not None:
params['description'] = description
if mtu is not None:
params['mtu'] = mtu
if relay_vlan is not None:
if isinstance(relay_vlan, int):
params['relay_vlan'] = relay_vlan
elif isinstance(relay_vlan, Vlan):
params['relay_vlan'] = relay_vlan.id
else:
raise TypeError(
"relay_vlan must be Vlan or int, not %s" % (
type(relay_vlan).__class__))
params['dhcp_on'] = dhcp_on
if primary_rack is not None:
if isinstance(primary_rack, str):
params['primary_rack'] = primary_rack
elif isinstance(primary_rack, RackController):
params['primary_rack'] = primary_rack.system_id
else:
raise TypeError(
"primary_rack must be RackController or str, not %s" % (
type(primary_rack).__class__))
if secondary_rack is not None:
if isinstance(secondary_rack, str):
params['secondary_rack'] = secondary_rack
elif isinstance(secondary_rack, RackController):
params['secondary_rack'] = secondary_rack.system_id
else:
raise TypeError(
"secondary_rack must be RackController or str, not %s" % (
type(secondary_rack).__class__))
if space is not None:
if isinstance(space, int):
params['space'] = space
elif isinstance(space, Space):
params['space'] = space.id
else:
raise TypeError(
"space must be Space or int, not %s" % (
type(space).__class__))
return cls._object(await cls._handler.create(**params)) | 0.000515 |
def assert_no_text(self, *args, **kwargs):
"""
Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
"""
query = TextQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_text():
count = query.resolve_for(self)
if matches_count(count, query.options) and (
count > 0 or expects_none(query.options)):
raise ExpectationNotMet(query.negative_failure_message)
return True
return assert_no_text() | 0.004474 |
def _get_password(params):
"""Get the password for a database connection from :mod:`keyring`
Args:
params (dict): database configuration, as defined in :mod:`ozelot.config`
Returns:
str: password
"""
user_name = params['user']
service_name = params['host'] + ':' + params['driver']
return keyring.get_password(service_name=service_name,
username=user_name) | 0.006329 |
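A companion sketch showing how the matching keyring entry could be stored beforehand, following the same service-name convention (host + ':' + driver); the configuration values here are assumptions.

import keyring

params = {'user': 'alice', 'host': 'localhost', 'driver': 'postgresql'}   # assumed config
keyring.set_password(params['host'] + ':' + params['driver'], params['user'], 's3cret')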
def create_organisation(self, organisation_json):
'''
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
'''
return trolly.organisation.Organisation(
trello_client=self,
organisation_id=organisation_json['id'],
name=organisation_json['name'],
data=organisation_json,
) | 0.004435 |
def free_parameter(self, name, par, free=True):
"""Free/Fix a parameter of a source by name.
Parameters
----------
name : str
Source name.
par : str
Parameter name.
"""
name = self.get_source_name(name)
if par in self._lck_params.get(name, []):
return
idx = self.like.par_index(name, par)
self.like[idx].setFree(free)
self._sync_params(name) | 0.004283 |
def get_task_cache(self, username, courseid, taskid):
"""
Shorthand for get_task_caches([username], courseid, taskid)[username]
"""
return self.get_task_caches([username], courseid, taskid)[username] | 0.008658 |
def set_cursor(self, col, row):
"""Move the cursor to an explicit column and row position."""
# Clamp row to the last row of the display.
if row > self._lines:
row = self._lines - 1
# Set location.
self.write8(LCD_SETDDRAMADDR | (col + LCD_ROW_OFFSETS[row])) | 0.006452 |
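A worked example of the command byte computed above, assuming the usual HD44780 constants LCD_SETDDRAMADDR = 0x80 and LCD_ROW_OFFSETS = (0x00, 0x40, 0x14, 0x54).

LCD_SETDDRAMADDR = 0x80                      # assumed HD44780 command constant
LCD_ROW_OFFSETS = (0x00, 0x40, 0x14, 0x54)   # assumed row start addresses
col, row = 5, 1
command = LCD_SETDDRAMADDR | (col + LCD_ROW_OFFSETS[row])   # 0x80 | 0x45 == 0xC5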
def get(self):
""" get method """
try:
cluster = self.get_argument_cluster()
role = self.get_argument_role()
environ = self.get_argument_environ()
topology_name = self.get_argument_topology()
component = self.get_argument_component()
metric_names = self.get_required_arguments_metricnames()
start_time = self.get_argument_starttime()
end_time = self.get_argument_endtime()
self.validateInterval(start_time, end_time)
instances = self.get_arguments(constants.PARAM_INSTANCE)
topology = self.tracker.getTopologyByClusterRoleEnvironAndName(
cluster, role, environ, topology_name)
metrics = yield tornado.gen.Task(metricstimeline.getMetricsTimeline,
topology.tmaster, component, metric_names,
instances, int(start_time), int(end_time))
self.write_success_response(metrics)
except Exception as e:
Log.debug(traceback.format_exc())
self.write_error_response(e) | 0.017225 |
def evaluate(data_loader):
"""Evaluate given the data loader
Parameters
----------
data_loader : DataLoader
Returns
-------
avg_loss : float
Average loss
real_translation_out : list of list of str
The translation output
"""
translation_out = []
all_inst_ids = []
avg_loss_denom = 0
avg_loss = 0.0
for _, (src_seq, tgt_seq, src_valid_length, tgt_valid_length, inst_ids) \
in enumerate(data_loader):
src_seq = src_seq.as_in_context(ctx)
tgt_seq = tgt_seq.as_in_context(ctx)
src_valid_length = src_valid_length.as_in_context(ctx)
tgt_valid_length = tgt_valid_length.as_in_context(ctx)
# Calculating Loss
out, _ = model(src_seq, tgt_seq[:, :-1], src_valid_length, tgt_valid_length - 1)
loss = loss_function(out, tgt_seq[:, 1:], tgt_valid_length - 1).mean().asscalar()
all_inst_ids.extend(inst_ids.asnumpy().astype(np.int32).tolist())
avg_loss += loss * (tgt_seq.shape[1] - 1)
avg_loss_denom += (tgt_seq.shape[1] - 1)
# Translate
samples, _, sample_valid_length =\
translator.translate(src_seq=src_seq, src_valid_length=src_valid_length)
max_score_sample = samples[:, 0, :].asnumpy()
sample_valid_length = sample_valid_length[:, 0].asnumpy()
for i in range(max_score_sample.shape[0]):
translation_out.append(
[tgt_vocab.idx_to_token[ele] for ele in
max_score_sample[i][1:(sample_valid_length[i] - 1)]])
avg_loss = avg_loss / avg_loss_denom
real_translation_out = [None for _ in range(len(all_inst_ids))]
for ind, sentence in zip(all_inst_ids, translation_out):
real_translation_out[ind] = sentence
return avg_loss, real_translation_out | 0.002201 |
def indices2one_hot(indices, nb_classes):
"""
Convert an iterable of indices to one-hot encoded list.
You might also be interested in sklearn.preprocessing.OneHotEncoder
Parameters
----------
indices : iterable
iterable of indices
nb_classes : int
Number of classes
Returns
-------
one_hot : list
Examples
--------
>>> indices2one_hot([0, 1, 1], 3)
[[1, 0, 0], [0, 1, 0], [0, 1, 0]]
>>> indices2one_hot([0, 1, 1], 2)
[[1, 0], [0, 1], [0, 1]]
"""
if nb_classes < 1:
raise ValueError('nb_classes={}, but positive number expected'
.format(nb_classes))
one_hot = []
for index in indices:
one_hot.append([0] * nb_classes)
one_hot[-1][index] = 1
return one_hot | 0.001212 |
def get_unique_constraint_declaration_sql(self, name, index):
"""
Obtains DBMS specific SQL code portion needed to set a unique
constraint declaration to be used in statements like CREATE TABLE.
:param name: The name of the unique constraint.
:type name: str
:param index: The index definition
:type index: Index
:return: DBMS specific SQL code portion needed to set a constraint.
:rtype: str
"""
columns = index.get_quoted_columns(self)
name = Identifier(name)
if not columns:
raise DBALException('Incomplete definition. "columns" required.')
return "CONSTRAINT %s UNIQUE (%s)%s" % (
name.get_quoted_name(self),
self.get_index_field_declaration_list_sql(columns),
self.get_partial_index_sql(index),
) | 0.002288 |
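For orientation, a call with an assumed constraint name and a single-column index (and an empty partial-index suffix) would return a string like:

# platform.get_unique_constraint_declaration_sql('uniq_user_email', index)
# -> 'CONSTRAINT uniq_user_email UNIQUE (email)'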
def confidence_intervals(self, X, width=.95, quantiles=None):
"""estimate confidence intervals for the model.
Parameters
----------
X : array-like of shape (n_samples, m_features)
Input data matrix
width : float on [0,1], optional
quantiles : array-like of floats in (0, 1), optional
            Instead of specifying the prediction width, one can specify the
quantiles. So ``width=.95`` is equivalent to ``quantiles=[.025, .975]``
Returns
-------
intervals: np.array of shape (n_samples, 2 or len(quantiles))
Notes
-----
Wood 2006, section 4.9
Confidence intervals based on section 4.8 rely on large sample results to deal with
non-Gaussian distributions, and treat the smoothing parameters as fixed, when in
reality they are estimated from the data.
"""
if not self._is_fitted:
raise AttributeError('GAM has not been fitted. Call fit first.')
X = check_X(X, n_feats=self.statistics_['m_features'],
edge_knots=self.edge_knots_, dtypes=self.dtype,
features=self.feature, verbose=self.verbose)
return self._get_quantiles(X, width, quantiles, prediction=False) | 0.003834 |
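A usage sketch of the width/quantiles equivalence noted in the docstring, assuming a fitted GAM instance `gam` and an input matrix `X` of the right shape.

# ci_width = gam.confidence_intervals(X, width=0.95)
# ci_quant = gam.confidence_intervals(X, quantiles=[0.025, 0.975])   # same intervals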
def get_draft_page_by_id(self, page_id, status='draft'):
"""
Provide content by id with status = draft
:param page_id:
:param status:
:return:
"""
url = 'rest/api/content/{page_id}?status={status}'.format(page_id=page_id, status=status)
return self.get(url) | 0.009346 |