repo_name (string, 6-100 chars) | path (string, 4-294 chars) | copies (string, 1-5 chars) | size (string, 4-6 chars) | content (string, 606-896k chars) | license (string, 15 classes)
---|---|---|---|---|---|
Argon-Zhou/django | django/conf/locale/de/formats.py | 504 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
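The `*_INPUT_FORMATS` lists above use plain strftime/strptime syntax, so they can be exercised without Django at all. The sketch below is not part of the file; it simply tries the German date formats in order, the way a Django form field would, assuming nothing beyond the standard library.

```python
from datetime import datetime

# Same literals as DATE_INPUT_FORMATS above (strftime/strptime syntax).
DATE_INPUT_FORMATS = ['%d.%m.%Y', '%d.%m.%y']

def parse_german_date(value):
    """Try each accepted input format in order; return the first match."""
    for fmt in DATE_INPUT_FORMATS:
        try:
            return datetime.strptime(value, fmt).date()
        except ValueError:
            continue
    raise ValueError("%r matches none of the accepted formats" % value)

print(parse_german_date('25.10.2006'))  # 2006-10-25
print(parse_german_date('25.10.06'))    # 2006-10-25
```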
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/distutils/tests/test_install_headers.py | 147 | 1264 | """Tests for distutils.command.install_headers."""
import sys
import os
import unittest
import getpass
from distutils.command.install_headers import install_headers
from distutils.tests import support
from test.support import run_unittest
class InstallHeadersTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_simple_run(self):
# we have two headers
header_list = self.mkdtemp()
header1 = os.path.join(header_list, 'header1')
header2 = os.path.join(header_list, 'header2')
self.write_file(header1)
self.write_file(header2)
headers = [header1, header2]
pkg_dir, dist = self.create_dist(headers=headers)
cmd = install_headers(dist)
self.assertEqual(cmd.get_inputs(), headers)
# let's run the command
cmd.install_dir = os.path.join(pkg_dir, 'inst')
cmd.ensure_finalized()
cmd.run()
# let's check the results
self.assertEqual(len(cmd.get_outputs()), 2)
def test_suite():
return unittest.makeSuite(InstallHeadersTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
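For context, the test above drives the `install_headers` command programmatically through `create_dist(headers=...)`. The user-facing equivalent is declaring headers in `setup.py`; the sketch below is a minimal, hypothetical example (file names invented) of how a project would normally hand header files to this command.

```python
# Hypothetical setup.py: the 'headers' argument is what the install_headers
# command copies when running `python setup.py install_headers`.
from distutils.core import setup

setup(
    name='demo',
    version='0.1',
    headers=['include/header1.h', 'include/header2.h'],
)
```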
Medigate/cutiuta-server | cutiuta-server/env/lib/python3.4/site-packages/pip/_vendor/lockfile/linklockfile.py | 466 | 2649 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class LinkLockFile(LockBase):
"""Lock access to a file using atomic property of link(2).
>>> lock = LinkLockFile('somefile')
>>> lock = LinkLockFile('somefile', threaded=False)
"""
def acquire(self, timeout=None):
try:
open(self.unique_name, "wb").close()
except IOError:
raise LockFailed("failed to create %s" % self.unique_name)
timeout = timeout is not None and timeout or self.timeout
end_time = time.time()
if timeout is not None and timeout > 0:
end_time += timeout
while True:
# Try and create a hard link to it.
try:
os.link(self.unique_name, self.lock_file)
except OSError:
# Link creation failed. Maybe we've double-locked?
nlinks = os.stat(self.unique_name).st_nlink
if nlinks == 2:
# The original link plus the one I created == 2. We're
# good to go.
return
else:
# Otherwise the lock creation failed.
if timeout is not None and time.time() > end_time:
os.unlink(self.unique_name)
if timeout > 0:
raise LockTimeout("Timeout waiting to acquire"
" lock for %s" %
self.path)
else:
raise AlreadyLocked("%s is already locked" %
self.path)
time.sleep(timeout is not None and timeout/10 or 0.1)
else:
# Link creation succeeded. We're good to go.
return
def release(self):
if not self.is_locked():
raise NotLocked("%s is not locked" % self.path)
elif not os.path.exists(self.unique_name):
raise NotMyLock("%s is locked, but not by me" % self.path)
os.unlink(self.unique_name)
os.unlink(self.lock_file)
def is_locked(self):
return os.path.exists(self.lock_file)
def i_am_locking(self):
return (self.is_locked() and
os.path.exists(self.unique_name) and
os.stat(self.unique_name).st_nlink == 2)
def break_lock(self):
if os.path.exists(self.lock_file):
os.unlink(self.lock_file)
| gpl-3.0 |
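The heart of `LinkLockFile.acquire()` is that `os.link()` either succeeds atomically or raises, and that a successful link leaves the per-process file with a link count of 2. The sketch below strips that mechanism out of the `LockBase` machinery; it is a simplified illustration (no timeouts or retries), not a replacement for the class above.

```python
import os

def try_link_lock(lock_file, unique_name):
    """Return True if we now hold the lock named by lock_file."""
    # Create a file unique to this process/thread, then try to hard-link it
    # to the shared lock path. link(2) is atomic: exactly one caller wins.
    open(unique_name, "wb").close()
    try:
        os.link(unique_name, lock_file)
        return True
    except OSError:
        # The link may already exist because *we* created it earlier
        # (double lock); that is what the st_nlink == 2 check detects.
        return os.stat(unique_name).st_nlink == 2

def unlock(lock_file, unique_name):
    os.unlink(unique_name)
    os.unlink(lock_file)
```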
Aasmi/scikit-learn | sklearn/feature_selection/variance_threshold.py | 238 | 2594 | # Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
| bsd-3-clause |
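Stripped of the estimator plumbing, `fit()` plus `_get_support_mask()` amount to "keep the columns whose variance exceeds the threshold". The snippet below reproduces the docstring example with plain NumPy only, as a sanity check rather than a substitute for the class.

```python
import numpy as np

X = np.array([[0, 2, 0, 3],
              [0, 1, 4, 3],
              [0, 1, 1, 3]], dtype=np.float64)

variances = np.var(X, axis=0)   # per-feature variance, as fit() computes
mask = variances > 0.0          # default threshold: drop zero-variance columns
print(X[:, mask])
# [[2. 0.]
#  [1. 4.]
#  [1. 1.]]
```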
XENON1T/pax | pax/trigger_plugins/FindSignals.py | 1 | 10320 | import numpy as np
import numba
from pax.trigger import TriggerPlugin
from pax.datastructure import TriggerSignal
from pax.dsputils import adc_to_pe
# Interrupts thrown by the signal finder
# Negative, since positive numbers indicate number of signals found during normal operation
SIGNAL_BUFFER_FULL = -1
SAVE_DARK_MONITOR_DATA = -2
class FindSignals(TriggerPlugin):
# How often did we save the dark rate since the last full (coincidence matrix) save?
dark_monitor_saves = 0
def startup(self):
# Initialize buffer for numba signal finding routine.
# Although we're able to extend this buffer as needed, we must do so outside numba
# and it involves copying data, so if you pick too small a buffer size you will hurt performance.
self.numba_signals_buffer = np.zeros(self.config['numba_signal_buffer_size'],
dtype=TriggerSignal.get_dtype())
# Initialize buffers for tallying pulses / coincidences
# Reason for +1 is again 'ghost' channels, see trigger.py
n_channels = self.trigger.pax_config['DEFAULT']['n_channels'] + 1
self.all_pulses_tally = np.zeros(n_channels, dtype=np.int)
self.lone_pulses_tally = np.zeros(n_channels, dtype=np.int)
self.coincidence_tally = np.zeros((n_channels, n_channels), dtype=np.int)
# Get conversion factor from ADC counts to pe for each pmt
# The 'ghost' PMT will have gain 1 always
self.gain_conversion_factors = np.array([adc_to_pe(self.trigger.pax_config['DEFAULT'], ch)
for ch in range(n_channels - 1)] +
[1])
# We must keep track of the next time to save the dark rate between batches, since a batch usually does not
# end exactly at a save time.
self.next_save_time = None
def process(self, data):
if self.next_save_time is None:
self.next_save_time = self.config['dark_rate_save_interval']
if len(data.pulses):
self.next_save_time += data.pulses['time'][0]
sigf = signal_finder(times=data.pulses,
signal_separation=self.config['signal_separation'],
signal_buffer=self.numba_signals_buffer,
next_save_time=self.next_save_time,
dark_rate_save_interval=self.config['dark_rate_save_interval'],
all_pulses_tally=self.all_pulses_tally,
lone_pulses_tally=self.lone_pulses_tally,
coincidence_tally=self.coincidence_tally,
gain_conversion_factors=self.gain_conversion_factors,
)
saved_buffers = []
for result in sigf:
if result >= 0:
n_signals_found = result
if len(saved_buffers):
self.log.debug("%d previous signal buffers were saved, concatenating and returning them." % (
len(saved_buffers)))
saved_buffers.append(self.numba_signals_buffer[:n_signals_found])
signals = np.concatenate(saved_buffers)
else:
signals = self.numba_signals_buffer[:n_signals_found]
break
elif result == SIGNAL_BUFFER_FULL:
self.log.debug("Signal buffer is full, copying it out.")
saved_buffers.append(self.numba_signals_buffer.copy())
elif result == SAVE_DARK_MONITOR_DATA:
self.save_dark_monitor_data()
self.next_save_time += self.config['dark_rate_save_interval']
else:
raise ValueError("Unknown signal finder interrupt %d!" % result)
if data.last_data:
self.save_dark_monitor_data(last_time=True)
self.log.debug("Signal finder finished on this data increment, found %d signals." % len(signals))
data.signals = signals
def save_dark_monitor_data(self, last_time=False):
# Save the PMT dark rate
self.log.debug("Saving pulse rate: %d pulses (of which %d lone pulses)" % (
self.all_pulses_tally.sum(), self.lone_pulses_tally.sum()))
self.trigger.save_monitor_data('count_of_all_pulses', self.all_pulses_tally)
self.all_pulses_tally *= 0
self.trigger.save_monitor_data('count_of_lone_pulses', self.lone_pulses_tally)
self.lone_pulses_tally *= 0
self.dark_monitor_saves += 1
if last_time or self.dark_monitor_saves == self.config['dark_monitor_full_save_every']:
# Save the full coincidence rate
self.log.debug("Saving coincidence tally matrix, total %d" % self.coincidence_tally.sum())
self.trigger.save_monitor_data('count_of_2pmt_coincidences', self.coincidence_tally)
self.dark_monitor_saves = 0
self.coincidence_tally *= 0
def signal_finder(times, signal_separation,
signal_buffer,
next_save_time, dark_rate_save_interval,
all_pulses_tally, lone_pulses_tally, coincidence_tally,
gain_conversion_factors):
"""Fill signal_buffer with signals in times. Other arguments:
- signal_separation: group pulses into signals separated by signal_separation.
- coincidence_tally: n x n matrix of zeros, where n is the number of channels; used to store 2-pmt coincidences
(with 1-pmt, i.e. dark rate, on diagonal)
- next_save_time: next time (in ns since start of run) the dark rate should be saved
- dark_rate_save_interval: yield SAVE_DARK_MONITOR every dark_rate_save_interval
Raises "interrupts" (yield numbers) to communicate with caller.
Online RMS algorithm is Knuth/Welford: https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
"""
# Allocate memory for some internal buffers (which we can't do in numba) we don't need outside the signal finder
n_channels = len(all_pulses_tally) # Actually this is 1 more than the number of connected channels, see above
does_channel_contribute = np.zeros(n_channels, dtype=np.int8) # Bool gives weird errors
area_per_channel = np.zeros(n_channels, dtype=np.float64)
return _signal_finder(times, signal_separation,
signal_buffer,
next_save_time, dark_rate_save_interval,
all_pulses_tally, lone_pulses_tally, coincidence_tally,
gain_conversion_factors,
area_per_channel, does_channel_contribute)
@numba.jit()
def _signal_finder(times, signal_separation,
signal_buffer,
next_save_time, dark_rate_save_interval,
all_pulses_tally, lone_pulses_tally, coincidence_tally,
gain_conversion_factors,
area_per_channel, does_channel_contribute):
"""Numba backend for signal_finder: please see its docstring instead."""
in_signal = False
passes_test = False # Does the current time pass the signal inclusion test?
current_signal = 0 # Index of the current signal in the signal buffer
m2 = 0.0 # Temporary variable for online RMS computation
if not len(times):
yield 0 # no point looking for events. Communicate no events found, then exit.
return
for time_index, _time in enumerate(times):
t = _time.time
pmt = _time.pmt
area = _time.area * gain_conversion_factors[pmt]
# Save the dark rate. Notice it's WHILE t >= next_save_time, which ensures we save a lot of zeroes when there is
# a large gap in the data. Let's hope nobody tries to pass t = float('inf')...
while t >= next_save_time:
yield SAVE_DARK_MONITOR_DATA
next_save_time += dark_rate_save_interval
is_last_time = time_index == len(times) - 1
if not is_last_time:
# Should this time be in a signal? === Is the next time close enough?
passes_test = times[time_index+1].time - t < signal_separation
if not in_signal and passes_test:
# Start a signal. We must clear all attributes first to remove (potential) old stuff from the buffer.
in_signal = True
s = signal_buffer[current_signal]
s.left_time = t
s.right_time = 0
s.time_mean = 0
s.time_rms = 0
s.n_pulses = 0
s.n_contributing_channels = 0
s.area = 0
area_per_channel *= 0
does_channel_contribute *= 0
if in_signal: # Notice if, not elif. Work on first time in signal too.
# Update signal quantities
s = signal_buffer[current_signal]
area_per_channel[pmt] += area
does_channel_contribute[pmt] = True
s.n_pulses += 1
delta = t - s.time_mean
s.time_mean += delta / s.n_pulses
m2 += delta * (t - s.time_mean) # Notice this isn't delta**2: time_mean changed on the previous line!
if not passes_test or is_last_time:
# Signal has ended: store its quantities and move on
s.right_time = t
s.time_rms = (m2 / s.n_pulses)**0.5
s.n_contributing_channels = does_channel_contribute.sum()
s.area = area_per_channel.sum()
if s.n_contributing_channels == 2:
indices = np.nonzero(does_channel_contribute)[0]
coincidence_tally[indices[0], indices[1]] += 1
current_signal += 1
m2 = 0
in_signal = False
if current_signal == len(signal_buffer):
yield SIGNAL_BUFFER_FULL
# Caller will have copied out the signal buffer, we can start from 0 again
current_signal = 0
else:
lone_pulses_tally[pmt] += 1
all_pulses_tally[pmt] += 1
# Let caller know number of signals found, then raise StopIteration
yield current_signal
| bsd-3-clause |
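Two ideas in `_signal_finder` are easy to miss inside the numba-friendly generator: pulses are grouped into one signal whenever consecutive times are closer than `signal_separation`, and each group's mean/RMS is accumulated online with the Knuth/Welford update cited in the docstring. The pure-Python sketch below shows both on a plain list of times; it is deliberately simplified (single pulses form one-pulse groups here, whereas the plugin tallies them separately as lone pulses).

```python
def group_and_summarize(times, signal_separation):
    """Group sorted pulse times into signals and compute mean/RMS online."""
    signals = []
    i = 0
    n = len(times)
    while i < n:
        # Start a new group at times[i]; Welford accumulators:
        n_pulses, mean, m2 = 0, 0.0, 0.0
        left = t = times[i]
        while True:
            n_pulses += 1
            delta = t - mean
            mean += delta / n_pulses
            m2 += delta * (t - mean)   # note: uses the *updated* mean
            if i + 1 < n and times[i + 1] - t < signal_separation:
                i += 1
                t = times[i]
            else:
                break
        signals.append(dict(left=left, right=t, n=n_pulses,
                            mean=mean, rms=(m2 / n_pulses) ** 0.5))
        i += 1
    return signals

print(group_and_summarize([0, 10, 12, 13, 500, 504], signal_separation=50))
```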
oblique-labs/pyVM | rpython/rtyper/normalizecalls.py | 1 | 16966 | from rpython.annotator import model as annmodel, description
from rpython.flowspace.argument import Signature
from rpython.flowspace.model import (Variable, Constant, Block, Link,
checkgraph, FunctionGraph, SpaceOperation)
from rpython.rlib.objectmodel import ComputedIntSymbolic
from rpython.rtyper.error import TyperError
from rpython.rtyper.rmodel import getgcflavor
from rpython.tool.sourcetools import valid_identifier
from rpython.annotator.classdesc import ClassDesc
def normalize_call_familes(annotator):
for callfamily in annotator.bookkeeper.pbc_maximal_call_families.infos():
if not callfamily.modified:
assert callfamily.normalized
continue
normalize_calltable(annotator, callfamily)
callfamily.normalized = True
callfamily.modified = False
def normalize_calltable(annotator, callfamily):
"""Try to normalize all rows of a table."""
nshapes = len(callfamily.calltables)
for shape, table in callfamily.calltables.items():
for row in table:
did_something = normalize_calltable_row_signature(annotator, shape,
row)
if did_something:
assert not callfamily.normalized, "change in call family normalisation"
if nshapes != 1:
raise_call_table_too_complex_error(callfamily, annotator)
while True:
progress = False
for shape, table in callfamily.calltables.items():
for row in table:
progress |= normalize_calltable_row_annotation(annotator,
row.values())
if not progress:
return # done
assert not callfamily.normalized, "change in call family normalisation"
def raise_call_table_too_complex_error(callfamily, annotator):
msg = []
items = callfamily.calltables.items()
for i, (shape1, table1) in enumerate(items):
for shape2, table2 in items[i + 1:]:
if shape1 == shape2:
continue
row1 = table1[0]
row2 = table2[0]
problematic_function_graphs = set(row1.values()).union(set(row2.values()))
pfg = [str(graph) for graph in problematic_function_graphs]
pfg.sort()
msg.append("the following functions:")
msg.append(" %s" % ("\n ".join(pfg), ))
msg.append("are called with inconsistent numbers of arguments")
msg.append("(and/or the argument names are different, which is"
" not supported in this case)")
if shape1[0] != shape2[0]:
msg.append("sometimes with %s arguments, sometimes with %s" % (shape1[0], shape2[0]))
else:
pass # XXX better message in this case
callers = []
msg.append("the callers of these functions are:")
for tag, (caller, callee) in annotator.translator.callgraph.iteritems():
if callee not in problematic_function_graphs:
continue
if str(caller) in callers:
continue
callers.append(str(caller))
callers.sort()
for caller in callers:
msg.append(" %s" % (caller, ))
raise TyperError("\n".join(msg))
def normalize_calltable_row_signature(annotator, shape, row):
graphs = row.values()
assert graphs, "no graph??"
sig0 = graphs[0].signature
defaults0 = graphs[0].defaults
for graph in graphs[1:]:
if graph.signature != sig0:
break
if graph.defaults != defaults0:
break
else:
return False # nothing to do, all signatures already match
shape_cnt, shape_keys, shape_star = shape
assert not shape_star, "should have been removed at this stage"
# for the first 'shape_cnt' arguments we need to generalize to
# a common type
call_nbargs = shape_cnt + len(shape_keys)
did_something = False
for graph in graphs:
argnames, varargname, kwargname = graph.signature
assert not varargname, "XXX not implemented"
assert not kwargname, "XXX not implemented" # ?
inputargs_s = [annotator.binding(v) for v in graph.getargs()]
argorder = range(shape_cnt)
for key in shape_keys:
i = list(argnames).index(key)
assert i not in argorder
argorder.append(i)
need_reordering = (argorder != range(call_nbargs))
if need_reordering or len(graph.getargs()) != call_nbargs:
oldblock = graph.startblock
inlist = []
defaults = graph.defaults or ()
num_nondefaults = len(inputargs_s) - len(defaults)
defaults = [description.NODEFAULT] * num_nondefaults + list(defaults)
newdefaults = []
for j in argorder:
v = Variable(graph.getargs()[j])
annotator.setbinding(v, inputargs_s[j])
inlist.append(v)
newdefaults.append(defaults[j])
newblock = Block(inlist)
# prepare the output args of newblock:
# 1. collect the positional arguments
outlist = inlist[:shape_cnt]
# 2. add defaults and keywords
for j in range(shape_cnt, len(inputargs_s)):
try:
i = argorder.index(j)
v = inlist[i]
except ValueError:
default = defaults[j]
if default is description.NODEFAULT:
raise TyperError(
"call pattern has %d positional arguments, "
"but %r takes at least %d arguments" % (
shape_cnt, graph.name, num_nondefaults))
v = Constant(default)
outlist.append(v)
newblock.closeblock(Link(outlist, oldblock))
graph.startblock = newblock
for i in range(len(newdefaults)-1,-1,-1):
if newdefaults[i] is description.NODEFAULT:
newdefaults = newdefaults[i:]
break
graph.defaults = tuple(newdefaults)
graph.signature = Signature([argnames[j] for j in argorder],
None, None)
# finished
checkgraph(graph)
annotator.annotated[newblock] = annotator.annotated[oldblock]
did_something = True
return did_something
def normalize_calltable_row_annotation(annotator, graphs):
if len(graphs) <= 1:
return False # nothing to do
graph_bindings = {}
for graph in graphs:
graph_bindings[graph] = [annotator.binding(v)
for v in graph.getargs()]
iterbindings = graph_bindings.itervalues()
nbargs = len(iterbindings.next())
for binding in iterbindings:
assert len(binding) == nbargs
generalizedargs = []
for i in range(nbargs):
args_s = []
for graph, bindings in graph_bindings.items():
args_s.append(bindings[i])
s_value = annmodel.unionof(*args_s)
generalizedargs.append(s_value)
result_s = [annotator.binding(graph.getreturnvar())
for graph in graph_bindings]
generalizedresult = annmodel.unionof(*result_s)
conversion = False
for graph in graphs:
bindings = graph_bindings[graph]
need_conversion = (generalizedargs != bindings)
if need_conversion:
conversion = True
oldblock = graph.startblock
inlist = []
for j, s_value in enumerate(generalizedargs):
v = Variable(graph.getargs()[j])
annotator.setbinding(v, s_value)
inlist.append(v)
newblock = Block(inlist)
# prepare the output args of newblock and link
outlist = inlist[:]
newblock.closeblock(Link(outlist, oldblock))
graph.startblock = newblock
# finished
checkgraph(graph)
annotator.annotated[newblock] = annotator.annotated[oldblock]
# convert the return value too
if annotator.binding(graph.getreturnvar()) != generalizedresult:
conversion = True
annotator.setbinding(graph.getreturnvar(), generalizedresult)
return conversion
# ____________________________________________________________
def merge_classpbc_getattr_into_classdef(annotator):
# code like 'some_class.attr' will record an attribute access in the
# PBC access set of the family of classes of 'some_class'. If the classes
# have corresponding ClassDefs, they are not updated by the annotator.
# We have to do it now.
all_families = annotator.bookkeeper.classpbc_attr_families
for attrname, access_sets in all_families.items():
for access_set in access_sets.infos():
descs = access_set.descs
if len(descs) <= 1:
continue
if not isinstance(descs.iterkeys().next(), ClassDesc):
continue
classdefs = [desc.getuniqueclassdef() for desc in descs]
commonbase = classdefs[0]
for cdef in classdefs[1:]:
commonbase = commonbase.commonbase(cdef)
if commonbase is None:
raise TyperError("reading attribute %r: no common base "
"class for %r" % (attrname, descs.keys()))
extra_access_sets = commonbase.extra_access_sets
if commonbase.repr is not None:
assert access_set in extra_access_sets # minimal sanity check
continue
access_set.commonbase = commonbase
if access_set not in extra_access_sets:
counter = len(extra_access_sets)
extra_access_sets[access_set] = attrname, counter
# ____________________________________________________________
def create_class_constructors(annotator):
bk = annotator.bookkeeper
call_families = bk.pbc_maximal_call_families
for family in call_families.infos():
if len(family.descs) <= 1:
continue
descs = family.descs.keys()
if not isinstance(descs[0], ClassDesc):
continue
# Note that if classes are in the same callfamily, their __init__
# attribute must be in the same attrfamily as well.
change = descs[0].mergeattrfamilies(descs[1:], '__init__')
if hasattr(descs[0].getuniqueclassdef(), 'my_instantiate_graph'):
assert not change, "after the fact change to a family of classes" # minimal sanity check
continue
# Put __init__ into the attr family, for ClassesPBCRepr.call()
attrfamily = descs[0].getattrfamily('__init__')
inits_s = [desc.s_read_attribute('__init__') for desc in descs]
s_value = annmodel.unionof(attrfamily.s_value, *inits_s)
attrfamily.s_value = s_value
# ClassesPBCRepr.call() will also need instantiate() support
for desc in descs:
bk.needs_generic_instantiate[desc.getuniqueclassdef()] = True
# ____________________________________________________________
def create_instantiate_functions(annotator):
# build the 'instantiate() -> instance of C' functions for the vtables
needs_generic_instantiate = annotator.bookkeeper.needs_generic_instantiate
for classdef in needs_generic_instantiate:
assert getgcflavor(classdef) == 'gc' # only gc-case
create_instantiate_function(annotator, classdef)
def create_instantiate_function(annotator, classdef):
# build the graph of a function that looks like
#
# def my_instantiate():
# return instantiate(cls)
#
if hasattr(classdef, 'my_instantiate_graph'):
return
v = Variable()
block = Block([])
block.operations.append(SpaceOperation('instantiate1', [], v))
name = valid_identifier('instantiate_' + classdef.name)
graph = FunctionGraph(name, block)
block.closeblock(Link([v], graph.returnblock))
annotator.setbinding(v, annmodel.SomeInstance(classdef))
annotator.annotated[block] = graph
# force the result to be converted to a generic OBJECTPTR
generalizedresult = annmodel.SomeInstance(classdef=None)
annotator.setbinding(graph.getreturnvar(), generalizedresult)
classdef.my_instantiate_graph = graph
annotator.translator.graphs.append(graph)
# ____________________________________________________________
class TooLateForNewSubclass(Exception):
pass
class TotalOrderSymbolic(ComputedIntSymbolic):
def __init__(self, orderwitness, peers):
self.orderwitness = orderwitness
self.peers = peers
self.value = None
self._with_subclasses = None # unknown
peers.append(self)
def __cmp__(self, other):
if not isinstance(other, TotalOrderSymbolic):
return cmp(self.compute_fn(), other)
else:
return cmp(self.orderwitness, other.orderwitness)
# support for implementing int_between: (a<=b<c) with (b-a<c-a)
# see rpython.jit.metainterp.pyjitpl.opimpl_int_between
def __sub__(self, other):
return self.compute_fn() - other
def __rsub__(self, other):
return other - self.compute_fn()
def check_any_subclass_in_peer_list(self, i):
# check if the next peer, in order, is or not the end
# marker for this start marker
assert self.peers[i] is self
return self.peers[i + 1].orderwitness != self.orderwitness + [MAX]
def number_with_subclasses(self):
# Return True or False depending on whether this is the
# subclassrange_min corresponding to a class which has subclasses
# or not. If this is called and returns False, then adding later
# new subclasses will crash in compute_fn().
if self._with_subclasses is None: # unknown so far
self.peers.sort()
i = self.peers.index(self)
self._with_subclasses = self.check_any_subclass_in_peer_list(i)
return self._with_subclasses
def compute_fn(self):
if self.value is None:
self.peers.sort()
for i, peer in enumerate(self.peers):
assert peer.value is None or peer.value == i
peer.value = i
#
if peer._with_subclasses is False:
if peer.check_any_subclass_in_peer_list(i):
raise TooLateForNewSubclass
#
assert self.value is not None
return self.value
def dump(self, annotator): # for debugging
self.peers.sort()
mapping = {}
for classdef in annotator.bookkeeper.classdefs:
if hasattr(classdef, '_unique_cdef_id'):
mapping[classdef._unique_cdef_id] = classdef
for peer in self.peers:
if peer is self:
print '==>',
else:
print ' ',
print 'value %4s --' % (peer.value,), peer.orderwitness,
if peer.orderwitness[-1] in mapping:
print mapping[peer.orderwitness[-1]]
else:
print
def assign_inheritance_ids(annotator):
# we sort the classes by lexicographic order of reversed(mro),
# which gives a nice depth-first order. The classes are turned
# into numbers in order to (1) help determinism, (2) ensure that
# new hierarchies of classes with no common base classes can be
# added later and get higher numbers.
bk = annotator.bookkeeper
try:
lst = bk._inheritance_id_symbolics
except AttributeError:
lst = bk._inheritance_id_symbolics = []
for classdef in annotator.bookkeeper.classdefs:
if not hasattr(classdef, 'minid'):
witness = [get_unique_cdef_id(cdef) for cdef in classdef.getmro()]
witness.reverse()
classdef.minid = TotalOrderSymbolic(witness, lst)
classdef.maxid = TotalOrderSymbolic(witness + [MAX], lst)
MAX = 1E100
_cdef_id_counter = 0
def get_unique_cdef_id(cdef):
global _cdef_id_counter
try:
return cdef._unique_cdef_id
except AttributeError:
cdef._unique_cdef_id = _cdef_id_counter
_cdef_id_counter += 1
return cdef._unique_cdef_id
# ____________________________________________________________
def perform_normalizations(annotator):
create_class_constructors(annotator)
annotator.frozen += 1
try:
normalize_call_familes(annotator)
merge_classpbc_getattr_into_classdef(annotator)
assign_inheritance_ids(annotator)
finally:
annotator.frozen -= 1
create_instantiate_functions(annotator)
| mit |
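The `minid`/`maxid` pair that `assign_inheritance_ids` attaches to each classdef implements the classic range-based subclass test: classes are numbered depth-first (via the reversed-MRO order witnesses), so every subclass of C receives a number inside C's interval. The toy below, with invented classes and ordinary integers instead of `TotalOrderSymbolic`, shows why a single comparison then suffices; it is an illustration of the idea, not RPython code.

```python
def assign_ranges(cls, table, counter=0):
    """Number classes depth-first; record (minid, maxid) per class so that
    B is a subclass of A iff table[A][0] <= table[B][0] < table[A][1]."""
    minid = counter
    counter += 1
    for sub in cls.__subclasses__():
        counter = assign_ranges(sub, table, counter)
    table[cls] = (minid, counter)
    return counter

class A(object): pass
class B(A): pass
class C(B): pass
class D(A): pass

table = {}
assign_ranges(A, table)

def is_subclass(x, y):
    # One integer range check instead of walking the MRO.
    return table[y][0] <= table[x][0] < table[y][1]

print(is_subclass(C, A), is_subclass(D, B))   # True False
```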
kleientertainment/ds_mod_tools | pkg/win32/Python27/Lib/CGIHTTPServer.py | 2 | 13514 | """CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
import copy
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Complete HTTP server with GET, HEAD and POST commands.
GET and HEAD also support running CGI scripts.
The POST command is *only* implemented for CGI scripts.
"""
# Determine platform specifics
have_fork = hasattr(os, 'fork')
have_popen2 = hasattr(os, 'popen2')
have_popen3 = hasattr(os, 'popen3')
# Make rfile unbuffered -- we need to read one line and then pass
# the rest to a subprocess, so we can't use buffered input.
rbufsize = 0
def do_POST(self):
"""Serve a POST request.
This is only implemented for CGI scripts.
"""
if self.is_cgi():
self.run_cgi()
else:
self.send_error(501, "Can only POST to CGI scripts")
def send_head(self):
"""Version of send_head that support CGI scripts"""
if self.is_cgi():
return self.run_cgi()
else:
return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
def is_cgi(self):
"""Test whether self.path corresponds to a CGI script.
Returns True and updates the cgi_info attribute to the tuple
(dir, rest) if self.path requires running a CGI script.
Returns False otherwise.
If any exception is raised, the caller should assume that
self.path was rejected as invalid and act accordingly.
The default implementation tests whether the normalized url
path begins with one of the strings in self.cgi_directories
(and the next character is a '/' or the end of the string).
"""
collapsed_path = _url_collapse_path(self.path)
dir_sep = collapsed_path.find('/', 1)
head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
if head in self.cgi_directories:
self.cgi_info = head, tail
return True
return False
cgi_directories = ['/cgi-bin', '/htbin']
def is_executable(self, path):
"""Test whether argument path is an executable file."""
return executable(path)
def is_python(self, path):
"""Test whether argument path is a Python script."""
head, tail = os.path.splitext(path)
return tail.lower() in (".py", ".pyw")
def run_cgi(self):
"""Execute a CGI script."""
path = self.path
dir, rest = self.cgi_info
i = path.find('/', len(dir) + 1)
while i >= 0:
nextdir = path[:i]
nextrest = path[i+1:]
scriptdir = self.translate_path(nextdir)
if os.path.isdir(scriptdir):
dir, rest = nextdir, nextrest
i = path.find('/', len(dir) + 1)
else:
break
# find an explicit query string, if present.
i = rest.rfind('?')
if i >= 0:
rest, query = rest[:i], rest[i+1:]
else:
query = ''
# dissect the part after the directory name into a script name &
# a possible additional path, to be stored in PATH_INFO.
i = rest.find('/')
if i >= 0:
script, rest = rest[:i], rest[i:]
else:
script, rest = rest, ''
scriptname = dir + '/' + script
scriptfile = self.translate_path(scriptname)
if not os.path.exists(scriptfile):
self.send_error(404, "No such CGI script (%r)" % scriptname)
return
if not os.path.isfile(scriptfile):
self.send_error(403, "CGI script is not a plain file (%r)" %
scriptname)
return
ispy = self.is_python(scriptname)
if not ispy:
if not (self.have_fork or self.have_popen2 or self.have_popen3):
self.send_error(403, "CGI script is not a Python script (%r)" %
scriptname)
return
if not self.is_executable(scriptfile):
self.send_error(403, "CGI script is not executable (%r)" %
scriptname)
return
# Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
# XXX Much of the following could be prepared ahead of time!
env = copy.deepcopy(os.environ)
env['SERVER_SOFTWARE'] = self.version_string()
env['SERVER_NAME'] = self.server.server_name
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
env['SERVER_PROTOCOL'] = self.protocol_version
env['SERVER_PORT'] = str(self.server.server_port)
env['REQUEST_METHOD'] = self.command
uqrest = urllib.unquote(rest)
env['PATH_INFO'] = uqrest
env['PATH_TRANSLATED'] = self.translate_path(uqrest)
env['SCRIPT_NAME'] = scriptname
if query:
env['QUERY_STRING'] = query
host = self.address_string()
if host != self.client_address[0]:
env['REMOTE_HOST'] = host
env['REMOTE_ADDR'] = self.client_address[0]
authorization = self.headers.getheader("authorization")
if authorization:
authorization = authorization.split()
if len(authorization) == 2:
import base64, binascii
env['AUTH_TYPE'] = authorization[0]
if authorization[0].lower() == "basic":
try:
authorization = base64.decodestring(authorization[1])
except binascii.Error:
pass
else:
authorization = authorization.split(':')
if len(authorization) == 2:
env['REMOTE_USER'] = authorization[0]
# XXX REMOTE_IDENT
if self.headers.typeheader is None:
env['CONTENT_TYPE'] = self.headers.type
else:
env['CONTENT_TYPE'] = self.headers.typeheader
length = self.headers.getheader('content-length')
if length:
env['CONTENT_LENGTH'] = length
referer = self.headers.getheader('referer')
if referer:
env['HTTP_REFERER'] = referer
accept = []
for line in self.headers.getallmatchingheaders('accept'):
if line[:1] in "\t\n\r ":
accept.append(line.strip())
else:
accept = accept + line[7:].split(',')
env['HTTP_ACCEPT'] = ','.join(accept)
ua = self.headers.getheader('user-agent')
if ua:
env['HTTP_USER_AGENT'] = ua
co = filter(None, self.headers.getheaders('cookie'))
if co:
env['HTTP_COOKIE'] = ', '.join(co)
# XXX Other HTTP_* headers
# Since we're setting the env in the parent, provide empty
# values to override previously set values
for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
'HTTP_USER_AGENT', 'HTTP_COOKIE', 'HTTP_REFERER'):
env.setdefault(k, "")
self.send_response(200, "Script output follows")
decoded_query = query.replace('+', ' ')
if self.have_fork:
# Unix -- fork as we should
args = [script]
if '=' not in decoded_query:
args.append(decoded_query)
nobody = nobody_uid()
self.wfile.flush() # Always flush before forking
pid = os.fork()
if pid != 0:
# Parent
pid, sts = os.waitpid(pid, 0)
# throw away additional data [see bug #427345]
while select.select([self.rfile], [], [], 0)[0]:
if not self.rfile.read(1):
break
if sts:
self.log_error("CGI script exit status %#x", sts)
return
# Child
try:
try:
os.setuid(nobody)
except os.error:
pass
os.dup2(self.rfile.fileno(), 0)
os.dup2(self.wfile.fileno(), 1)
os.execve(scriptfile, args, env)
except:
self.server.handle_error(self.request, self.client_address)
os._exit(127)
else:
# Non Unix - use subprocess
import subprocess
cmdline = [scriptfile]
if self.is_python(scriptfile):
interp = sys.executable
if interp.lower().endswith("w.exe"):
# On Windows, use python.exe, not pythonw.exe
interp = interp[:-5] + interp[-4:]
cmdline = [interp, '-u'] + cmdline
if '=' not in query:
cmdline.append(query)
self.log_message("command: %s", subprocess.list2cmdline(cmdline))
try:
nbytes = int(length)
except (TypeError, ValueError):
nbytes = 0
p = subprocess.Popen(cmdline,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env
)
if self.command.lower() == "post" and nbytes > 0:
data = self.rfile.read(nbytes)
else:
data = None
# throw away additional data [see bug #427345]
while select.select([self.rfile._sock], [], [], 0)[0]:
if not self.rfile._sock.recv(1):
break
stdout, stderr = p.communicate(data)
self.wfile.write(stdout)
if stderr:
self.log_error('%s', stderr)
p.stderr.close()
p.stdout.close()
status = p.returncode
if status:
self.log_error("CGI script exit status %#x", status)
else:
self.log_message("CGI script exited OK")
def _url_collapse_path(path):
"""
Given a URL path, remove extra '/'s and '.' path elements and collapse
any '..' references, returning the collapsed path.
Implements something akin to RFC-2396 5.2 step 6 to parse relative paths.
The utility of this function is limited to the is_cgi method and helps
prevent some security attacks.
Returns: The collapsed path as a single string, which will always start
with a '/'.
Raises: IndexError if too many '..' occur within the path.
"""
# Similar to os.path.split(os.path.normpath(path)) but specific to URL
# path semantics rather than local operating system semantics.
path_parts = path.split('/')
head_parts = []
for part in path_parts[:-1]:
if part == '..':
head_parts.pop() # IndexError if more '..' than prior parts
elif part and part != '.':
head_parts.append( part )
if path_parts:
tail_part = path_parts.pop()
if tail_part:
if tail_part == '..':
head_parts.pop()
tail_part = ''
elif tail_part == '.':
tail_part = ''
else:
tail_part = ''
splitpath = ('/' + '/'.join(head_parts), tail_part)
collapsed_path = "/".join(splitpath)
return collapsed_path
nobody = None
def nobody_uid():
"""Internal routine to get nobody's uid"""
global nobody
if nobody:
return nobody
try:
import pwd
except ImportError:
return -1
try:
nobody = pwd.getpwnam('nobody')[2]
except KeyError:
nobody = 1 + max(map(lambda x: x[2], pwd.getpwall()))
return nobody
def executable(path):
"""Test for executable file."""
try:
st = os.stat(path)
except os.error:
return False
return st.st_mode & 0111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
| mit |
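`_url_collapse_path` is the security-sensitive piece: `is_cgi()` only compares the collapsed path against `cgi_directories`, so the different spellings below all normalize to the same thing before the prefix check. This is a small demonstration (Python 2, like the module), not part of the file.

```python
# Python 2, matching the module above.
from CGIHTTPServer import _url_collapse_path

for raw in ('/cgi-bin//script.py',
            '/cgi-bin/../cgi-bin/script.py',
            '/./cgi-bin/sub/../script.py'):
    # All three collapse to '/cgi-bin/script.py' before the prefix check.
    print raw, '->', _url_collapse_path(raw)
```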
betatim/osf-cli | osfclient/__main__.py | 1 | 4648 | from __future__ import print_function
import sys
import six
import argparse
from textwrap import dedent
from .cli import clone, fetch, list_, remove, upload, init
from . import __version__
def main():
description = dedent("""
osf is a command-line program to upload and download
files from osf.io.
These are common osf commands:
init Set up a .osfcli.config file
clone Copy all files from all storages of a project
fetch Fetch an individual file from a project
list List all files from all storages for a project
upload Upload a new file to an existing project
remove Remove a file from a project's storage
See 'osf <command> -h' to read about a specific command.
""")
parser = argparse.ArgumentParser(
description=description,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', '--username', default=None,
help=('OSF username. Provide your password via '
'OSF_PASSWORD environment variable'))
parser.add_argument('-p', '--project', default=None,
help='OSF project ID')
parser.add_argument('-v', '--version', action='version',
version='%(prog)s {}'.format(__version__))
# dest=command stores the name of the command in a variable, this is
# used later on to retrieve the correct sub-parser
subparsers = parser.add_subparsers(dest='command')
# Clone project
clone_parser = subparsers.add_parser(
'clone', description=clone.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
clone_parser.set_defaults(func=clone)
clone_parser.add_argument('output', help='Write files to this directory',
default=None, nargs='?')
def _add_subparser(name, description, aliases=[]):
options = {
'description': description,
'formatter_class': argparse.RawDescriptionHelpFormatter,
}
if six.PY3:
options['aliases'] = aliases
return subparsers.add_parser(name, **options)
init_parser = _add_subparser('init', init.__doc__)
init_parser.set_defaults(func=init)
# Fetch an individual file
fetch_parser = _add_subparser('fetch', fetch.__doc__)
fetch_parser.set_defaults(func=fetch)
fetch_parser.add_argument('-f', '--force',
help='Force overwriting of local file',
action='store_true')
fetch_parser.add_argument('remote', help='Remote path',
default=None)
fetch_parser.add_argument('local', help='Local path',
default=None, nargs='?')
# List all files in a project
list_parser = _add_subparser('list', list.__doc__, aliases=['ls'])
list_parser.set_defaults(func=list_)
# Upload a single file or a directory tree
upload_parser = _add_subparser('upload', upload.__doc__)
upload_parser.set_defaults(func=upload)
upload_parser.add_argument('-f', '--force',
help='Force overwriting of remote file',
action='store_true')
upload_parser.add_argument('-r', '--recursive',
help='Recursively upload entire directories',
action='store_true')
upload_parser.add_argument('source', help='Local file')
upload_parser.add_argument('destination', help='Remote file path')
# Remove a single file
remove_parser = _add_subparser('remove', remove.__doc__, aliases=['rm'])
remove_parser.set_defaults(func=remove)
remove_parser.add_argument('target', help='Remote file path')
# Python2 argparse exits with an error when no command is given
if six.PY2 and len(sys.argv) == 1:
parser.print_help()
return
args = parser.parse_args()
if 'func' in args:
# give functions a chance to influence the exit code
# this setup is so we can print usage for the sub command
# even if there was an error further down
try:
exit_code = args.func(args)
except SystemExit as e:
exit_code = e.code
if exit_code is not None:
sub_parser = subparsers.choices[args.command]
sub_parser.print_usage(file=sys.stderr)
print('{} {}: error:'.format(parser.prog, args.command),
file=sys.stderr, end=' ')
sys.exit(exit_code)
else:
parser.print_help()
if __name__ == "__main__":
main()
| bsd-3-clause |
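The parser above follows the common argparse dispatch pattern: each sub-command stores its handler with `set_defaults(func=...)`, and `main()` calls `args.func(args)` only if a command was parsed. The self-contained sketch below (not osfclient code; names invented) isolates that pattern on Python 3, where the `aliases` keyword exists, which is why the file guards it behind `six.PY3`.

```python
import argparse

def ls(args):
    # Stand-in handler; osfclient's real handlers live in osfclient.cli.
    print('would list project %s' % args.project)

parser = argparse.ArgumentParser(prog='demo')
parser.add_argument('-p', '--project', default=None)
subparsers = parser.add_subparsers(dest='command')
ls_parser = subparsers.add_parser('list', aliases=['ls'])
ls_parser.set_defaults(func=ls)

args = parser.parse_args(['-p', 'abc12', 'ls'])
if hasattr(args, 'func'):
    args.func(args)          # -> "would list project abc12"
```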
culot/homebooking | storage.py | 1 | 15856 | import logging as log
import sqlite3
class Database():
"Class to manage interactions with database"
def __init__(self):
self.connection = sqlite3.connect('book.db')
self.connection.row_factory = sqlite3.Row
try:
self.sanity_checks()
except Exception:
self.create_schema()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.connection.close()
def sanity_checks(self):
cursor = self.connection.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='BOOKINGS'")
if cursor.fetchone() == None:
log.info('Missing database schema, creating it')
raise RuntimeError('Missing schema')
def create_schema(self):
cursor = self.connection.cursor()
cursor.executescript('''
CREATE TABLE ROOMS (ROOM_ID INTEGER PRIMARY KEY,
NAME TEXT UNIQUE NOT NULL);
CREATE UNIQUE INDEX IDX_ROOMS ON ROOMS(NAME);
CREATE TABLE GUESTS (GUEST_ID INTEGER PRIMARY KEY,
NICKNAME TEXT UNIQUE NOT NULL,
FIRST_NAME TEXT,
LAST_NAME TEXT);
CREATE UNIQUE INDEX IDX_GUESTS ON GUESTS(NICKNAME);
CREATE TABLE FEATURES (FEATURE_ID INTEGER PRIMARY KEY,
NAME TEXT UNIQUE NOT NULL,
DESC TEXT);
CREATE UNIQUE INDEX IDX_FEATURES ON FEATURES(NAME);
CREATE TABLE BEDS (BED_ID INTEGER PRIMARY KEY,
NAME TEXT UNIQUE NOT NULL,
CAPACITY INTEGER NOT NULL,
FEATURE_ID INTEGER,
ROOM_ID INTEGER,
FOREIGN KEY(FEATURE_ID) REFERENCES FEATURES(FEATURE_ID),
FOREIGN KEY(ROOM_ID) REFERENCES ROOMS(ROOM_ID));
CREATE UNIQUE INDEX IDX_BEDS ON BEDS(NAME);
CREATE TABLE BOOKINGS (BOOKING_ID INTEGER PRIMARY KEY,
GUEST_ID INTEGER NOT NULL,
BED_ID INTEGER NOT NULL,
DATE TEXT NOT NULL,
FOREIGN KEY(GUEST_ID) REFERENCES GUESTS(GUEST_ID),
FOREIGN KEY(BED_ID) REFERENCES BEDS(BED_ID));
CREATE UNIQUE INDEX IDX_BOOKINGS ON BOOKINGS(GUEST_ID, BED_ID, DATE);
''')
def add_room(self, name):
log.info('Adding room [%s] to the database', name)
cursor = self.connection.cursor()
cursor.execute("INSERT INTO ROOMS (NAME) VALUES (:ROOM_NAME)", {"ROOM_NAME": name})
self.connection.commit()
def add_feature(self, name, desc = None):
log.info('Adding feature [%s] to the database', name)
cursor = self.connection.cursor()
cursor.execute("INSERT INTO FEATURES (NAME, DESC) VALUES (:FEATURE_NAME,:FEATURE_DESC)",
{"FEATURE_NAME": name, "FEATURE_DESC": desc})
self.connection.commit()
def add_guest(self, nick, first_name = None, last_name = None):
log.info('Adding guest [%s] to the database', nick)
cursor = self.connection.cursor()
cursor.execute("INSERT INTO GUESTS (NICKNAME, FIRST_NAME, LAST_NAME) VALUES (:NICKNAME,:FIRST_NAME,:LAST_NAME)",
{"NICKNAME": nick, "FIRST_NAME": first_name, "LAST_NAME": last_name})
self.connection.commit()
def add_bed(self, name, capacity, room, feature = None):
log.info('Adding bed [%s] to the database', name)
# First check that the room and feature exists and fetch the corresponding ids
try:
room_id = self._get_room_id(room)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
if feature:
try:
feature_id = self._get_feature_id(feature)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
else:
feature_id = None
cursor = self.connection.cursor()
cursor.execute("INSERT INTO BEDS (NAME,CAPACITY,FEATURE_ID,ROOM_ID) VALUES (:NAME,:CAPACITY,:FEATURE,:ROOM)",{"NAME":name,"CAPACITY":capacity,"FEATURE":feature_id,"ROOM":room_id})
self.connection.commit()
def register(self, guest, bed, date):
log.info('Registering guest [%s] for bed [%s] on [%s]')
try:
guest_id = self._get_guest_id(guest)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
try:
bed_id = self._get_bed_id(bed)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
cursor = self.connection.cursor()
cursor.execute("INSERT INTO BOOKINGS (GUEST_ID,BED_ID,DATE) VALUES (:GUEST,:BED,:DATE)",{"GUEST":guest_id,"BED":bed_id,"DATE":date})
self.connection.commit()
def unregister(self, guest, bed, date):
log.info('Unregistering guest [%s] for bed [%s] on [%s]')
try:
booking_id = self._get_booking_id(guest, bed, date)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
cursor = self.connection.cursor()
cursor.execute("DELETE FROM BOOKINGS WHERE BOOKING_ID = :ID",{"ID":booking_id})
self.connection.commit()
def remove_bed(self, name):
log.info('Removing bed [%s] from the database', name)
cursor = self.connection.cursor()
try:
bed_id = self._get_bed_id(name)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
# Now check if bookings exist for this bed, in which case they must be removed first
cursor.execute("SELECT COUNT(*) AS NB_BOOKINGS FROM BOOKINGS WHERE BED_ID = :ID",{"ID":bed_id})
resultset = cursor.fetchone()
if resultset != None:
nb_bookings = resultset["NB_BOOKINGS"]
if nb_bookings != 0:
print "Some bookings exist for this bed, please remove them first!"
log.warn('Bookings registered for bed [%s], can\'t remove it', name)
exit(1)
cursor.execute("DELETE FROM BEDS WHERE BED_ID = :ID",{"ID":bed_id})
self.connection.commit()
def remove_feature(self, name):
log.info('Removing feature [%s] from the database', name)
cursor = self.connection.cursor()
try:
feature_id = self._get_feature_id(name)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
# Now check if beds have this feature, in which case they must be removed first
cursor.execute("SELECT COUNT(*) AS NB_BEDS FROM BEDS WHERE FEATURE_ID = :ID",{"ID":feature_id})
resultset = cursor.fetchone()
if resultset != None:
nb_beds = resultset["NB_BEDS"]
if nb_beds != 0:
print "Some beds are registered with this feature, please remove them first!"
log.warn('Beds registered with feature [%s], can\'t remove it', name)
exit(1)
cursor.execute("DELETE FROM FEATURES WHERE FEATURE_ID = :ID",{"ID":feature_id})
self.connection.commit()
def remove_guest(self, nickname):
log.info('Removing guest [%s] from the database', nickname)
cursor = self.connection.cursor()
try:
guest_id = self._get_guest_id(nickname)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
# Now check if bookings exist for this guest, in which case they must be removed first
cursor.execute("SELECT COUNT(*) AS NB_BOOKINGS FROM BOOKINGS WHERE GUEST_ID = :ID",{"ID":guest_id})
resultset = cursor.fetchone()
if resultset != None:
nb_bookings = resultset["NB_BOOKINGS"]
if nb_bookings != 0:
print "Some bookings exist for this guest, please remove them first!"
log.warn('Bookings registered for guest [%s], can\'t remove it', nickname)
exit(1)
cursor.execute("DELETE FROM GUESTS WHERE GUEST_ID = :ID",{"ID":guest_id})
self.connection.commit()
def remove_room(self, name):
log.info('Removing room [%s] from the database', name)
cursor = self.connection.cursor()
try:
room_id = self._get_room_id(name)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
# Now check if beds are found for this room, in which case they must be removed first
cursor.execute("SELECT COUNT(*) AS NB_BEDS FROM BEDS WHERE ROOM_ID = :ID",{"ID":room_id})
resultset = cursor.fetchone()
if resultset != None:
nb_beds = resultset["NB_BEDS"]
if nb_beds != 0:
print "Some beds are registered for this room, please remove them first!"
log.warn('Beds registered for room [%s], can\'t remove it', name)
exit(1)
cursor.execute("DELETE FROM ROOMS WHERE ROOM_ID = :ID",{"ID":room_id})
self.connection.commit()
def list_room(self, name):
log.info('Listing bookings for room [%s]', name)
try:
room_id = self._get_room_id(name)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
query = '''
SELECT GUESTS.NICKNAME, BEDS.NAME, BOOKINGS.DATE
FROM BOOKINGS
JOIN GUESTS ON (GUESTS.GUEST_ID = BOOKINGS.GUEST_ID)
JOIN BEDS ON (BEDS.BED_ID = BOOKINGS.BED_ID)
WHERE BEDS.BED_ID IN
(SELECT BED_ID FROM BEDS WHERE ROOM_ID = :ROOM_ID)
'''
cursor = self.connection.cursor()
cursor.execute(query,{"ROOM_ID": room_id})
rows = cursor.fetchall()
for row in rows:
print "Guest [%s], Bed [%s], Date [%s]" % (row["NICKNAME"], row["NAME"], row["DATE"])
def list_bed(self, name):
log.info('Listing bookings for bed [%s]', name)
try:
bed_id = self._get_bed_id(name)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
query = '''
SELECT GUESTS.NICKNAME, BEDS.NAME, BOOKINGS.DATE
FROM BOOKINGS
JOIN GUESTS ON (GUESTS.GUEST_ID = BOOKINGS.GUEST_ID)
JOIN BEDS ON (BEDS.BED_ID = BOOKINGS.BED_ID)
WHERE BEDS.BED_ID = :BED_ID
'''
cursor = self.connection.cursor()
cursor.execute(query,{"BED_ID": bed_id})
rows = cursor.fetchall()
for row in rows:
print "Guest [%s], Bed [%s], Date [%s]" % (row["NICKNAME"], row["NAME"], row["DATE"])
def list_guest(self, nick):
log.info('Listing bookings for guest [%s]', nick)
try:
guest_id = self._get_guest_id(nick)
except ValueError as e:
print str(e)
log.warn(str(e))
exit(1)
query = '''
SELECT GUESTS.NICKNAME, BEDS.NAME, BOOKINGS.DATE
FROM BOOKINGS
JOIN GUESTS ON (GUESTS.GUEST_ID = BOOKINGS.GUEST_ID)
JOIN BEDS ON (BEDS.BED_ID = BOOKINGS.BED_ID)
WHERE GUESTS.GUEST_ID = :GUEST_ID
'''
cursor = self.connection.cursor()
cursor.execute(query,{"GUEST_ID": guest_id})
rows = cursor.fetchall()
for row in rows:
print "Guest [%s], Bed [%s], Date [%s]" % (row["NICKNAME"], row["NAME"], row["DATE"])
def list_date(self, date):
log.info('Listing bookings for date [%s]', date)
query = '''
SELECT GUESTS.NICKNAME, BEDS.NAME, BOOKINGS.DATE
FROM BOOKINGS
JOIN GUESTS ON (GUESTS.GUEST_ID = BOOKINGS.GUEST_ID)
JOIN BEDS ON (BEDS.BED_ID = BOOKINGS.BED_ID)
WHERE DATE = :DATE
'''
cursor = self.connection.cursor()
cursor.execute(query,{"DATE": date})
rows = cursor.fetchall()
for row in rows:
print "Guest [%s], Bed [%s], Date [%s]" % (row["NICKNAME"], row["NAME"], row["DATE"])
def search_date(self, date):
log.info('Searching availabilities for [%s]', date)
query = '''
SELECT DISTINCT BEDS.NAME
FROM BEDS
WHERE NOT EXISTS
(SELECT * FROM BOOKINGS WHERE BED_ID = BEDS.BED_ID AND DATE = :DATE)
'''
cursor = self.connection.cursor()
cursor.execute(query,{"DATE": date})
rows = cursor.fetchall()
for row in rows:
print "Bed [%s]" % row["NAME"]
def show_entity(self, entity):
print "%s:" % entity
cursor = self.connection.cursor()
query = "select * from '%s'" % entity
cursor.execute(query)
rows = cursor.fetchall()
for row in rows:
print "\t",
print row
print "\n"
def stats_number(self):
log.info('Gathering database statistics')
cursor = self.connection.cursor()
cursor.execute("select name from sqlite_master where type='table'")
rows = cursor.fetchall()
for row in rows:
table = row["NAME"]
query = "select count(*) as NUM from '%s'" % table
cursor.execute(query)
count = cursor.fetchone()
print "%s | %d" % (table, count["NUM"])
def dump(self):
for line in self.connection.iterdump():
print "%s\n" % line
def _get_room_id(self, room_name):
cursor = self.connection.cursor()
cursor.execute("SELECT ROOM_ID FROM ROOMS WHERE NAME=:NAME",{"NAME": room_name})
resultset = cursor.fetchone()
if resultset == None:
raise ValueError('Room [%s] not found' % room_name)
return resultset["ROOM_ID"]
def _get_guest_id(self, guest_nick):
cursor = self.connection.cursor()
cursor.execute("SELECT GUEST_ID FROM GUESTS WHERE NICKNAME=:NAME",{"NAME": guest_nick})
resultset = cursor.fetchone()
if resultset == None:
raise ValueError('Guest [%s] not found' % guest_nick)
return resultset["GUEST_ID"]
def _get_bed_id(self, bed_name):
cursor = self.connection.cursor()
cursor.execute("SELECT BED_ID FROM BEDS WHERE NAME=:NAME",{"NAME": bed_name})
resultset = cursor.fetchone()
if resultset == None:
raise ValueError('Bed [%s] not found' % bed_name)
return resultset["BED_ID"]
def _get_feature_id(self, feature_name):
cursor = self.connection.cursor()
cursor.execute("SELECT FEATURE_ID FROM FEATURES WHERE NAME=:NAME",{"NAME": feature_name})
resultset = cursor.fetchone()
if resultset == None:
raise ValueError("Feature [%s] not found" % feature_name)
return resultset["FEATURE_ID"]
def _get_booking_id(self, guest, bed, date):
guest_id = self._get_guest_id(guest)
bed_id = self._get_bed_id(bed)
cursor = self.connection.cursor()
cursor.execute("SELECT BOOKING_ID FROM BOOKINGS WHERE GUEST_ID=:GUEST_ID AND BED_ID=:BED_ID AND DATE=:DATE",{"GUEST_ID":guest_id,"BED_ID":bed_id,"DATE":date})
resultset = cursor.fetchone()
if resultset == None:
raise ValueError("Booking for guest [%s] and bed [%s] on [%s] not found" % (guest,bed,date))
return resultset["BOOKING_ID"]
| bsd-3-clause |
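A typical session with the `Database` class above looks like the hypothetical sketch below (room, bed, and guest names invented). Note that `__init__` opens, and on first run creates, a `book.db` SQLite file in the current directory.

```python
# Hypothetical usage of the Database API defined above.
from storage import Database

with Database() as db:
    db.add_room('attic')
    db.add_feature('double', desc='double bed')
    db.add_bed('attic-double', capacity=2, room='attic', feature='double')
    db.add_guest('ada', first_name='Ada', last_name='Lovelace')
    db.register('ada', 'attic-double', '2024-07-14')
    db.list_date('2024-07-14')      # prints the booking just made
    db.search_date('2024-07-15')    # beds still free on another day
```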
rationalAgent/edx-platform-custom | cms/djangoapps/contentstore/views/requests.py | 2 | 1047 | from django.http import HttpResponse
from mitxmako.shortcuts import render_to_string, render_to_response
__all__ = ['edge', 'event', 'landing']
# points to the temporary course landing page with log in and sign up
def landing(request, org, course, coursename):
return render_to_response('temp-course-landing.html', {})
# points to the temporary edge page
def edge(request):
return render_to_response('university_profiles/edge.html', {})
def event(request):
'''
A no-op to swallow the analytics call so that cms methods don't spook, and
poor developers looking at console logs don't get distracted :-)
'''
return HttpResponse(status=204)
def render_from_lms(template_name, dictionary, context=None, namespace='main'):
"""
Render a template using the LMS MAKO_TEMPLATES
"""
return render_to_string(template_name, dictionary, context, namespace="lms." + namespace)
def _xmodule_recurse(item, action):
for child in item.get_children():
_xmodule_recurse(child, action)
action(item)
| agpl-3.0 |
eli261/jumpserver | apps/orgs/mixins/api.py | 1 | 1880 | # -*- coding: utf-8 -*-
#
from django.shortcuts import get_object_or_404
from rest_framework.viewsets import ModelViewSet
from rest_framework_bulk import BulkModelViewSet
from common.mixins import IDInCacheFilterMixin
from ..utils import set_to_root_org
from ..models import Organization
__all__ = [
'RootOrgViewMixin', 'OrgMembershipModelViewSetMixin', 'OrgModelViewSet',
'OrgBulkModelViewSet',
]
class RootOrgViewMixin:
def dispatch(self, request, *args, **kwargs):
set_to_root_org()
return super().dispatch(request, *args, **kwargs)
class OrgModelViewSet(IDInCacheFilterMixin, ModelViewSet):
def get_queryset(self):
return super().get_queryset().all()
class OrgBulkModelViewSet(IDInCacheFilterMixin, BulkModelViewSet):
def get_queryset(self):
queryset = super().get_queryset().all()
if hasattr(self, 'action') and self.action == 'list' and \
hasattr(self, 'serializer_class') and \
hasattr(self.serializer_class, 'setup_eager_loading'):
queryset = self.serializer_class.setup_eager_loading(queryset)
return queryset
def allow_bulk_destroy(self, qs, filtered):
return False
class OrgMembershipModelViewSetMixin:
org = None
membership_class = None
lookup_field = 'user'
lookup_url_kwarg = 'user_id'
http_method_names = ['get', 'post', 'delete', 'head', 'options']
def dispatch(self, request, *args, **kwargs):
self.org = get_object_or_404(Organization, pk=kwargs.get('org_id'))
return super().dispatch(request, *args, **kwargs)
def get_serializer_context(self):
context = super().get_serializer_context()
context['org'] = self.org
return context
def get_queryset(self):
queryset = self.membership_class.objects.filter(organization=self.org)
return queryset
| gpl-2.0 |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/lib-tk/tkColorChooser.py | 149 | 1786 | # tk common colour chooser dialogue
#
# this module provides an interface to the native color dialogue
# available in Tk 4.2 and newer.
#
# written by Fredrik Lundh, May 1997
#
# fixed initialcolor handling in August 1998
#
#
# options (all have default values):
#
# - initialcolor: colour to mark as selected when dialog is displayed
# (given as an RGB triplet or a Tk color string)
#
# - parent: which window to place the dialog on top of
#
# - title: dialog title
#
from tkCommonDialog import Dialog
#
# color chooser class
class Chooser(Dialog):
"Ask for a color"
command = "tk_chooseColor"
def _fixoptions(self):
try:
# make sure initialcolor is a tk color string
color = self.options["initialcolor"]
if isinstance(color, tuple):
# assume an RGB triplet
self.options["initialcolor"] = "#%02x%02x%02x" % color
except KeyError:
pass
def _fixresult(self, widget, result):
        # result can be several things: an empty tuple, an empty string or
        # a Tcl_Obj, so this somewhat weird check handles that
if not result or not str(result):
return None, None # canceled
# to simplify application code, the color chooser returns
# an RGB tuple together with the Tk color string
r, g, b = widget.winfo_rgb(result)
return (r/256, g/256, b/256), str(result)
#
# convenience stuff
def askcolor(color = None, **options):
"Ask for a color"
if color:
options = options.copy()
options["initialcolor"] = color
return Chooser(**options).show()
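# Typical use (illustrative): rgb, name = askcolor(); both values are None if
# the user cancels the dialog.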
# --------------------------------------------------------------------
# test stuff
if __name__ == "__main__":
print "color", askcolor()
| apache-2.0 |
CyanogenMod/android_external_chromium_org_third_party_skia | platform_tools/android/bin/download_utils.py | 149 | 8464 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A library to assist automatically downloading files.
This library is used by scripts that download tarballs, zipfiles, etc. as part
of the build process.
"""
import hashlib
import http_download
import os.path
import re
import shutil
import sys
import time
import urllib2
SOURCE_STAMP = 'SOURCE_URL'
HASH_STAMP = 'SOURCE_SHA1'
# Designed to handle more general inputs than sys.platform because the platform
# name may come from the command line.
PLATFORM_COLLAPSE = {
'windows': 'windows',
'win32': 'windows',
'cygwin': 'windows',
'linux': 'linux',
'linux2': 'linux',
'linux3': 'linux',
'darwin': 'mac',
'mac': 'mac',
}
ARCH_COLLAPSE = {
'i386' : 'x86',
'i686' : 'x86',
'x86_64': 'x86',
'armv7l': 'arm',
}
class HashError(Exception):
def __init__(self, download_url, expected_hash, actual_hash):
self.download_url = download_url
self.expected_hash = expected_hash
self.actual_hash = actual_hash
def __str__(self):
return 'Got hash "%s" but expected hash "%s" for "%s"' % (
self.actual_hash, self.expected_hash, self.download_url)
def PlatformName(name=None):
if name is None:
name = sys.platform
return PLATFORM_COLLAPSE[name]
def ArchName(name=None):
if name is None:
if PlatformName() == 'windows':
# TODO(pdox): Figure out how to auto-detect 32-bit vs 64-bit Windows.
name = 'i386'
else:
import platform
name = platform.machine()
return ARCH_COLLAPSE[name]
def EnsureFileCanBeWritten(filename):
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
def WriteData(filename, data):
EnsureFileCanBeWritten(filename)
f = open(filename, 'wb')
f.write(data)
f.close()
def WriteDataFromStream(filename, stream, chunk_size, verbose=True):
EnsureFileCanBeWritten(filename)
dst = open(filename, 'wb')
try:
while True:
data = stream.read(chunk_size)
if len(data) == 0:
break
dst.write(data)
if verbose:
# Indicate that we're still writing.
sys.stdout.write('.')
sys.stdout.flush()
finally:
if verbose:
sys.stdout.write('\n')
dst.close()
def DoesStampMatch(stampfile, expected, index):
try:
f = open(stampfile, 'r')
stamp = f.read()
f.close()
if stamp.split('\n')[index] == expected:
return "already up-to-date."
elif stamp.startswith('manual'):
return "manual override."
return False
except IOError:
return False
def WriteStamp(stampfile, data):
EnsureFileCanBeWritten(stampfile)
f = open(stampfile, 'w')
f.write(data)
f.close()
def StampIsCurrent(path, stamp_name, stamp_contents, min_time=None, index=0):
stampfile = os.path.join(path, stamp_name)
# Check if the stampfile is older than the minimum last mod time
if min_time:
try:
stamp_time = os.stat(stampfile).st_mtime
if stamp_time <= min_time:
return False
except OSError:
return False
return DoesStampMatch(stampfile, stamp_contents, index)
def WriteSourceStamp(path, url):
stampfile = os.path.join(path, SOURCE_STAMP)
WriteStamp(stampfile, url)
def WriteHashStamp(path, hash_val):
hash_stampfile = os.path.join(path, HASH_STAMP)
WriteStamp(hash_stampfile, hash_val)
def Retry(op, *args):
# Windows seems to be prone to having commands that delete files or
  # directories fail. We currently do not have a complete understanding of why,
# and as a workaround we simply retry the command a few times.
# It appears that file locks are hanging around longer than they should. This
# may be a secondary effect of processes hanging around longer than they
# should. This may be because when we kill a browser sel_ldr does not exit
# immediately, etc.
  # Virus checkers can also accidentally prevent files from being deleted, but
# that shouldn't be a problem on the bots.
if sys.platform in ('win32', 'cygwin'):
count = 0
while True:
try:
op(*args)
break
except Exception:
sys.stdout.write("FAILED: %s %s\n" % (op.__name__, repr(args)))
count += 1
if count < 5:
sys.stdout.write("RETRY: %s %s\n" % (op.__name__, repr(args)))
time.sleep(pow(2, count))
else:
# Don't mask the exception.
raise
else:
op(*args)
def MoveDirCleanly(src, dst):
RemoveDir(dst)
MoveDir(src, dst)
def MoveDir(src, dst):
Retry(shutil.move, src, dst)
def RemoveDir(path):
if os.path.exists(path):
Retry(shutil.rmtree, path)
def RemoveFile(path):
if os.path.exists(path):
Retry(os.unlink, path)
def _HashFileHandle(fh):
"""sha1 of a file like object.
Arguments:
fh: file handle like object to hash.
Returns:
sha1 as a string.
"""
hasher = hashlib.sha1()
try:
while True:
data = fh.read(4096)
if not data:
break
hasher.update(data)
finally:
fh.close()
return hasher.hexdigest()
def HashFile(filename):
"""sha1 a file on disk.
Arguments:
filename: filename to hash.
Returns:
sha1 as a string.
"""
fh = open(filename, 'rb')
return _HashFileHandle(fh)
def HashUrlByDownloading(url):
"""sha1 the data at an url.
Arguments:
url: url to download from.
Returns:
sha1 of the data at the url.
"""
try:
fh = urllib2.urlopen(url)
except:
sys.stderr.write("Failed fetching URL: %s\n" % url)
raise
return _HashFileHandle(fh)
# Attempts to get the SHA1 hash of a file given a URL by looking for
# an adjacent file with a ".sha1hash" suffix. This saves having to
# download a large tarball just to get its hash. Otherwise, we fall
# back to downloading the main file.
def HashUrl(url):
hash_url = '%s.sha1hash' % url
try:
fh = urllib2.urlopen(hash_url)
data = fh.read(100)
fh.close()
except urllib2.HTTPError, exn:
if exn.code == 404:
return HashUrlByDownloading(url)
raise
else:
if not re.match('[0-9a-f]{40}\n?$', data):
raise AssertionError('Bad SHA1 hash file: %r' % data)
return data.strip()
def SyncURL(url, filename=None, stamp_dir=None, min_time=None,
hash_val=None, keep=False, verbose=False, stamp_index=0):
"""Synchronize a destination file with a URL
if the URL does not match the URL stamp, then we must re-download it.
  Arguments:
    url: the url to compare against and download
filename: the file to create on download
path: the download path
stamp_dir: the filename containing the URL stamp to check against
hash_val: if set, the expected hash which must be matched
verbose: prints out status as it runs
stamp_index: index within the stamp file to check.
Returns:
True if the file is replaced
False if the file is not replaced
Exception:
HashError: if the hash does not match
"""
assert url and filename
  # If we are not keeping the tarball, it does not have to exist locally for us
  # to skip the download. If we are keeping it, it must already be on disk.
if keep:
tarball_ok = os.path.isfile(filename)
else:
tarball_ok = True
# If we don't need the tarball and the stamp_file matches the url, then
# we must be up to date. If the URL differs but the recorded hash matches
# the one we'll insist the tarball has, then that's good enough too.
# TODO(mcgrathr): Download the .sha1sum file first to compare with
# the cached hash, in case --file-hash options weren't used.
if tarball_ok and stamp_dir is not None:
if StampIsCurrent(stamp_dir, SOURCE_STAMP, url, min_time):
if verbose:
print '%s is already up to date.' % filename
return False
if (hash_val is not None and
StampIsCurrent(stamp_dir, HASH_STAMP, hash_val, min_time, stamp_index)):
if verbose:
print '%s is identical to the up to date file.' % filename
return False
if verbose:
print 'Updating %s\n\tfrom %s.' % (filename, url)
EnsureFileCanBeWritten(filename)
http_download.HttpDownload(url, filename)
if hash_val:
tar_hash = HashFile(filename)
if hash_val != tar_hash:
raise HashError(actual_hash=tar_hash, expected_hash=hash_val,
download_url=url)
return True
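# Example use (illustrative; URL and paths are placeholders):
#   SyncURL('https://example.com/toolchain.tar.bz2',
#           filename='out/toolchain.tar.bz2', stamp_dir='out', verbose=True)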
| bsd-3-clause |
ChinaMassClouds/copenstack-server | openstack/src/horizon-2014.2/openstack_dashboard/dashboards/project/databases/tests.py | 7 | 14628 | # Copyright 2013 Mirantis Inc.
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from troveclient import common
INDEX_URL = reverse('horizon:project:databases:index')
LAUNCH_URL = reverse('horizon:project:databases:launch')
DETAILS_URL = reverse('horizon:project:databases:detail', args=['id'])
class DatabaseTests(test.TestCase):
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
# Check the Host column displaying ip or hostname
self.assertContains(res, '10.0.0.3')
self.assertContains(res, 'trove.instance-2.com')
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_flavor_exception(self):
# Mock database instances
databases = common.Paginated(self.databases.list())
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs(
{api.trove: ('instance_list',)})
def test_index_list_exception(self):
# Mock database instances
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_pagination(self):
# Mock database instances
databases = self.databases.list()
last_record = databases[1]
databases = common.Paginated(databases, next_marker="foo")
api.trove.instance_list(IsA(http.HttpRequest), marker=None)\
.AndReturn(databases)
# Mock flavors
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertContains(
res, 'marker=' + last_record.id)
@test.create_stubs(
{api.trove: ('instance_list', 'flavor_list')})
def test_index_flavor_list_exception(self):
# Mocking instances.
databases = common.Paginated(self.databases.list())
api.trove.instance_list(
IsA(http.HttpRequest),
marker=None,
).AndReturn(databases)
        # Mock the flavor list call so that it raises an exception.
api.trove.flavor_list(
IsA(http.HttpRequest),
).AndRaise(self.exceptions.trove)
self.mox.ReplayAll()
res = self.client.get(INDEX_URL)
self.assertTemplateUsed(res, 'project/databases/index.html')
self.assertMessageCount(res, error=1)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list',
'datastore_list', 'datastore_version_list')})
def test_launch_instance(self):
api.trove.flavor_list(IsA(http.HttpRequest))\
.AndReturn(self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest))\
.AndReturn(self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
self.mox.ReplayAll()
res = self.client.get(LAUNCH_URL)
self.assertTemplateUsed(res, 'project/databases/launch.html')
@test.create_stubs({api.trove: ('flavor_list',)})
def test_launch_instance_exception_on_flavors(self):
trove_exception = self.exceptions.nova
api.trove.flavor_list(IsA(http.HttpRequest)).AndRaise(trove_exception)
self.mox.ReplayAll()
toSuppress = ["openstack_dashboard.dashboards.project.databases."
"workflows.create_instance",
"horizon.workflows.base"]
# Suppress expected log messages in the test output
loggers = []
for cls in toSuppress:
logger = logging.getLogger(cls)
loggers.append((logger, logger.getEffectiveLevel()))
logger.setLevel(logging.CRITICAL)
try:
with self.assertRaises(exceptions.Http302):
self.client.get(LAUNCH_URL)
finally:
# Restore the previous log levels
for (log, level) in loggers:
log.setLevel(level)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list', 'instance_create',
'datastore_list', 'datastore_version_list'),
api.neutron: ('network_list',)})
def test_create_simple_instance(self):
api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(
self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True).AndReturn(
self.networks.list()[1:])
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
# Actual create database call
api.trove.instance_create(
IsA(http.HttpRequest),
IsA(unicode),
IsA(int),
IsA(unicode),
databases=None,
datastore=IsA(unicode),
datastore_version=IsA(unicode),
restore_point=None,
users=None,
nics=nics).AndReturn(self.databases.first())
self.mox.ReplayAll()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'datastore': 'mysql,5.5',
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({
api.trove: ('flavor_list', 'backup_list', 'instance_create',
'datastore_list', 'datastore_version_list'),
api.neutron: ('network_list',)})
def test_create_simple_instance_exception(self):
trove_exception = self.exceptions.nova
api.trove.flavor_list(IsA(http.HttpRequest)).AndReturn(
self.flavors.list())
api.trove.backup_list(IsA(http.HttpRequest)).AndReturn(
self.database_backups.list())
# Mock datastores
api.trove.datastore_list(IsA(http.HttpRequest))\
.AndReturn(self.datastores.list())
# Mock datastore versions
api.trove.datastore_version_list(IsA(http.HttpRequest),
IsA(str)).AndReturn(self.datastore_versions.list())
api.neutron.network_list(IsA(http.HttpRequest),
tenant_id=self.tenant.id,
shared=False).AndReturn(
self.networks.list()[:1])
api.neutron.network_list(IsA(http.HttpRequest),
shared=True).AndReturn(
self.networks.list()[1:])
nics = [{"net-id": self.networks.first().id, "v4-fixed-ip": ''}]
# Actual create database call
api.trove.instance_create(
IsA(http.HttpRequest),
IsA(unicode),
IsA(int),
IsA(unicode),
databases=None,
datastore=IsA(unicode),
datastore_version=IsA(unicode),
restore_point=None,
users=None,
nics=nics).AndRaise(trove_exception)
self.mox.ReplayAll()
post = {
'name': "MyDB",
'volume': '1',
'flavor': 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
'network': self.networks.first().id,
'datastore': 'mysql,5.5',
}
res = self.client.post(LAUNCH_URL, post)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs(
{api.trove: ('instance_get', 'flavor_get',)})
def _test_details(self, database, with_designate=False):
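        # Shared helper for the detail-view tests below; with_designate switches
        # the assertion between the instance hostname and its IP address.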
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.flavors.first())
self.mox.ReplayAll()
res = self.client.get(DETAILS_URL)
self.assertTemplateUsed(res, 'project/databases/detail.html')
if with_designate:
self.assertContains(res, database.hostname)
else:
self.assertContains(res, database.ip[0])
def test_details_with_ip(self):
database = self.databases.first()
self._test_details(database, with_designate=False)
def test_details_with_hostname(self):
database = self.databases.list()[1]
self._test_details(database, with_designate=True)
@test.create_stubs(
{api.trove: ('instance_get', 'flavor_get', 'users_list',
'user_list_access', 'user_delete')})
def test_user_delete(self):
database = self.databases.first()
user = self.database_users.first()
user_db = self.database_user_dbs.first()
database_id = database.id
# Instead of using the user's ID, the api uses the user's name. BOOO!
user_id = user.name
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
api.trove.flavor_get(IsA(http.HttpRequest), IsA(str))\
.AndReturn(self.flavors.first())
# tabs.py: UserTab.get_user_data
api.trove.users_list(IsA(http.HttpRequest),
database_id).AndReturn([user])
api.trove.user_list_access(IsA(http.HttpRequest),
database_id,
user_id).AndReturn([user_db])
# tables.py: DeleteUser.delete
api.trove.user_delete(IsA(http.HttpRequest),
database_id,
user_id).AndReturn(None)
self.mox.ReplayAll()
details_url = reverse('horizon:project:databases:detail',
args=[database_id])
url = details_url + '?tab=instance_details__users_tab'
action_string = u"users__delete__%s" % user_id
form_data = {'action': action_string}
res = self.client.post(url, form_data)
self.assertRedirectsNoFollow(res, url)
@test.create_stubs({
api.trove: ('instance_get', 'instance_resize_volume'),
})
def test_resize_volume(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
# forms.py: ResizeVolumeForm.handle
api.trove.instance_resize_volume(IsA(http.HttpRequest),
database_id,
IsA(int)).AndReturn(None)
self.mox.ReplayAll()
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size + 1,
}
res = self.client.post(url, post)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({
api.trove: ('instance_get', 'instance_resize_volume'),
})
def test_resize_volume_bad_value(self):
database = self.databases.first()
database_id = database.id
database_size = database.volume.get('size')
# views.py: DetailView.get_data
api.trove.instance_get(IsA(http.HttpRequest), IsA(unicode))\
.AndReturn(database)
self.mox.ReplayAll()
url = reverse('horizon:project:databases:resize_volume',
args=[database_id])
post = {
'instance_id': database_id,
'orig_size': database_size,
'new_size': database_size,
}
res = self.client.post(url, post)
self.assertContains(res,
"New size for volume must be greater than current size.")
| gpl-2.0 |
beafus/Video-Player | project/server/lib/node_modules/npm/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalizes and sorts an xml. It also fetches all the properties
inside linked vsprops files and includes them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
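# Example invocation (illustrative; the project file and replacement key are
# placeholders):
#   pretty_vcproj.py chrome.vcproj "$(SolutionDir)=c:\src\chrome\" > pretty.vcproj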
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
  # Normalize the node, and remove all extraneous whitespace.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
  # Sort all the semicolon-separated attribute values and remove the duplicates.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
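  # Find the child of node1 with the same nodeName and 'Name' attribute as
  # child2, or return None if there is no such child.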
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
      # The attribute exists in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
      # The attribute does not exist in the main node. We append this one.
node1.setAttribute(name, value2)
    # If the attribute was a property sheet attribute, we remove it, since
    # it is useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
# check if we have exactly 1 parameter.
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
  # The first thing we need to do is find the Configuration nodes and merge
  # each of them with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
umangv/LitHub | LitHub/fbconnect/forms.py | 1 | 2017 | # Copyright 2011 Kalamazoo College Computer Science Club
# <kzoo-cs-board@googlegroups.com>
# This file is part of LitHub.
#
# LitHub is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LitHub is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with LitHub. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.contrib.auth.models import User
class FBRegisterForm(forms.Form):
username = forms.CharField(max_length=30)
def clean_username(self):
username = self.cleaned_data['username']
if User.objects.filter(username=username).count():
raise forms.ValidationError("This username has already been "+\
"taken. Please try again")
return self.cleaned_data['username']
class FBRegisterVerifyForm(forms.Form):
username = forms.CharField(max_length=30)
email = forms.EmailField(max_length=75)
def clean_username(self):
username = self.cleaned_data['username']
if User.objects.filter(username=username).count():
raise forms.ValidationError("This username has already been "
"taken. Please try again")
return self.cleaned_data['username']
def clean_email(self):
"""Ensures a valid K student email id is used. """
email_parts = self.cleaned_data['email'].split('@')
        if email_parts[1].lower() != "kzoo.edu":
raise forms.ValidationError("Only kzoo.edu addresses are "
"allowed!")
return self.cleaned_data['email']
| gpl-3.0 |
indefini/linux | tools/perf/scripts/python/export-to-postgresql.py | 217 | 17826 | # export-to-postgresql.py: export perf data to a postgresql database
# Copyright (c) 2014, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
import datetime
# To use this script you will need to have installed package python-pyside which
# provides LGPL-licensed Python bindings for Qt. You will also need the package
# libqt4-sql-psql for Qt postgresql support.
#
# The script assumes postgresql is running on the local machine and that the
# user has postgresql permissions to create databases. Examples of installing
# postgresql and adding such a user are:
#
# fedora:
#
# $ sudo yum install postgresql postgresql-server python-pyside qt-postgresql
# $ sudo su - postgres -c initdb
# $ sudo service postgresql start
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# ubuntu:
#
# $ sudo apt-get install postgresql
# $ sudo su - postgres
# $ createuser <your user id here>
# Shall the new role be a superuser? (y/n) y
#
# An example of using this script with Intel PT:
#
# $ perf record -e intel_pt//u ls
# $ perf script -s ~/libexec/perf-core/scripts/python/export-to-postgresql.py pt_example branches calls
# 2015-05-29 12:49:23.464364 Creating database...
# 2015-05-29 12:49:26.281717 Writing to intermediate files...
# 2015-05-29 12:49:27.190383 Copying to database...
# 2015-05-29 12:49:28.140451 Removing intermediate files...
# 2015-05-29 12:49:28.147451 Adding primary keys
# 2015-05-29 12:49:28.655683 Adding foreign keys
# 2015-05-29 12:49:29.365350 Done
#
# To browse the database, psql can be used e.g.
#
# $ psql pt_example
# pt_example=# select * from samples_view where id < 100;
# pt_example=# \d+
# pt_example=# \d+ samples_view
# pt_example=# \q
#
# An example of using the database is provided by the script
# call-graph-from-postgresql.py. Refer to that script for details.
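#
# A further example query (illustrative) to list the most frequently sampled
# symbols once the database has been populated:
#
#   pt_example=# SELECT symbol, count(*) AS hits FROM samples_view
#   pt_example-#   GROUP BY symbol ORDER BY hits DESC LIMIT 10;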
from PySide.QtSql import *
# Need to access PostgreSQL C library directly to use COPY FROM STDIN
from ctypes import *
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
PQfinish = libpq.PQfinish
PQstatus = libpq.PQstatus
PQexec = libpq.PQexec
PQexec.restype = c_void_p
PQresultStatus = libpq.PQresultStatus
PQputCopyData = libpq.PQputCopyData
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
perf_db_export_mode = True
perf_db_export_calls = False
def usage():
print >> sys.stderr, "Usage is: export-to-postgresql.py <database name> [<columns>] [<calls>]"
print >> sys.stderr, "where: columns 'all' or 'branches'"
print >> sys.stderr, " calls 'calls' => create calls table"
raise Exception("Too few arguments")
if (len(sys.argv) < 2):
usage()
dbname = sys.argv[1]
if (len(sys.argv) >= 3):
columns = sys.argv[2]
else:
columns = "all"
if columns not in ("all", "branches"):
usage()
branches = (columns == "branches")
if (len(sys.argv) >= 4):
if (sys.argv[3] == "calls"):
perf_db_export_calls = True
else:
usage()
output_dir_name = os.getcwd() + "/" + dbname + "-perf-data"
os.mkdir(output_dir_name)
def do_query(q, s):
if (q.exec_(s)):
return
raise Exception("Query failed: " + q.lastError().text())
print datetime.datetime.today(), "Creating database..."
db = QSqlDatabase.addDatabase('QPSQL')
query = QSqlQuery(db)
db.setDatabaseName('postgres')
db.open()
try:
do_query(query, 'CREATE DATABASE ' + dbname)
except:
os.rmdir(output_dir_name)
raise
query.finish()
query.clear()
db.close()
db.setDatabaseName(dbname)
db.open()
query = QSqlQuery(db)
do_query(query, 'SET client_min_messages TO WARNING')
do_query(query, 'CREATE TABLE selected_events ('
'id bigint NOT NULL,'
'name varchar(80))')
do_query(query, 'CREATE TABLE machines ('
'id bigint NOT NULL,'
'pid integer,'
'root_dir varchar(4096))')
do_query(query, 'CREATE TABLE threads ('
'id bigint NOT NULL,'
'machine_id bigint,'
'process_id bigint,'
'pid integer,'
'tid integer)')
do_query(query, 'CREATE TABLE comms ('
'id bigint NOT NULL,'
'comm varchar(16))')
do_query(query, 'CREATE TABLE comm_threads ('
'id bigint NOT NULL,'
'comm_id bigint,'
'thread_id bigint)')
do_query(query, 'CREATE TABLE dsos ('
'id bigint NOT NULL,'
'machine_id bigint,'
'short_name varchar(256),'
'long_name varchar(4096),'
'build_id varchar(64))')
do_query(query, 'CREATE TABLE symbols ('
'id bigint NOT NULL,'
'dso_id bigint,'
'sym_start bigint,'
'sym_end bigint,'
'binding integer,'
'name varchar(2048))')
do_query(query, 'CREATE TABLE branch_types ('
'id integer NOT NULL,'
'name varchar(80))')
if branches:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'branch_type integer,'
'in_tx boolean)')
else:
do_query(query, 'CREATE TABLE samples ('
'id bigint NOT NULL,'
'evsel_id bigint,'
'machine_id bigint,'
'thread_id bigint,'
'comm_id bigint,'
'dso_id bigint,'
'symbol_id bigint,'
'sym_offset bigint,'
'ip bigint,'
'time bigint,'
'cpu integer,'
'to_dso_id bigint,'
'to_symbol_id bigint,'
'to_sym_offset bigint,'
'to_ip bigint,'
'period bigint,'
'weight bigint,'
'transaction bigint,'
'data_src bigint,'
'branch_type integer,'
'in_tx boolean)')
if perf_db_export_calls:
do_query(query, 'CREATE TABLE call_paths ('
'id bigint NOT NULL,'
'parent_id bigint,'
'symbol_id bigint,'
'ip bigint)')
do_query(query, 'CREATE TABLE calls ('
'id bigint NOT NULL,'
'thread_id bigint,'
'comm_id bigint,'
'call_path_id bigint,'
'call_time bigint,'
'return_time bigint,'
'branch_count bigint,'
'call_id bigint,'
'return_id bigint,'
'parent_call_path_id bigint,'
'flags integer)')
do_query(query, 'CREATE VIEW samples_view AS '
'SELECT '
'id,'
'time,'
'cpu,'
'(SELECT pid FROM threads WHERE id = thread_id) AS pid,'
'(SELECT tid FROM threads WHERE id = thread_id) AS tid,'
'(SELECT comm FROM comms WHERE id = comm_id) AS command,'
'(SELECT name FROM selected_events WHERE id = evsel_id) AS event,'
'to_hex(ip) AS ip_hex,'
'(SELECT name FROM symbols WHERE id = symbol_id) AS symbol,'
'sym_offset,'
'(SELECT short_name FROM dsos WHERE id = dso_id) AS dso_short_name,'
'to_hex(to_ip) AS to_ip_hex,'
'(SELECT name FROM symbols WHERE id = to_symbol_id) AS to_symbol,'
'to_sym_offset,'
'(SELECT short_name FROM dsos WHERE id = to_dso_id) AS to_dso_short_name,'
'(SELECT name FROM branch_types WHERE id = branch_type) AS branch_type_name,'
'in_tx'
' FROM samples')
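# The intermediate files use the PostgreSQL binary COPY format: an 11-byte
# signature, a 32-bit flags field and a 32-bit header-extension length, with a
# 16-bit -1 (0xffff) word written as the trailer.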
file_header = struct.pack("!11sii", "PGCOPY\n\377\r\n\0", 0, 0)
file_trailer = "\377\377"
def open_output_file(file_name):
path_name = output_dir_name + "/" + file_name
file = open(path_name, "w+")
file.write(file_header)
return file
def close_output_file(file):
file.write(file_trailer)
file.close()
def copy_output_file_direct(file, table_name):
close_output_file(file)
sql = "COPY " + table_name + " FROM '" + file.name + "' (FORMAT 'binary')"
do_query(query, sql)
# Use COPY FROM STDIN because security may prevent postgres from accessing the files directly
def copy_output_file(file, table_name):
conn = PQconnectdb("dbname = " + dbname)
if (PQstatus(conn)):
raise Exception("COPY FROM STDIN PQconnectdb failed")
file.write(file_trailer)
file.seek(0)
sql = "COPY " + table_name + " FROM STDIN (FORMAT 'binary')"
res = PQexec(conn, sql)
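	# PGRES_COPY_IN has the value 4 in libpq's ExecStatusType enum.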
if (PQresultStatus(res) != 4):
raise Exception("COPY FROM STDIN PQexec failed")
data = file.read(65536)
while (len(data)):
ret = PQputCopyData(conn, data, len(data))
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyData failed, error " + str(ret))
data = file.read(65536)
ret = PQputCopyEnd(conn, None)
if (ret != 1):
raise Exception("COPY FROM STDIN PQputCopyEnd failed, error " + str(ret))
PQfinish(conn)
def remove_output_file(file):
name = file.name
file.close()
os.unlink(name)
evsel_file = open_output_file("evsel_table.bin")
machine_file = open_output_file("machine_table.bin")
thread_file = open_output_file("thread_table.bin")
comm_file = open_output_file("comm_table.bin")
comm_thread_file = open_output_file("comm_thread_table.bin")
dso_file = open_output_file("dso_table.bin")
symbol_file = open_output_file("symbol_table.bin")
branch_type_file = open_output_file("branch_type_table.bin")
sample_file = open_output_file("sample_table.bin")
if perf_db_export_calls:
call_path_file = open_output_file("call_path_table.bin")
call_file = open_output_file("call_table.bin")
def trace_begin():
print datetime.datetime.today(), "Writing to intermediate files..."
# id == 0 means unknown. It is easier to create records for them than replace the zeroes with NULLs
evsel_table(0, "unknown")
machine_table(0, 0, "unknown")
thread_table(0, 0, 0, -1, -1)
comm_table(0, "unknown")
dso_table(0, 0, "unknown", "unknown", "")
symbol_table(0, 0, 0, 0, 0, "unknown")
sample_table(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
if perf_db_export_calls:
call_path_table(0, 0, 0, 0)
unhandled_count = 0
def trace_end():
print datetime.datetime.today(), "Copying to database..."
copy_output_file(evsel_file, "selected_events")
copy_output_file(machine_file, "machines")
copy_output_file(thread_file, "threads")
copy_output_file(comm_file, "comms")
copy_output_file(comm_thread_file, "comm_threads")
copy_output_file(dso_file, "dsos")
copy_output_file(symbol_file, "symbols")
copy_output_file(branch_type_file, "branch_types")
copy_output_file(sample_file, "samples")
if perf_db_export_calls:
copy_output_file(call_path_file, "call_paths")
copy_output_file(call_file, "calls")
print datetime.datetime.today(), "Removing intermediate files..."
remove_output_file(evsel_file)
remove_output_file(machine_file)
remove_output_file(thread_file)
remove_output_file(comm_file)
remove_output_file(comm_thread_file)
remove_output_file(dso_file)
remove_output_file(symbol_file)
remove_output_file(branch_type_file)
remove_output_file(sample_file)
if perf_db_export_calls:
remove_output_file(call_path_file)
remove_output_file(call_file)
os.rmdir(output_dir_name)
print datetime.datetime.today(), "Adding primary keys"
do_query(query, 'ALTER TABLE selected_events ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE machines ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comms ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE comm_threads ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE dsos ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE symbols ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE branch_types ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE samples ADD PRIMARY KEY (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths ADD PRIMARY KEY (id)')
do_query(query, 'ALTER TABLE calls ADD PRIMARY KEY (id)')
print datetime.datetime.today(), "Adding foreign keys"
do_query(query, 'ALTER TABLE threads '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT processfk FOREIGN KEY (process_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE comm_threads '
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id)')
do_query(query, 'ALTER TABLE dsos '
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id)')
do_query(query, 'ALTER TABLE symbols '
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id)')
do_query(query, 'ALTER TABLE samples '
'ADD CONSTRAINT evselfk FOREIGN KEY (evsel_id) REFERENCES selected_events (id),'
'ADD CONSTRAINT machinefk FOREIGN KEY (machine_id) REFERENCES machines (id),'
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT dsofk FOREIGN KEY (dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id),'
'ADD CONSTRAINT todsofk FOREIGN KEY (to_dso_id) REFERENCES dsos (id),'
'ADD CONSTRAINT tosymbolfk FOREIGN KEY (to_symbol_id) REFERENCES symbols (id)')
if perf_db_export_calls:
do_query(query, 'ALTER TABLE call_paths '
'ADD CONSTRAINT parentfk FOREIGN KEY (parent_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT symbolfk FOREIGN KEY (symbol_id) REFERENCES symbols (id)')
do_query(query, 'ALTER TABLE calls '
'ADD CONSTRAINT threadfk FOREIGN KEY (thread_id) REFERENCES threads (id),'
'ADD CONSTRAINT commfk FOREIGN KEY (comm_id) REFERENCES comms (id),'
'ADD CONSTRAINT call_pathfk FOREIGN KEY (call_path_id) REFERENCES call_paths (id),'
'ADD CONSTRAINT callfk FOREIGN KEY (call_id) REFERENCES samples (id),'
'ADD CONSTRAINT returnfk FOREIGN KEY (return_id) REFERENCES samples (id),'
'ADD CONSTRAINT parent_call_pathfk FOREIGN KEY (parent_call_path_id) REFERENCES call_paths (id)')
do_query(query, 'CREATE INDEX pcpid_idx ON calls (parent_call_path_id)')
if (unhandled_count):
print datetime.datetime.today(), "Warning: ", unhandled_count, " unhandled events"
print datetime.datetime.today(), "Done"
def trace_unhandled(event_name, context, event_fields_dict):
global unhandled_count
unhandled_count += 1
def sched__sched_switch(*x):
pass
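# Each *_table() callback below packs one row in PostgreSQL binary COPY form:
# a 16-bit column count, then, per column, a 32-bit byte length followed by the
# big-endian value itself.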
def evsel_table(evsel_id, evsel_name, *x):
n = len(evsel_name)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, evsel_id, n, evsel_name)
evsel_file.write(value)
def machine_table(machine_id, pid, root_dir, *x):
n = len(root_dir)
fmt = "!hiqiii" + str(n) + "s"
value = struct.pack(fmt, 3, 8, machine_id, 4, pid, n, root_dir)
machine_file.write(value)
def thread_table(thread_id, machine_id, process_id, pid, tid, *x):
value = struct.pack("!hiqiqiqiiii", 5, 8, thread_id, 8, machine_id, 8, process_id, 4, pid, 4, tid)
thread_file.write(value)
def comm_table(comm_id, comm_str, *x):
n = len(comm_str)
fmt = "!hiqi" + str(n) + "s"
value = struct.pack(fmt, 2, 8, comm_id, n, comm_str)
comm_file.write(value)
def comm_thread_table(comm_thread_id, comm_id, thread_id, *x):
fmt = "!hiqiqiq"
value = struct.pack(fmt, 3, 8, comm_thread_id, 8, comm_id, 8, thread_id)
comm_thread_file.write(value)
def dso_table(dso_id, machine_id, short_name, long_name, build_id, *x):
n1 = len(short_name)
n2 = len(long_name)
n3 = len(build_id)
fmt = "!hiqiqi" + str(n1) + "si" + str(n2) + "si" + str(n3) + "s"
value = struct.pack(fmt, 5, 8, dso_id, 8, machine_id, n1, short_name, n2, long_name, n3, build_id)
dso_file.write(value)
def symbol_table(symbol_id, dso_id, sym_start, sym_end, binding, symbol_name, *x):
n = len(symbol_name)
fmt = "!hiqiqiqiqiii" + str(n) + "s"
value = struct.pack(fmt, 6, 8, symbol_id, 8, dso_id, 8, sym_start, 8, sym_end, 4, binding, n, symbol_name)
symbol_file.write(value)
def branch_type_table(branch_type, name, *x):
n = len(name)
fmt = "!hiii" + str(n) + "s"
value = struct.pack(fmt, 2, 4, branch_type, n, name)
branch_type_file.write(value)
def sample_table(sample_id, evsel_id, machine_id, thread_id, comm_id, dso_id, symbol_id, sym_offset, ip, time, cpu, to_dso_id, to_symbol_id, to_sym_offset, to_ip, period, weight, transaction, data_src, branch_type, in_tx, *x):
if branches:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiiiB", 17, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 4, branch_type, 1, in_tx)
else:
value = struct.pack("!hiqiqiqiqiqiqiqiqiqiqiiiqiqiqiqiqiqiqiqiiiB", 21, 8, sample_id, 8, evsel_id, 8, machine_id, 8, thread_id, 8, comm_id, 8, dso_id, 8, symbol_id, 8, sym_offset, 8, ip, 8, time, 4, cpu, 8, to_dso_id, 8, to_symbol_id, 8, to_sym_offset, 8, to_ip, 8, period, 8, weight, 8, transaction, 8, data_src, 4, branch_type, 1, in_tx)
sample_file.write(value)
def call_path_table(cp_id, parent_id, symbol_id, ip, *x):
fmt = "!hiqiqiqiq"
value = struct.pack(fmt, 4, 8, cp_id, 8, parent_id, 8, symbol_id, 8, ip)
call_path_file.write(value)
def call_return_table(cr_id, thread_id, comm_id, call_path_id, call_time, return_time, branch_count, call_id, return_id, parent_call_path_id, flags, *x):
fmt = "!hiqiqiqiqiqiqiqiqiqiqii"
value = struct.pack(fmt, 11, 8, cr_id, 8, thread_id, 8, comm_id, 8, call_path_id, 8, call_time, 8, return_time, 8, branch_count, 8, call_id, 8, return_id, 8, parent_call_path_id, 4, flags)
call_file.write(value)
| gpl-2.0 |
hyperwang/bitcoin | contrib/devtools/security-check.py | 3 | 6400 | #!/usr/bin/python2
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
Needs `readelf` (for ELF) and `objdump` (for PE).
'''
from __future__ import division,print_function
import subprocess
import sys
import os
READELF_CMD = os.getenv('READELF', '/usr/bin/readelf')
OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
def check_ELF_PIE(executable):
'''
Check for position independent executable (PIE), allowing for address space randomization.
'''
p = subprocess.Popen([READELF_CMD, '-h', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split('\n'):
line = line.split()
if len(line)>=2 and line[0] == 'Type:' and line[1] == 'DYN':
ok = True
return ok
def get_ELF_program_headers(executable):
'''Return type and flags for ELF program headers'''
p = subprocess.Popen([READELF_CMD, '-l', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
in_headers = False
count = 0
headers = []
for line in stdout.split('\n'):
if line.startswith('Program Headers:'):
in_headers = True
if line == '':
in_headers = False
if in_headers:
if count == 1: # header line
ofs_typ = line.find('Type')
ofs_offset = line.find('Offset')
ofs_flags = line.find('Flg')
ofs_align = line.find('Align')
if ofs_typ == -1 or ofs_offset == -1 or ofs_flags == -1 or ofs_align == -1:
raise ValueError('Cannot parse elfread -lW output')
elif count > 1:
typ = line[ofs_typ:ofs_offset].rstrip()
flags = line[ofs_flags:ofs_align].rstrip()
headers.append((typ, flags))
count += 1
return headers
def check_ELF_NX(executable):
'''
Check that no sections are writable and executable (including the stack)
'''
have_wx = False
have_gnu_stack = False
for (typ, flags) in get_ELF_program_headers(executable):
if typ == 'GNU_STACK':
have_gnu_stack = True
if 'W' in flags and 'E' in flags: # section is both writable and executable
have_wx = True
return have_gnu_stack and not have_wx
def check_ELF_RELRO(executable):
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
have_gnu_relro = False
for (typ, flags) in get_ELF_program_headers(executable):
# Note: not checking flags == 'R': here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also http://permalink.gmane.org/gmane.comp.gnu.binutils/71347
if typ == 'GNU_RELRO':
have_gnu_relro = True
have_bindnow = False
p = subprocess.Popen([READELF_CMD, '-d', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>1 and tokens[1] == '(BIND_NOW)':
have_bindnow = True
return have_gnu_relro and have_bindnow
def check_ELF_Canary(executable):
'''
Check for use of stack canary
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
ok = False
for line in stdout.split('\n'):
if '__stack_chk_fail' in line:
ok = True
return ok
def get_PE_dll_characteristics(executable):
'''
Get PE DllCharacteristics bits
'''
p = subprocess.Popen([OBJDUMP_CMD, '-x', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Error opening file')
for line in stdout.split('\n'):
tokens = line.split()
if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
return int(tokens[1],16)
return 0
def check_PE_PIE(executable):
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return bool(get_PE_dll_characteristics(executable) & 0x40)
def check_PE_NX(executable):
'''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
return bool(get_PE_dll_characteristics(executable) & 0x100)
CHECKS = {
'ELF': [
('PIE', check_ELF_PIE),
('NX', check_ELF_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary)
],
'PE': [
('PIE', check_PE_PIE),
('NX', check_PE_NX)
]
}
def identify_executable(executable):
with open(filename, 'rb') as f:
magic = f.read(4)
if magic.startswith(b'MZ'):
return 'PE'
elif magic.startswith(b'\x7fELF'):
return 'ELF'
return None
if __name__ == '__main__':
retval = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
print('%s: unknown format' % filename)
retval = 1
continue
failed = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
print('%s: failed %s' % (filename, ' '.join(failed)))
retval = 1
except IOError:
print('%s: cannot open' % filename)
retval = 1
exit(retval)
| mit |
praekelt/ummeli | ummeli/base/migrations/0010_auto__add_field_skill_level.py | 1 | 11741 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Skill.level'
db.add_column('base_skill', 'level', self.gf('django.db.models.fields.PositiveIntegerField')(default=0), keep_default=False)
def backwards(self, orm):
# Deleting field 'Skill.level'
db.delete_column('base_skill', 'level')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'base.article': {
'Meta': {'object_name': 'Article'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 3, 11, 20, 39, 972729)', 'blank': 'True'}),
'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'base.category': {
'Meta': {'object_name': 'Category'},
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.Article']", 'null': 'True', 'blank': 'True'}),
'hash_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'province': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['base.Province']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'user_submitted_job_articles': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['base.UserSubmittedJobArticle']", 'null': 'True', 'blank': 'True'})
},
'base.certificate': {
'Meta': {'object_name': 'Certificate'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'institution': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'base.curriculumvitae': {
'Meta': {'object_name': 'CurriculumVitae'},
'certificates': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Certificate']", 'symmetrical': 'False', 'blank': 'True'}),
'connection_requests': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'connection_requests'", 'blank': 'True', 'to': "orm['auth.User']"}),
'date_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'highest_grade_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'house_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Language']", 'symmetrical': 'False', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'nr_of_faxes_sent': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'preferred_skill': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'profiles_preferred'", 'null': 'True', 'to': "orm['base.Skill']"}),
'references': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.Reference']", 'symmetrical': 'False', 'blank': 'True'}),
'school': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'skills': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'profiles'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['base.Skill']"}),
'street_name': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'work_experiences': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['base.WorkExperience']", 'symmetrical': 'False', 'blank': 'True'})
},
'base.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'read_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'base.province': {
'Meta': {'object_name': 'Province'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'search_id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'})
},
'base.reference': {
'Meta': {'object_name': 'Reference'},
'contact_no': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'fullname': ('django.db.models.fields.CharField', [], {'max_length': '45'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relationship': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'})
},
'base.skill': {
'Meta': {'object_name': 'Skill'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'skill': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'base.usersubmittedjobarticle': {
'Meta': {'object_name': 'UserSubmittedJobArticle'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'default': "''"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_submitted_job_article_user'", 'to': "orm['auth.User']"})
},
'base.workexperience': {
'Meta': {'object_name': 'WorkExperience'},
'company': ('django.db.models.fields.CharField', [], {'max_length': '45', 'null': 'True', 'blank': 'True'}),
'end_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_year': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '45'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['base']
| bsd-3-clause |
okolisny/integration_tests | cfme/tests/infrastructure/test_host_drift_analysis.py | 1 | 4947 | # -*- coding: utf-8 -*-
import pytest
from cfme import test_requirements
from cfme.configure.tasks import Tasks
from cfme.fixtures import pytest_selenium as sel
from cfme.infrastructure import host as host_obj
from cfme.infrastructure.provider import InfraProvider
from cfme.web_ui import DriftGrid, toolbar as tb
from cfme.utils import error, testgen
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.wait import wait_for
pytestmark = [
test_requirements.drift,
pytest.mark.tier(3)
]
def pytest_generate_tests(metafunc):
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=['hosts'])
argnames += ['host']
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
for test_host in args['provider'].data['hosts']:
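            # Only hosts flagged with 'test_fleece' in the provider YAML are parametrized.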
if not test_host.get('test_fleece', False):
continue
argvs = argvalues[i][:]
new_argvalues.append(argvs + [test_host])
test_id = '{}-{}'.format(args['provider'].key, test_host['type'])
new_idlist.append(test_id)
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.mark.meta(blockers=[1242655])
def test_host_drift_analysis(request, setup_provider, provider, host, soft_assert):
"""Tests host drift analysis
Metadata:
test_flag: host_drift_analysis
"""
test_host = host_obj.Host(name=host['name'], provider=provider)
wait_for(lambda: test_host.exists, delay=20, num_sec=120, fail_func=sel.refresh,
message="hosts_exists")
# get drift history num
drift_num_orig = int(test_host.get_detail('Relationships', 'Drift History'))
# add credentials to host + finalizer to remove them
if not test_host.has_valid_credentials:
test_host.update(
updates={'credentials': host_obj.get_credentials_from_config(host['credentials'])},
validate_credentials=True
)
@request.addfinalizer
def test_host_remove_creds():
test_host.update(
updates={
'credentials': host_obj.Host.Credential(
principal="",
secret="",
verify_secret=""
)
}
)
# clear table
view = navigate_to(Tasks, 'AllOtherTasks')
view.delete.item_select('Delete All', handle_alert=True)
# initiate 1st analysis
test_host.run_smartstate_analysis()
# Wait for the task to finish
def is_host_analysis_finished():
""" Check if analysis is finished - if not, reload page
"""
finished = False
view = navigate_to(Tasks, 'AllOtherTasks')
host_analysis_row = view.tabs.allothertasks.table.row(
task_name="SmartState Analysis for '{}'".format(test_host.name))
if host_analysis_row.state.text == 'Finished':
finished = True
# select the row and delete the task
host_analysis_row[0].check()
view.delete.item_select('Delete', handle_alert=True)
else:
view.reload.click()
return finished
wait_for(is_host_analysis_finished, delay=5, timeout="8m")
    # wait for drift history num+1
wait_for(
lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 1,
delay=20,
num_sec=120,
message="Waiting for Drift History count to increase",
fail_func=sel.refresh
)
# add a tag and a finalizer to remove it
test_host.add_tag(category='Department', tag='Accounting')
request.addfinalizer(lambda: test_host.remove_tag(category='Department', tag='Accounting'))
# initiate 2nd analysis
test_host.run_smartstate_analysis()
# Wait for the task to finish
wait_for(is_host_analysis_finished, delay=5, timeout="8m")
    # wait for drift history num+2
wait_for(
lambda: int(test_host.get_detail('Relationships', 'Drift History')) == drift_num_orig + 2,
delay=20,
num_sec=120,
message="Waiting for Drift History count to increase",
fail_func=sel.refresh
)
# check drift difference
soft_assert(not test_host.equal_drift_results('Department (1)', 'My Company Tags', 0, 1),
"Drift analysis results are equal when they shouldn't be")
# Test UI features that modify the drift grid
d_grid = DriftGrid()
# Accounting tag should not be displayed, because it was changed to True
tb.select("Attributes with same values")
with error.expected(sel.NoSuchElementException):
d_grid.get_cell('Accounting', 0)
# Accounting tag should be displayed now
tb.select("Attributes with different values")
d_grid.get_cell('Accounting', 0)
| gpl-2.0 |
BaconPancakes/valor | lib/websockets/client.py | 11 | 6563 | """
The :mod:`websockets.client` module defines a simple WebSocket client API.
"""
import asyncio
import collections.abc
import email.message
from .exceptions import InvalidHandshake
from .handshake import build_request, check_response
from .http import USER_AGENT, read_response
from .protocol import CONNECTING, OPEN, WebSocketCommonProtocol
from .uri import parse_uri
__all__ = ['connect', 'WebSocketClientProtocol']
class WebSocketClientProtocol(WebSocketCommonProtocol):
"""
Complete WebSocket client implementation as an :class:`asyncio.Protocol`.
This class inherits most of its methods from
:class:`~websockets.protocol.WebSocketCommonProtocol`.
"""
is_client = True
state = CONNECTING
@asyncio.coroutine
def handshake(self, wsuri,
origin=None, subprotocols=None, extra_headers=None):
"""
Perform the client side of the opening handshake.
If provided, ``origin`` sets the Origin HTTP header.
If provided, ``subprotocols`` is a list of supported subprotocols in
order of decreasing preference.
If provided, ``extra_headers`` sets additional HTTP request headers.
It must be a mapping or an iterable of (name, value) pairs.
"""
headers = []
set_header = lambda k, v: headers.append((k, v))
if wsuri.port == (443 if wsuri.secure else 80): # pragma: no cover
set_header('Host', wsuri.host)
else:
set_header('Host', '{}:{}'.format(wsuri.host, wsuri.port))
if origin is not None:
set_header('Origin', origin)
if subprotocols is not None:
set_header('Sec-WebSocket-Protocol', ', '.join(subprotocols))
if extra_headers is not None:
if isinstance(extra_headers, collections.abc.Mapping):
extra_headers = extra_headers.items()
for name, value in extra_headers:
set_header(name, value)
set_header('User-Agent', USER_AGENT)
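        # build_request() adds the Upgrade/Connection/Sec-WebSocket-* headers and
        # returns the Sec-WebSocket-Key needed to validate the server response.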
key = build_request(set_header)
self.request_headers = email.message.Message()
for name, value in headers:
self.request_headers[name] = value
self.raw_request_headers = headers
# Send handshake request. Since the URI and the headers only contain
# ASCII characters, we can keep this simple.
request = ['GET %s HTTP/1.1' % wsuri.resource_name]
request.extend('{}: {}'.format(k, v) for k, v in headers)
request.append('\r\n')
request = '\r\n'.join(request).encode()
self.writer.write(request)
# Read handshake response.
try:
status_code, headers = yield from read_response(self.reader)
except ValueError as exc:
raise InvalidHandshake("Malformed HTTP message") from exc
if status_code != 101:
raise InvalidHandshake("Bad status code: {}".format(status_code))
self.response_headers = headers
self.raw_response_headers = list(headers.raw_items())
get_header = lambda k: headers.get(k, '')
check_response(get_header, key)
self.subprotocol = headers.get('Sec-WebSocket-Protocol', None)
if (self.subprotocol is not None and
self.subprotocol not in subprotocols):
raise InvalidHandshake(
"Unknown subprotocol: {}".format(self.subprotocol))
assert self.state == CONNECTING
self.state = OPEN
self.opening_handshake.set_result(True)
@asyncio.coroutine
def connect(uri, *,
klass=WebSocketClientProtocol,
timeout=10, max_size=2 ** 20, max_queue=2 ** 5,
loop=None, legacy_recv=False,
origin=None, subprotocols=None, extra_headers=None,
**kwds):
"""
This coroutine connects to a WebSocket server at a given ``uri``.
It yields a :class:`WebSocketClientProtocol` which can then be used to
send and receive messages.
:func:`connect` is a wrapper around the event loop's
:meth:`~asyncio.BaseEventLoop.create_connection` method. Extra keyword
arguments are passed to :meth:`~asyncio.BaseEventLoop.create_connection`.
For example, you can set the ``ssl`` keyword argument to a
:class:`~ssl.SSLContext` to enforce some TLS settings. When connecting to
a ``wss://`` URI, if this argument isn't provided explicitly, it's set to
``True``, which means Python's default :class:`~ssl.SSLContext` is used.
The behavior of the ``timeout``, ``max_size``, and ``max_queue`` optional
    arguments is described in the documentation of
:class:`~websockets.protocol.WebSocketCommonProtocol`.
:func:`connect` also accepts the following optional arguments:
* ``origin`` sets the Origin HTTP header
* ``subprotocols`` is a list of supported subprotocols in order of
decreasing preference
* ``extra_headers`` sets additional HTTP request headers – it can be a
mapping or an iterable of (name, value) pairs
:func:`connect` raises :exc:`~websockets.uri.InvalidURI` if ``uri`` is
invalid and :exc:`~websockets.handshake.InvalidHandshake` if the opening
handshake fails.
    On Python 3.5, :func:`connect` can be used as an asynchronous context
manager. In that case, the connection is closed when exiting the context.
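
    A minimal usage sketch (the URI and payload below are illustrative only)::

        @asyncio.coroutine
        def hello():
            ws = yield from connect('ws://localhost:8765/')
            try:
                yield from ws.send('hello')
                greeting = yield from ws.recv()
                print(greeting)
            finally:
                yield from ws.close()
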
"""
if loop is None:
loop = asyncio.get_event_loop()
wsuri = parse_uri(uri)
if wsuri.secure:
kwds.setdefault('ssl', True)
elif 'ssl' in kwds:
raise ValueError("connect() received a SSL context for a ws:// URI. "
"Use a wss:// URI to enable TLS.")
factory = lambda: klass(
host=wsuri.host, port=wsuri.port, secure=wsuri.secure,
timeout=timeout, max_size=max_size, max_queue=max_queue,
loop=loop, legacy_recv=legacy_recv,
)
transport, protocol = yield from loop.create_connection(
factory, wsuri.host, wsuri.port, **kwds)
try:
yield from protocol.handshake(
wsuri, origin=origin, subprotocols=subprotocols,
extra_headers=extra_headers)
except Exception:
yield from protocol.close_connection(force=True)
raise
return protocol
try:
from .py35.client import Connect
except (SyntaxError, ImportError): # pragma: no cover
pass
else:
Connect.__wrapped__ = connect
# Copy over docstring to support building documentation on Python 3.5.
Connect.__doc__ = connect.__doc__
connect = Connect
| gpl-3.0 |
gumyns/home-automation | hardware_old/arm/toolchainGenerator.py | 1 | 2209 | #!/usr/bin/python
import os
import sys
import subprocess
devices = ['STM32F030x6']
template = """set(ENV{{PATH}} {gcc_dir})
set(GCC_ARM_DIRECTORY {gcc_dir})
add_definitions(-D{device})
set(CMAKE_SYSTEM_NAME Generic)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_C_COMPILER_WORKS 1)
set(CMAKE_CXX_COMPILER_WORKS 1)
set(CMAKE_C_COMPILER arm-none-eabi-gcc)
set(CMAKE_CXX_COMPILER arm-none-eabi-g++)
set(CMAKE_ASM_COMPILER arm-none-eabi-as)
set(CMAKE_AR arm-none-eabi-ar)
set(CMAKE_RANLIB arm-none-eabi-ranlib)
set(CMAKE_LINKER arm-none-eabi-ld)
set(COMMON_FLAGS " -Wall -Wextra -nostartfiles -mcpu=cortex-m0 -mthumb -mthumb-interwork -mfloat-abi=soft -ffunction-sections -fdata-sections -g -fno-common -fmessage-length=0 --specs=nano.specs -Os -fsigned-char -ffreestanding -fomit-frame-pointer -mabi=aapcs -fno-unroll-loops -ffast-math -ftree-vectorize -fno-builtin -MMD -MP")
set(CMAKE_CXX_FLAGS_INIT "${{COMMON_FLAGS}} -std=gnu++14")
set(CMAKE_C_FLAGS_INIT "${{COMMON_FLAGS}} -std=gnu99")
set(CMAKE_ASM_FLAGS_INIT -nostdinc)
set(CMAKE_EXE_LINKER_FLAGS_INIT "-Xlinker --gc-sections -Wl,-Map,${{PROJECT_NAME}}.map,-o${{PROJECT_NAME}}.elf")
set(CMAKE_C_FLAGS "${{COMMON_FLAGS}} -std=gnu99" CACHE INTERNAL "c compiler flags")
set(CMAKE_CXX_FLAGS "${{COMMON_FLAGS}} -std=gnu++14 -fabi-version=0 -fno-exceptions -fno-rtti -fno-use-cxa-atexit -fno-threadsafe-statics" CACHE INTERNAL "cxx compiler flags")
set(CMAKE_ASM_FLAGS "${{COMMON_FLAGS}} -x assembler-with-cpp " CACHE INTERNAL "asm compiler flags")
"""
findCMD = 'find ~/opt -name "arm-none-eabi-gcc" | head -1 | rev | cut -c 18- | rev'
out = subprocess.Popen(findCMD, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = out.communicate()
files = stdout.decode().split()
if len(files) == 1:
directory = files[0].strip()
else:
sys.exit("GCC for arm not found...")
srcDir = os.path.join(os.path.dirname(os.path.abspath(str(sys.argv[0]))), "toolchains")
if not os.path.isdir(srcDir):
subprocess.call(["mkdir", srcDir])
for dev in devices:
with open(os.path.join(srcDir, "{0}.cmake".format(dev)), 'w') as f:
f.write(template.format(gcc_dir=directory, device=dev))
f.close()
| apache-2.0 |
CharlieGreenman/pixelatorV2_with_react | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/easy_xml_test.py | 2698 | 3270 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
    converted = '&lt;test&gt;\'"&#xD;&amp;&#xA;foo'
    converted_apos = converted.replace("'", '&apos;')
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted_apos))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="\'$(Configuration)|$(Platform)\'=='
'\'Debug|Win32\'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
ActionAdam/osmc | package/mediacenter-addon-osmc/src/script.module.osmcsetting.apfstore/resources/lib/apf_gui.py | 9 | 4895 |
# KODI modules
import xbmc
import xbmcaddon
import xbmcgui
# Standard modules
import sys
import os
import socket
import json
addonid = "script.module.osmcsetting.apfstore"
__addon__ = xbmcaddon.Addon(addonid)
__path__ = xbmc.translatePath(xbmcaddon.Addon(addonid).getAddonInfo('path'))
# Custom module path
sys.path.append(os.path.join(__path__, 'resources','lib'))
# OSMC SETTING Modules
from CompLogger import comprehensive_logger as clog
ADDONART = os.path.join(__path__, 'resources','skins', 'Default', 'media')
USERART = os.path.join(xbmc.translatePath('special://userdata/'),'addon_data', addonid)
def log(message):
xbmc.log('OSMC APFStore gui : ' + str(message), level=xbmc.LOGDEBUG)
def lang(id):
san = __addon__.getLocalizedString(id).encode( 'utf-8', 'ignore' )
return san
class apf_GUI(xbmcgui.WindowXMLDialog):
def __init__(self, strXMLname, strFallbackPath, strDefaultName, apf_dict):
self.apf_dict = apf_dict
self.apf_order_list = []
self.action_dict = {}
def onInit(self):
self.list = self.getControl(500)
self.list.setVisible(True)
for x, y in self.apf_dict.iteritems():
# self.current_icon = '/home/kubkev/.kodi/addons/script.module.osmcsetting.apfstore/resources/skins/Default/media/osmc_logo.png'
self.list.addItem(y)
self.apf_order_list.append(x)
try:
self.getControl(50).setVisible(False)
except:
pass
self.check_action_dict()
@clog(logger=log)
def check_action_dict(self):
install = 0
removal = 0
for x, y in self.action_dict.iteritems():
if y == 'Install':
install += 1
elif y == 'Uninstall':
removal += 1
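        # The Apply button (control 6) and its labels (61/62) are only shown while
        # at least one install or uninstall is pending.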
if not install and not removal:
self.getControl(6).setVisible(False)
self.getControl(61).setVisible(False)
self.getControl(62).setVisible(False)
return
if install:
self.getControl(61).setLabel(lang(32001) % install)
self.getControl(6).setVisible(True)
self.getControl(61).setVisible(True)
else:
self.getControl(61).setVisible(False)
if removal:
self.getControl(62).setLabel(lang(32002) % removal)
self.getControl(6).setVisible(True)
self.getControl(62).setVisible(True)
else:
self.getControl(62).setVisible(False)
@clog(logger=log)
def onClick(self, controlID):
if controlID == 500:
container = self.getControl(500)
sel_pos = container.getSelectedPosition()
sel_item = self.apf_dict[self.apf_order_list[sel_pos]]
xml = "APFAddonInfo_720OSMC.xml" if xbmcgui.Window(10000).getProperty("SkinHeight") == '720' else "APFAddonInfo_OSMC.xml"
self.addon_gui = addon_info_gui(xml, __path__, 'Default', sel_item=sel_item)
self.addon_gui.doModal()
ending_action = self.addon_gui.action
if ending_action == 'Install':
self.action_dict[sel_item.id] = 'Install'
elif ending_action == 'Uninstall':
self.action_dict[sel_item.id] = 'Uninstall'
elif sel_item.id in self.action_dict:
del self.action_dict[sel_item.id]
self.check_action_dict()
del self.addon_gui
log(self.action_dict)
elif controlID == 7:
self.close()
elif controlID == 6:
# send install and removal list to Update Service
action_list = ['install_' + k if v == 'Install' else 'removal_' + k for k, v in self.action_dict.iteritems()]
action_string = '|=|'.join(action_list)
self.contact_update_service(action_string)
self.close()
@clog(logger=log)
def contact_update_service(self, action_string):
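        # Send the pending install/uninstall actions to the OSMC settings update
        # service as a JSON message over its Unix domain socket.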
address = '/var/tmp/osmc.settings.update.sockfile'
message = ('action_list', {'action': action_string})
message = json.dumps(message)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.connect(address)
sock.sendall(message)
sock.close()
class addon_info_gui(xbmcgui.WindowXMLDialog):
'''
Controls
==============================
50001 Shortdesc
50002 Longdesc
50003 Version
50004 Maintainer
50005 LastUpdated
50006 Icon
50007 Name
'''
def __init__(self, strXMLname, strFallbackPath, strDefaultName, sel_item):
self.action = False
self.sel_item = sel_item
def onInit(self):
self.getControl(50001).setLabel(self.sel_item.shortdesc)
self.getControl(50002).setText(self.sel_item.longdesc)
self.getControl(50003).setLabel(self.sel_item.version)
self.getControl(50004).setLabel(self.sel_item.maintainedby)
self.getControl(50005).setLabel(self.sel_item.lastupdated)
self.getControl(50006).setImage(self.sel_item.current_icon, True)
self.getControl(50007).setLabel(self.sel_item.name)
if self.sel_item.installed:
self.getControl(6).setLabel(lang(32004))
else:
self.getControl(6).setLabel(lang(32003))
def onClick(self, controlID):
if controlID == 6:
lbl = self.getControl(6).getLabel()
if lbl == lang(32003):
self.action = 'Install'
else:
self.action = 'Uninstall'
self.close()
elif controlID == 7:
self.close()
| gpl-2.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/google/gcp_filestore_instance_info.py | 3 | 7713 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_filestore_instance_info
description:
- Gather info for GCP Instance
short_description: Gather info for GCP Instance
version_added: '2.9'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
zone:
description:
- The name of the Filestore zone of the instance.
required: true
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- For authentication, you can set service_account_file using the C(gcp_service_account_file)
  env variable.
- For authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
  env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: get info on an instance
gcp_filestore_instance_info:
zone: us-central1-b
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
'''
RETURN = '''
resources:
description: List of resources
returned: always
type: complex
contains:
name:
description:
- The resource name of the instance.
returned: success
type: str
description:
description:
- A description of the instance.
returned: success
type: str
createTime:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
tier:
description:
- The service tier of the instance.
returned: success
type: str
labels:
description:
- Resource labels to represent user-provided metadata.
returned: success
type: dict
fileShares:
description:
- File system shares on the instance. For this version, only a single file share
is supported.
returned: success
type: complex
contains:
name:
description:
- The name of the fileshare (16 characters or less) .
returned: success
type: str
capacityGb:
description:
- File share capacity in GiB. This must be at least 1024 GiB for the standard
tier, or 2560 GiB for the premium tier.
returned: success
type: int
networks:
description:
- VPC networks to which the instance is connected. For this version, only a
single network is supported.
returned: success
type: complex
contains:
network:
description:
- The name of the GCE VPC network to which the instance is connected.
returned: success
type: str
modes:
description:
- IP versions for which the instance has IP addresses assigned.
returned: success
type: list
reservedIpRange:
description:
- A /29 CIDR block that identifies the range of IP addresses reserved for
this instance.
returned: success
type: str
ipAddresses:
description:
- A list of IPv4 or IPv6 addresses.
returned: success
type: list
etag:
description:
- Server-specified ETag for the instance resource to prevent simultaneous updates
from overwriting each other.
returned: success
type: str
zone:
description:
- The name of the Filestore zone of the instance.
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest
import json
################################################################################
# Main
################################################################################
def main():
module = GcpModule(argument_spec=dict(zone=dict(required=True, type='str')))
if not module.params['scopes']:
module.params['scopes'] = ['https://www.googleapis.com/auth/cloud-platform']
return_value = {'resources': fetch_list(module, collection(module))}
module.exit_json(**return_value)
def collection(module):
return "https://file.googleapis.com/v1/projects/{project}/locations/{zone}/instances".format(**module.params)
def fetch_list(module, link):
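    # GcpSession.list() pages through the REST collection and concatenates the
    # 'instances' array from each response page.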
auth = GcpSession(module, 'filestore')
return auth.list(link, return_if_object, array_name='instances')
def return_if_object(module, response):
# If not found, return nothing.
if response.status_code == 404:
return None
# If no content, return nothing.
if response.status_code == 204:
return None
try:
module.raise_for_status(response)
result = response.json()
except getattr(json.decoder, 'JSONDecodeError', ValueError) as inst:
module.fail_json(msg="Invalid JSON response with error: %s" % inst)
if navigate_hash(result, ['error', 'errors']):
module.fail_json(msg=navigate_hash(result, ['error', 'errors']))
return result
if __name__ == "__main__":
main()
| gpl-3.0 |
scenarios/tensorflow | tensorflow/python/kernel_tests/ctc_loss_op_test.py | 10 | 10795 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ctc_ops.ctc_decoder_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import ctc_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
def SimpleSparseTensorFrom(x):
"""Create a very simple SparseTensor with dimensions (batch, time).
Args:
x: a list of lists of type int
Returns:
x_ix and x_val, the indices and values of the SparseTensor<2>.
"""
x_ix = []
x_val = []
for batch_i, batch in enumerate(x):
for time, val in enumerate(batch):
x_ix.append([batch_i, time])
x_val.append(val)
x_shape = [len(x), np.asarray(x_ix).max(0)[1] + 1]
x_ix = constant_op.constant(x_ix, dtypes.int64)
x_val = constant_op.constant(x_val, dtypes.int32)
x_shape = constant_op.constant(x_shape, dtypes.int64)
return sparse_tensor.SparseTensor(x_ix, x_val, x_shape)
class CTCLossTest(test.TestCase):
def _testCTCLoss(self,
inputs,
seq_lens,
labels,
loss_truth,
grad_truth,
expected_err_re=None):
self.assertEquals(len(inputs), len(grad_truth))
inputs_t = constant_op.constant(inputs)
with self.test_session(use_gpu=False) as sess:
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
grad = gradients_impl.gradients(loss, [inputs_t])[0]
self.assertShapeEqual(loss_truth, loss)
self.assertShapeEqual(grad_truth, grad)
if expected_err_re is None:
(tf_loss, tf_grad) = sess.run([loss, grad])
self.assertAllClose(tf_loss, loss_truth, atol=1e-6)
self.assertAllClose(tf_grad, grad_truth, atol=1e-6)
else:
with self.assertRaisesOpError(expected_err_re):
sess.run([loss, grad])
def testBasic(self):
"""Test two batch entries."""
# Input and ground truth from Alex Graves' implementation.
#
#### Batch entry 0 #####
# targets: 0 1 2 1 0
# outputs:
# 0 0.633766 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 0.588392 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 0.321418 0.00249248 0.00272882 0.0037688
# 3 0.0663296 0.643849 0.280111 0.00283995 0.0035545 0.00331533
# 4 0.458235 0.396634 0.123377 0.00648837 0.00903441 0.00623107
# alpha:
# 0 -3.64753 -0.456075 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -0.986437 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -2.12145 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -2.56174 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf -3.34211 -inf
# beta:
# 0 -inf -2.88604 -inf -inf -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -2.35568 -inf -inf -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -inf -1.22066 -inf -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -inf -inf -0.780373 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -3.34211
# outputDerivs:
# 0 -0.366234 0.221185 0.0917319 0.0129757 0.0142857 0.0260553
# 1 0.111121 -0.411608 0.278779 0.0055756 0.00569609 0.010436
# 2 0.0357786 0.633813 -0.678582 0.00249248 0.00272882 0.0037688
# 3 0.0663296 -0.356151 0.280111 0.00283995 0.0035545 0.00331533
# 4 -0.541765 0.396634 0.123377 0.00648837 0.00903441 0.00623107
#
#### Batch entry 1 #####
#
# targets: 0 1 1 0
# outputs:
# 0 0.30176 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 0.397533 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 0.202456
# 3 0.280884 0.429522 0.0326593 0.0339046 0.0326856 0.190345
# 4 0.423286 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# alpha:
# 0 -1.8232 -1.19812 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -2.19315 -2.83037 -2.1206 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -2.03268 -3.71783 -inf -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -4.56292 -inf -inf -inf
# 4 -inf -inf -inf -inf -inf -inf -inf -5.42262 -inf
# beta:
# 0 -inf -4.2245 -inf -inf -inf -inf -inf -inf -inf
# 1 -inf -inf -inf -3.30202 -inf -inf -inf -inf -inf
# 2 -inf -inf -inf -inf -1.70479 -0.856738 -inf -inf -inf
# 3 -inf -inf -inf -inf -inf -0.859706 -0.859706 -0.549337 -inf
# 4 -inf -inf -inf -inf -inf -inf -inf 0 0
# prob: -5.42262
# outputDerivs:
# 0 -0.69824 0.28562 0.0831517 0.0862751 0.0816851 0.161508
# 1 0.24082 -0.602467 0.0557226 0.0546814 0.0557528 0.19549
# 2 0.230246 0.450868 0.0389607 0.038309 0.0391602 -0.797544
# 3 0.280884 -0.570478 0.0326593 0.0339046 0.0326856 0.190345
# 4 -0.576714 0.315517 0.0338439 0.0393744 0.0339315 0.154046
# max_time_steps == 7
depth = 6
# seq_len_0 == 5
targets_0 = [0, 1, 2, 1, 0]
loss_log_prob_0 = -3.34211
# dimensions are time x depth
input_prob_matrix_0 = np.asarray(
[[0.633766, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, 0.588392, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, 0.321418, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, 0.643849, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[0.458235, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
input_log_prob_matrix_0 = np.log(input_prob_matrix_0)
gradient_log_prob_0 = np.asarray(
[[-0.366234, 0.221185, 0.0917319, 0.0129757, 0.0142857, 0.0260553],
[0.111121, -0.411608, 0.278779, 0.0055756, 0.00569609, 0.010436],
[0.0357786, 0.633813, -0.678582, 0.00249248, 0.00272882, 0.0037688],
[0.0663296, -0.356151, 0.280111, 0.00283995, 0.0035545, 0.00331533],
[-0.541765, 0.396634, 0.123377, 0.00648837, 0.00903441, 0.00623107]],
dtype=np.float32)
# seq_len_1 == 5
targets_1 = [0, 1, 1, 0]
loss_log_prob_1 = -5.42262
# dimensions are time x depth
input_prob_matrix_1 = np.asarray(
[[0.30176, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, 0.397533, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, 0.202456],
[0.280884, 0.429522, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[0.423286, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
input_log_prob_matrix_1 = np.log(input_prob_matrix_1)
gradient_log_prob_1 = np.asarray(
[[-0.69824, 0.28562, 0.0831517, 0.0862751, 0.0816851, 0.161508],
[0.24082, -0.602467, 0.0557226, 0.0546814, 0.0557528, 0.19549],
[0.230246, 0.450868, 0.0389607, 0.038309, 0.0391602, -0.797544],
[0.280884, -0.570478, 0.0326593, 0.0339046, 0.0326856, 0.190345],
[-0.576714, 0.315517, 0.0338439, 0.0393744, 0.0339315, 0.154046]],
dtype=np.float32)
# len max_time_steps array of 2 x depth matrices
inputs = [
np.vstack(
[input_log_prob_matrix_0[t, :], input_log_prob_matrix_1[t, :]])
for t in range(5)
] + 2 * [np.nan * np.ones((2, depth), np.float32)]
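    # Time steps 5 and 6 are padding filled with NaN; sequence_length == 5, so the
    # CTC loss must never read them.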
# convert inputs into [max_time x batch_size x depth tensor] Tensor
inputs = np.asarray(inputs, dtype=np.float32)
# len batch_size array of label vectors
labels = SimpleSparseTensorFrom([targets_0, targets_1])
# batch_size length vector of sequence_lengths
seq_lens = np.array([5, 5], dtype=np.int32)
# output: batch_size length vector of negative log probabilities
loss_truth = np.array([-loss_log_prob_0, -loss_log_prob_1], np.float32)
# output: len max_time_steps array of 2 x depth matrices
grad_truth = [
np.vstack([gradient_log_prob_0[t, :], gradient_log_prob_1[t, :]])
for t in range(5)
] + 2 * [np.zeros((2, depth), np.float32)]
# convert grad_truth into [max_time x batch_size x depth] Tensor
grad_truth = np.asarray(grad_truth, dtype=np.float32)
self._testCTCLoss(inputs, seq_lens, labels, loss_truth, grad_truth)
def test_time_major(self):
"""Testing time_major param.
testing if transposing and setting time_major=False will result in the same
loss
"""
# [max_time x batch_size x depth tensor]
inputs = np.random.randn(2, 2, 3).astype(np.float32)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
inputs_t = constant_op.constant(inputs)
# Transposing tensor to [batch_size x max_time x depth tensor]
inputs_t_transposed = constant_op.constant(inputs.transpose(1, 0, 2))
with self.test_session(use_gpu=False) as sess:
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
loss_transposed = ctc_ops.ctc_loss(
inputs=inputs_t_transposed,
labels=labels,
sequence_length=seq_lens,
time_major=False)
(tf_loss, tf_loss_transposed) = sess.run([loss, loss_transposed])
self.assertAllEqual(tf_loss, tf_loss_transposed)
def testInvalidSecondGradient(self):
inputs = np.random.randn(2, 2, 3).astype(np.float32)
inputs_t = constant_op.constant(inputs)
labels = SimpleSparseTensorFrom([[0, 1], [1, 0]])
seq_lens = np.array([2, 2], dtype=np.int32)
v = [1.0]
with self.test_session(use_gpu=False):
loss = ctc_ops.ctc_loss(
inputs=inputs_t, labels=labels, sequence_length=seq_lens)
      # Taking the second gradient should fail, since it is not
# yet supported.
with self.assertRaisesRegexp(LookupError,
".*No gradient defined.*PreventGradient.*"):
_ = gradients_impl._hessian_vector_product(loss, [inputs_t], v)
if __name__ == "__main__":
test.main()
| apache-2.0 |
mateuszmalinowski/visual_turing_test-tutorial | kraino/utils/print_metrics.py | 1 | 5116 | #!/usr/bin/env python
from __future__ import print_function
"""
Selects and prints metrics.
Author: Mateusz Malinowski
Email: mmalinow@mpi-inf.mpg.de
"""
import os
from uuid import uuid4
from compute_wups import get_metric_score as wups_score
from compute_wups import get_class_metric_score as class_wups_score
from data_provider import vqa_save_results as vqa_store
from vqaEvaluation.vqaClassNormalizedEval import VQAClassNormalizedEval as VQAEval
def average_over_dictionary(mydict):
"""
Average over dictionary values.
"""
ave = sum([x for x in mydict.values()])/len(mydict)
return ave
def show_wups(gt_list, pred_list, verbose, extra_vars):
"""
In:
gt_list - ground truth list
pred_list - list of predictions
verbose - if greater than 0 the metric measures are printed out
extra_vars - not used here
Out:
list of key, value pairs (dict) such that
'value' denotes the performance number
and 'name' denotes the name of the metric
"""
acc = wups_score(gt_list, pred_list, -1) * 100.0
wups_at_09 = wups_score(gt_list, pred_list, 0.9) * 100.0
#wups_at_0 = wups_score(gt_list, pred_list, 0.0) * 100.0
wups_at_0 = -1.0
per_class_acc_tmp = class_wups_score(gt_list, pred_list, -1)
#per_class_wups_at_09_tmp = class_wups_score(gt_list, pred_list, 0.9)
per_class_wups_at_09_tmp = None
per_class_acc = {k:v*100.0 for k,v in per_class_acc_tmp.items()}
if per_class_wups_at_09_tmp is not None:
per_class_wups_at_09 = {k:v*100.0 for k,v in per_class_wups_at_09_tmp.items()}
else:
per_class_wups_at_09 = None
class_acc = average_over_dictionary(per_class_acc_tmp)*100.0
if per_class_wups_at_09_tmp is not None:
class_wups_at_09 = average_over_dictionary(per_class_wups_at_09_tmp)*100.0
else:
class_wups_at_09 = -1.0
class_wups_at_0 = -1.0
if verbose > 0:
print('METRIC: Accuracy is {0}, wups at 0.9 is {1}, wups at 0.0 is {2}'.format(
acc, wups_at_09, wups_at_0))
print('CLASS METRIC: Accuracy is {0}, wups at 0.9 is {1}, wups at 0.0 is {2}'.format(
class_acc, class_wups_at_09, class_wups_at_0))
return [{'value':acc, 'name':'accuracy'},
{'value':wups_at_09, 'name':'wups at 0.9'},
{'value':wups_at_0, 'name':'wups at 0.0'},
{'value':per_class_acc, 'name':'per class accuracy',
'idiosyncrasy':'long:muted'},
{'value':per_class_wups_at_09, 'name':'per class wups at 0.9',
'idiosyncrasy':'long:muted'},
{'value':class_acc, 'name':'class accuracy'},
{'value':class_wups_at_09, 'name':'class wups at 0.9'},
{'value':class_wups_at_0, 'name':'class wups at 0'},]
def show_vqa(gt_list, pred_list, verbose, extra_vars):
#question_id, vqa_object,
#dataset_root=None):
"""
In:
gt_list - ground truth list
pred_list - list of predictions
verbose - if greater than 0 the metric measures are printed out
extra_vars - extra variables, here are:
extra_vars['vqa'] - the vqa object
extra_vars['resfun'] - function from the results file to the vqa object
extra_vars['question_id'] - list of the question ids
Out:
list of key, value pairs (dict) such that
'value' denotes the performance number
and 'name' denotes the name of the metric
"""
    # TODO: quite hacky way of creating and then reading back the file
if verbose > 0:
print('dumping json file ...')
vqa_object = extra_vars['vqa_object']
results_path = '/tmp/vqa_metric_{0}.json'.format(uuid4())
#print(results_path)
vqa_store(extra_vars['question_id'], pred_list, results_path)
vqa_res = extra_vars['resfun'](results_path)
os.remove(results_path)
if verbose > 0:
print('dumping finished')
###
vqaEval = VQAEval(vqa_object, vqa_res, n=2)
vqaEval.evaluate()
acc_overall = vqaEval.accuracy['overall']
acc_yes_no = vqaEval.accuracy['perAnswerType']['yes/no']
acc_number = vqaEval.accuracy['perAnswerType']['number']
acc_other = vqaEval.accuracy['perAnswerType']['other']
acc_per_class = vqaEval.accuracy['perAnswerClass']
acc_class_normalized = vqaEval.accuracy['classNormalizedOverall']
if verbose > 0:
print('METRIC: Accuracy yes/no is {0}, other is {1}, number is {2}, overall is {3}, class normalized is {4}'.\
format(acc_yes_no, acc_other, acc_number, acc_overall, acc_class_normalized))
return [{'value':acc_overall, 'name':'overall accuracy'},
{'value':acc_yes_no, 'name':'yes/no accuracy'},
{'value':acc_number, 'name':'number accuracy'},
{'value':acc_other, 'name':'other accuracy'},
{'value':acc_class_normalized, 'name':'class accuracy'},
{'value':acc_per_class, 'name':'per answer class',
'idiosyncrasy':'long:muted'},]
select = {
'wups' : show_wups,
'vqa' : show_vqa
}
| mit |
manish211/coveralls-python | tests/test_cli.py | 1 | 2270 | # coding: utf-8
import os
from mock import patch, call
import pytest
import coveralls
from coveralls.api import CoverallsException
import coveralls.cli
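# Patching TRAVIS into the environment simulates a supported CI service, so
# Coveralls() can be constructed without an explicit repo token.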
@patch.dict(os.environ, {'TRAVIS': 'True'}, clear=True)
@patch.object(coveralls.cli.log, 'info')
@patch.object(coveralls.Coveralls, 'wear')
def test_debug(mock_wear, mock_log):
coveralls.cli.main(argv=['debug'])
mock_wear.assert_called_with(dry_run=True)
mock_log.assert_has_calls([call("Testing coveralls-python...")])
@patch.object(coveralls.cli.log, 'info')
@patch.object(coveralls.Coveralls, 'wear')
def test_debug_no_token(mock_wear, mock_log):
coveralls.cli.main(argv=['debug'])
mock_wear.assert_called_with(dry_run=True)
mock_log.assert_has_calls([call("Testing coveralls-python...")])
@patch.object(coveralls.cli.log, 'info')
@patch.object(coveralls.Coveralls, 'wear')
@patch.dict(os.environ, {'TRAVIS': 'True'}, clear=True)
def test_real(mock_wear, mock_log):
coveralls.cli.main(argv=[])
mock_wear.assert_called_with()
mock_log.assert_has_calls([call("Submitting coverage to coveralls.io..."), call("Coverage submitted!")])
@patch.dict(os.environ, {'TRAVIS': 'True'}, clear=True)
@patch('coveralls.cli.Coveralls')
def test_rcfile(mock_coveralls):
coveralls.cli.main(argv=['--rcfile=coveragerc'])
mock_coveralls.assert_called_with(True, config_file='coveragerc')
exc = CoverallsException('bad stuff happened')
@patch.object(coveralls.cli.log, 'error')
@patch.object(coveralls.Coveralls, 'wear', side_effect=exc)
@patch.dict(os.environ, {'TRAVIS': 'True'}, clear=True)
def test_exception(mock_coveralls, mock_log):
coveralls.cli.main(argv=[])
mock_log.assert_has_calls([call(exc)])
@patch.object(coveralls.Coveralls, 'save_report')
@patch.dict(os.environ, {'TRAVIS': 'True'}, clear=True)
def test_save_report_to_file(mock_coveralls):
"""Check save_report api usage."""
coveralls.cli.main(argv=['--output=test.log'])
mock_coveralls.assert_called_with('test.log')
@patch.object(coveralls.Coveralls, 'save_report')
def test_save_report_to_file_no_token(mock_coveralls):
"""Check save_report api usage when token is not set."""
coveralls.cli.main(argv=['--output=test.log'])
mock_coveralls.assert_called_with('test.log')
| mit |
kaedroho/django | tests/i18n/test_compilation.py | 12 | 9956 | import gettext as gettext_module
import os
import stat
import unittest
from io import StringIO
from pathlib import Path
from subprocess import run
from unittest import mock
from django.core.management import (
CommandError, call_command, execute_from_command_line,
)
from django.core.management.commands.makemessages import (
Command as MakeMessagesCommand,
)
from django.core.management.utils import find_command
from django.test import SimpleTestCase, override_settings
from django.test.utils import captured_stderr, captured_stdout
from django.utils import translation
from django.utils.translation import gettext
from .utils import RunInTmpDirMixin, copytree
has_msgfmt = find_command('msgfmt')
@unittest.skipUnless(has_msgfmt, 'msgfmt is mandatory for compilation tests')
class MessageCompilationTests(RunInTmpDirMixin, SimpleTestCase):
work_subdir = 'commands'
class PoFileTests(MessageCompilationTests):
LOCALE = 'es_AR'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_bom_rejection(self):
stderr = StringIO()
with self.assertRaisesMessage(CommandError, 'compilemessages generated one or more errors.'):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO(), stderr=stderr)
self.assertIn('file has a BOM (Byte Order Mark)', stderr.getvalue())
self.assertFalse(os.path.exists(self.MO_FILE))
def test_no_write_access(self):
mo_file_en = 'locale/en/LC_MESSAGES/django.mo'
err_buffer = StringIO()
# put file in read-only mode
old_mode = os.stat(mo_file_en).st_mode
os.chmod(mo_file_en, stat.S_IREAD)
try:
with self.assertRaisesMessage(CommandError, 'compilemessages generated one or more errors.'):
call_command('compilemessages', locale=['en'], stderr=err_buffer, verbosity=0)
self.assertIn('not writable location', err_buffer.getvalue())
finally:
os.chmod(mo_file_en, old_mode)
class PoFileContentsTests(MessageCompilationTests):
# Ticket #11240
LOCALE = 'fr'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
def test_percent_symbol_in_po_file(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE))
class MultipleLocaleCompilationTests(MessageCompilationTests):
MO_FILE_HR = None
MO_FILE_FR = None
def setUp(self):
super().setUp()
localedir = os.path.join(self.test_dir, 'locale')
self.MO_FILE_HR = os.path.join(localedir, 'hr/LC_MESSAGES/django.mo')
self.MO_FILE_FR = os.path.join(localedir, 'fr/LC_MESSAGES/django.mo')
def test_one_locale(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
def test_multiple_locales(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=['hr', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE_HR))
self.assertTrue(os.path.exists(self.MO_FILE_FR))
class ExcludedLocaleCompilationTests(MessageCompilationTests):
work_subdir = 'exclude'
MO_FILE = 'locale/%s/LC_MESSAGES/django.mo'
def setUp(self):
super().setUp()
copytree('canned_locale', 'locale')
def test_command_help(self):
with captured_stdout(), captured_stderr():
# `call_command` bypasses the parser; by calling
# `execute_from_command_line` with the help subcommand we
# ensure that there are no issues with the parser itself.
execute_from_command_line(['django-admin', 'help', 'compilemessages'])
def test_one_locale_excluded(self):
call_command('compilemessages', exclude=['it'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertTrue(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded(self):
call_command('compilemessages', exclude=['it', 'fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_one_locale_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr'], exclude=['fr'], stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
def test_multiple_locales_excluded_with_locale(self):
call_command('compilemessages', locale=['en', 'fr', 'it'], exclude=['fr', 'it'],
stdout=StringIO())
self.assertTrue(os.path.exists(self.MO_FILE % 'en'))
self.assertFalse(os.path.exists(self.MO_FILE % 'fr'))
self.assertFalse(os.path.exists(self.MO_FILE % 'it'))
class IgnoreDirectoryCompilationTests(MessageCompilationTests):
# Reuse the exclude directory since it contains some locale fixtures.
work_subdir = 'exclude'
MO_FILE = '%s/%s/LC_MESSAGES/django.mo'
CACHE_DIR = Path('cache') / 'locale'
NESTED_DIR = Path('outdated') / 'v1' / 'locale'
def setUp(self):
super().setUp()
copytree('canned_locale', 'locale')
copytree('canned_locale', self.CACHE_DIR)
copytree('canned_locale', self.NESTED_DIR)
def assertAllExist(self, dir, langs):
self.assertTrue(all(Path(self.MO_FILE % (dir, lang)).exists() for lang in langs))
def assertNoneExist(self, dir, langs):
self.assertTrue(all(Path(self.MO_FILE % (dir, lang)).exists() is False for lang in langs))
def test_one_locale_dir_ignored(self):
call_command('compilemessages', ignore=['cache'], verbosity=0)
self.assertAllExist('locale', ['en', 'fr', 'it'])
self.assertNoneExist(self.CACHE_DIR, ['en', 'fr', 'it'])
self.assertAllExist(self.NESTED_DIR, ['en', 'fr', 'it'])
def test_multiple_locale_dirs_ignored(self):
call_command('compilemessages', ignore=['cache/locale', 'outdated'], verbosity=0)
self.assertAllExist('locale', ['en', 'fr', 'it'])
self.assertNoneExist(self.CACHE_DIR, ['en', 'fr', 'it'])
self.assertNoneExist(self.NESTED_DIR, ['en', 'fr', 'it'])
def test_ignores_based_on_pattern(self):
call_command('compilemessages', ignore=['*/locale'], verbosity=0)
self.assertAllExist('locale', ['en', 'fr', 'it'])
self.assertNoneExist(self.CACHE_DIR, ['en', 'fr', 'it'])
self.assertNoneExist(self.NESTED_DIR, ['en', 'fr', 'it'])
class CompilationErrorHandling(MessageCompilationTests):
def test_error_reported_by_msgfmt(self):
# po file contains wrong po formatting.
with self.assertRaises(CommandError):
call_command('compilemessages', locale=['ja'], verbosity=0, stderr=StringIO())
def test_msgfmt_error_including_non_ascii(self):
# po file contains invalid msgstr content (triggers non-ascii error content).
# Make sure the output of msgfmt is unaffected by the current locale.
env = os.environ.copy()
env.update({'LANG': 'C'})
with mock.patch('django.core.management.utils.run', lambda *args, **kwargs: run(*args, env=env, **kwargs)):
cmd = MakeMessagesCommand()
if cmd.gettext_version < (0, 18, 3):
self.skipTest("python-brace-format is a recent gettext addition.")
stderr = StringIO()
with self.assertRaisesMessage(CommandError, 'compilemessages generated one or more errors'):
call_command('compilemessages', locale=['ko'], stdout=StringIO(), stderr=stderr)
self.assertIn("' cannot start a field name", stderr.getvalue())
class ProjectAndAppTests(MessageCompilationTests):
LOCALE = 'ru'
PROJECT_MO_FILE = 'locale/%s/LC_MESSAGES/django.mo' % LOCALE
APP_MO_FILE = 'app_with_locale/locale/%s/LC_MESSAGES/django.mo' % LOCALE
class FuzzyTranslationTest(ProjectAndAppTests):
def setUp(self):
super().setUp()
gettext_module._translations = {} # flush cache or test will be useless
def test_nofuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(gettext('Lenin'), 'Ленин')
self.assertEqual(gettext('Vodka'), 'Vodka')
def test_fuzzy_compiling(self):
with override_settings(LOCALE_PATHS=[os.path.join(self.test_dir, 'locale')]):
call_command('compilemessages', locale=[self.LOCALE], fuzzy=True, stdout=StringIO())
with translation.override(self.LOCALE):
self.assertEqual(gettext('Lenin'), 'Ленин')
self.assertEqual(gettext('Vodka'), 'Водка')
class AppCompilationTest(ProjectAndAppTests):
def test_app_locale_compiled(self):
call_command('compilemessages', locale=[self.LOCALE], stdout=StringIO())
self.assertTrue(os.path.exists(self.PROJECT_MO_FILE))
self.assertTrue(os.path.exists(self.APP_MO_FILE))
class PathLibLocaleCompilationTests(MessageCompilationTests):
work_subdir = 'exclude'
def test_locale_paths_pathlib(self):
with override_settings(LOCALE_PATHS=[Path(self.test_dir) / 'canned_locale']):
call_command('compilemessages', locale=['fr'], stdout=StringIO())
self.assertTrue(os.path.exists('canned_locale/fr/LC_MESSAGES/django.mo'))
| bsd-3-clause |
ambrosef/HLx_Examples | Acceleration/memcached/regressionSims/testgen/long/memtest_regressions_long.py | 2 | 5787 | #!/usr/bin/python
import memlib
import sys
## EDIT HERE ###################################################################
keySizes = [1,3,4,5,7,8,9,10,12,13,15,16,17,24,25,28,84,128]
#valueSizes = [1,3,4,5,8,9,10,12,13,16,17,24,28,184,208,1024]
#valueSizes = [1,3,4,5,8,9,10,12,13,16,17,24,28,184,208,1015]
valueSizes = [184,389, 792, 1015, 1862, 2469, 3285, 4397, 5850, 7932, 10174, 14473, 18043, 25836, 30859, 38623, 46827, 57482, 65520]
seq1repeat = 5
keyChars = map(chr, range(97, 123))
valueChars = map(chr, range(65, 91))
## EDIT FINISHED ###############################################################
DEBUG_SEQUENCES = False
PRINT_SEQUENCES = True
################################################################################
if DEBUG_SEQUENCES:
keySizes = [1,2,3]
valueSizes = [1,2]
def pair2kvpair(pair):
return memlib.kv_pair(pair[0], pair[1], "EFBEADDE", 42)
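# seq1: for every key/value pair, issue one binary SET followed by `repeat`
# binary GETs, and return the matching (requests, responses) lists.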
def seq1(keys, values, repeat):
if PRINT_SEQUENCES:
print "--- SEQUENCE 1 repeat %-3s -----------------------------------------------------" % repeat
kv_pairs = []
for key in keys:
for value in values:
kv_pairs.append( memlib.kv_pair(key, value, "EFBEADDE", 42) )
requests = []
responses = []
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Set [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binarySetRequest( kv_pair , "00000000" ) )
responses.append( memlib.binarySetResponse( kv_pair, "00000000" ) )
for _ in range(repeat):
if PRINT_SEQUENCES:
print "Get [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binaryGetRequest( kv_pair , "00000000" ) )
responses.append( memlib.binaryGetResponse( kv_pair , "00000000" ) )
return (requests, responses)
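# seq2: pair keys with a rotating selection of values so every key is eventually
# SET/GET with every value; the value list is rotated by one position per pass.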
def seq2(keys, values):
if PRINT_SEQUENCES:
print "--- SEQUENCE 2 -----------------------------------------------------------------"
requests = []
responses = []
for _ in range(len(values)):
# for more keys than values, duplicate use of values
values_used = values
if len(keys) > len(values):
while(len(keys) > len(values_used)):
values_used = values_used + values
values_used = values_used[0:len(keys)]
# requests
kv_pairs = map(pair2kvpair, zip(keys, values_used))
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Set [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binarySetRequest(kv_pair, "00000000") )
responses.append( memlib.binarySetResponse(kv_pair, "00000000") )
for kv_pair in kv_pairs:
if PRINT_SEQUENCES:
print "Get [%d -> %d]: %s -> %s" % (len(kv_pair['key']), len(kv_pair['value']), kv_pair['key'], kv_pair['value'])
requests.append( memlib.binaryGetRequest(kv_pair, "00000000") )
responses.append( memlib.binaryGetResponse(kv_pair, "00000000") )
# rotation
values = values[1:] + values[0:1]
return (requests, responses)
################################################################################
if len(keySizes) > len(keyChars):
sys.exit("Error: Not enough key characters.")
if len(valueSizes) > len(valueChars):
sys.exit("Error: Not enough value characters.")
keyPairs = zip(keySizes, keyChars)
valuePairs = zip(valueSizes, valueChars)
keys = map(lambda (size, char): char * size, keyPairs)
values = map(lambda (size, char): char * size, valuePairs)
SEQ1 = seq1(keys, values, seq1repeat)
SEQ2 = seq2(keys, values)
SEQ3 = seq1(keys, values, 1)
# SEQ1
req = open("SEQ1_R12-pkt.in.long.txt", "w")
req.write( memlib.requests12Gbps(SEQ1[0]) )
req.close()
req = open("SEQ1_R1-pkt.in.long.txt", "w")
req.write( memlib.requests1Gbps(SEQ1[0]) )
req.close()
#res = open("SEQ1-pkt.out.long.txt", "w")
#res.write( memlib.responses(SEQ1[1]) )
#res.close()
res = open("SEQ1-pkt.out.long.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ1[1]) )
res.close()
# SEQ2
req = open("SEQ2_R12-pkt.in.long.txt", "w")
req.write( memlib.requests12Gbps(SEQ2[0]) )
req.close()
req = open("SEQ2_R1-pkt.in.long.txt", "w")
req.write( memlib.requests1Gbps(SEQ2[0]) )
req.close()
#res = open("SEQ2-pkt.out.long.txt", "w")
#res.write( memlib.responses(SEQ2[1]) )
#res.close()
res = open("SEQ2-pkt.out.long.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ2[1]) )
res.close()
# SEQ3
req = open("SEQ3_R12-pkt.in.long.txt", "w")
req.write( memlib.requests12Gbps(SEQ3[0]) )
req.close()
req = open("SEQ3_R1-pkt.in.long.txt", "w")
req.write( memlib.requests1Gbps(SEQ3[0]) )
req.close()
#res = open("SEQ3-pkt.out.long.txt", "w")
#res.write( memlib.responses(SEQ3[1]) )
#res.close()
res = open("SEQ3-pkt.out.long.hls.rtl.txt", "w")
res.write( memlib.responses_rtl_hls(SEQ3[1]) )
res.close()
####### Same thing for HLS outputs #######
# SEQ1
req = open("SEQ1_R12-pkt.in.long.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ1[0]) )
req.close()
req = open("SEQ1_R1-pkt.in.long.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ1[0]) )
req.close()
res = open("SEQ1-pkt.out.long.hls.txt", "w")
res.write( memlib.responses_hls(SEQ1[1]) )
res.close()
# SEQ2
req = open("SEQ2_R12-pkt.in.long.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ2[0]) )
req.close()
req = open("SEQ2_R1-pkt.in.long.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ2[0]) )
req.close()
res = open("SEQ2-pkt.out.long.hls.txt", "w")
res.write( memlib.responses_hls(SEQ2[1]) )
res.close()
# SEQ3
req = open("SEQ3_R12-pkt.in.long.hls.txt", "w")
req.write( memlib.requests12Gbps_hls(SEQ3[0]) )
req.close()
req = open("SEQ3_R1-pkt.in.long.hls.txt", "w")
req.write( memlib.requests1Gbps_hls(SEQ3[0]) )
req.close()
res = open("SEQ3-pkt.out.long.hls.txt", "w")
res.write( memlib.responses_hls(SEQ3[1]) )
res.close()
| bsd-3-clause |
Cinntax/home-assistant | homeassistant/components/channels/media_player.py | 2 | 9547 | """Support for interfacing with an instance of getchannels.com."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
DOMAIN,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_EPISODE,
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_TVSHOW,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_VOLUME_MUTE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_IDLE,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DATA_CHANNELS = "channels"
DEFAULT_NAME = "Channels"
DEFAULT_PORT = 57000
FEATURE_SUPPORT = (
SUPPORT_PLAY
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_VOLUME_MUTE
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
SERVICE_SEEK_FORWARD = "channels_seek_forward"
SERVICE_SEEK_BACKWARD = "channels_seek_backward"
SERVICE_SEEK_BY = "channels_seek_by"
# Service call validation schemas
ATTR_SECONDS = "seconds"
CHANNELS_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
CHANNELS_SEEK_BY_SCHEMA = CHANNELS_SCHEMA.extend(
{vol.Required(ATTR_SECONDS): vol.Coerce(int)}
)
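# Illustrative service-call payload accepted by CHANNELS_SEEK_BY_SCHEMA
# (the entity_id value below is hypothetical):
#   {"entity_id": "media_player.channels", "seconds": 30}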
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Channels platform."""
device = ChannelsPlayer(
config.get(CONF_NAME), config.get(CONF_HOST), config.get(CONF_PORT)
)
if DATA_CHANNELS not in hass.data:
hass.data[DATA_CHANNELS] = []
add_entities([device], True)
hass.data[DATA_CHANNELS].append(device)
def service_handler(service):
"""Handle service."""
entity_id = service.data.get(ATTR_ENTITY_ID)
device = next(
(
device
for device in hass.data[DATA_CHANNELS]
if device.entity_id == entity_id
),
None,
)
if device is None:
_LOGGER.warning("Unable to find Channels with entity_id: %s", entity_id)
return
if service.service == SERVICE_SEEK_FORWARD:
device.seek_forward()
elif service.service == SERVICE_SEEK_BACKWARD:
device.seek_backward()
elif service.service == SERVICE_SEEK_BY:
seconds = service.data.get("seconds")
device.seek_by(seconds)
hass.services.register(
DOMAIN, SERVICE_SEEK_FORWARD, service_handler, schema=CHANNELS_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_SEEK_BACKWARD, service_handler, schema=CHANNELS_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_SEEK_BY, service_handler, schema=CHANNELS_SEEK_BY_SCHEMA
)
class ChannelsPlayer(MediaPlayerDevice):
"""Representation of a Channels instance."""
def __init__(self, name, host, port):
"""Initialize the Channels app."""
from pychannels import Channels
self._name = name
self._host = host
self._port = port
self.client = Channels(self._host, self._port)
self.status = None
self.muted = None
self.channel_number = None
self.channel_name = None
self.channel_image_url = None
self.now_playing_title = None
self.now_playing_episode_title = None
self.now_playing_season_number = None
self.now_playing_episode_number = None
self.now_playing_summary = None
self.now_playing_image_url = None
self.favorite_channels = []
def update_favorite_channels(self):
"""Update the favorite channels from the client."""
self.favorite_channels = self.client.favorite_channels()
def update_state(self, state_hash):
"""Update all the state properties with the passed in dictionary."""
self.status = state_hash.get("status", "stopped")
self.muted = state_hash.get("muted", False)
channel_hash = state_hash.get("channel")
np_hash = state_hash.get("now_playing")
if channel_hash:
self.channel_number = channel_hash.get("channel_number")
self.channel_name = channel_hash.get("channel_name")
self.channel_image_url = channel_hash.get("channel_image_url")
else:
self.channel_number = None
self.channel_name = None
self.channel_image_url = None
if np_hash:
self.now_playing_title = np_hash.get("title")
self.now_playing_episode_title = np_hash.get("episode_title")
self.now_playing_season_number = np_hash.get("season_number")
self.now_playing_episode_number = np_hash.get("episode_number")
self.now_playing_summary = np_hash.get("summary")
self.now_playing_image_url = np_hash.get("image_url")
else:
self.now_playing_title = None
self.now_playing_episode_title = None
self.now_playing_season_number = None
self.now_playing_episode_number = None
self.now_playing_summary = None
self.now_playing_image_url = None
@property
def name(self):
"""Return the name of the player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
if self.status == "stopped":
return STATE_IDLE
if self.status == "paused":
return STATE_PAUSED
if self.status == "playing":
return STATE_PLAYING
return None
def update(self):
"""Retrieve latest state."""
self.update_favorite_channels()
self.update_state(self.client.status())
@property
def source_list(self):
"""List of favorite channels."""
sources = [channel["name"] for channel in self.favorite_channels]
return sources
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.muted
@property
def media_content_id(self):
"""Content ID of current playing channel."""
return self.channel_number
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_CHANNEL
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.now_playing_image_url:
return self.now_playing_image_url
if self.channel_image_url:
return self.channel_image_url
return "https://getchannels.com/assets/img/icon-1024.png"
@property
def media_title(self):
"""Title of current playing media."""
if self.state:
return self.now_playing_title
return None
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return FEATURE_SUPPORT
def mute_volume(self, mute):
"""Mute (true) or unmute (false) player."""
if mute != self.muted:
response = self.client.toggle_muted()
self.update_state(response)
def media_stop(self):
"""Send media_stop command to player."""
self.status = "stopped"
response = self.client.stop()
self.update_state(response)
def media_play(self):
"""Send media_play command to player."""
response = self.client.resume()
self.update_state(response)
def media_pause(self):
"""Send media_pause command to player."""
response = self.client.pause()
self.update_state(response)
def media_next_track(self):
"""Seek ahead."""
response = self.client.skip_forward()
self.update_state(response)
def media_previous_track(self):
"""Seek back."""
response = self.client.skip_backward()
self.update_state(response)
def select_source(self, source):
"""Select a channel to tune to."""
for channel in self.favorite_channels:
if channel["name"] == source:
response = self.client.play_channel(channel["number"])
self.update_state(response)
break
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the player."""
if media_type == MEDIA_TYPE_CHANNEL:
response = self.client.play_channel(media_id)
self.update_state(response)
elif media_type in [MEDIA_TYPE_MOVIE, MEDIA_TYPE_EPISODE, MEDIA_TYPE_TVSHOW]:
response = self.client.play_recording(media_id)
self.update_state(response)
def seek_forward(self):
"""Seek forward in the timeline."""
response = self.client.seek_forward()
self.update_state(response)
def seek_backward(self):
"""Seek backward in the timeline."""
response = self.client.seek_backward()
self.update_state(response)
def seek_by(self, seconds):
"""Seek backward in the timeline."""
response = self.client.seek(seconds)
self.update_state(response)
| apache-2.0 |
mewtaylor/django | django/contrib/gis/forms/fields.py | 504 | 4316 | from __future__ import unicode_literals
from django import forms
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.utils.translation import ugettext_lazy as _
from .widgets import OpenLayersWidget
class GeometryField(forms.Field):
"""
This is the basic form field for a Geometry. Any textual input that is
accepted by GEOSGeometry is accepted by this form. By default,
this includes WKT, HEXEWKB, WKB (in a buffer), and GeoJSON.
"""
widget = OpenLayersWidget
geom_type = 'GEOMETRY'
default_error_messages = {
'required': _('No geometry value provided.'),
'invalid_geom': _('Invalid geometry value.'),
'invalid_geom_type': _('Invalid geometry type.'),
'transform_error': _('An error occurred when transforming the geometry '
'to the SRID of the geometry form field.'),
}
def __init__(self, **kwargs):
# Pop out attributes from the database field, or use sensible
# defaults (e.g., allow None).
self.srid = kwargs.pop('srid', None)
self.geom_type = kwargs.pop('geom_type', self.geom_type)
super(GeometryField, self).__init__(**kwargs)
self.widget.attrs['geom_type'] = self.geom_type
def to_python(self, value):
"""
Transforms the value to a Geometry object.
"""
if value in self.empty_values:
return None
if not isinstance(value, GEOSGeometry):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError, TypeError):
raise forms.ValidationError(self.error_messages['invalid_geom'], code='invalid_geom')
# Try to set the srid
if not value.srid:
try:
value.srid = self.widget.map_srid
except AttributeError:
if self.srid:
value.srid = self.srid
return value
def clean(self, value):
"""
Validates that the input value can be converted to a Geometry
object (which is returned). A ValidationError is raised if
the value cannot be instantiated as a Geometry.
"""
geom = super(GeometryField, self).clean(value)
if geom is None:
return geom
# Ensuring that the geometry is of the correct type (indicated
# using the OGC string label).
if str(geom.geom_type).upper() != self.geom_type and not self.geom_type == 'GEOMETRY':
raise forms.ValidationError(self.error_messages['invalid_geom_type'], code='invalid_geom_type')
# Transforming the geometry if the SRID was set.
if self.srid and self.srid != -1 and self.srid != geom.srid:
try:
geom.transform(self.srid)
except GEOSException:
raise forms.ValidationError(
self.error_messages['transform_error'], code='transform_error')
return geom
def has_changed(self, initial, data):
""" Compare geographic value of data with its initial value. """
try:
data = self.to_python(data)
initial = self.to_python(initial)
except forms.ValidationError:
return True
# Only do a geographic comparison if both values are available
if initial and data:
data.transform(initial.srid)
# If the initial value was not added by the browser, the geometry
# provided may be slightly different, the first time it is saved.
# The comparison is done with a very low tolerance.
return not initial.equals_exact(data, tolerance=0.000001)
else:
# Check for change of state of existence
return bool(initial) != bool(data)
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
class PointField(GeometryField):
geom_type = 'POINT'
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
class PolygonField(GeometryField):
geom_type = 'POLYGON'
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
| bsd-3-clause |
azul-cloud/serendipity | storages/backends/symlinkorcopy.py | 19 | 2625 | import os
from django.conf import settings
from django.core.files.storage import FileSystemStorage
__doc__ = """
I needed to efficiently create a mirror of a directory tree (so that
"origin pull" CDNs can automatically pull files). The trick was that
some files could be modified, and some could be identical to the original.
Of course it doesn't make sense to store the exact same data twice on the
file system. So I created SymlinkOrCopyStorage.
SymlinkOrCopyStorage allows you to symlink a file when it's identical to
the original file and to copy the file if it's modified.
Of course, it's impossible to know if a file is modified just by looking
at the file, without knowing what the original file was.
That's what the symlink_within parameter is for. It accepts one or more paths
(if multiple, they should be concatenated using a colon (:)).
Files that will be saved using SymlinkOrCopyStorage are then checked on their
location: if they are within one of the symlink_within directories,
they will be symlinked, otherwise they will be copied.
The rationale is that unmodified files will exist in their original location,
e.g. /htdocs/example.com/image.jpg and modified files will be stored in
a temporary directory, e.g. /tmp/image.jpg.
"""
class SymlinkOrCopyStorage(FileSystemStorage):
"""Stores symlinks to files instead of actual files whenever possible
When a file that's being saved is currently stored in the symlink_within
directory, then symlink the file. Otherwise, copy the file.
"""
def __init__(self, location=settings.MEDIA_ROOT, base_url=settings.MEDIA_URL,
symlink_within=None):
super(SymlinkOrCopyStorage, self).__init__(location, base_url)
self.symlink_within = symlink_within.split(":")
def _save(self, name, content):
full_path_dst = self.path(name)
directory = os.path.dirname(full_path_dst)
if not os.path.exists(directory):
os.makedirs(directory)
elif not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
full_path_src = os.path.abspath(content.name)
symlinked = False
# Only symlink if the current platform supports it.
if getattr(os, "symlink", False):
for path in self.symlink_within:
if full_path_src.startswith(path):
os.symlink(full_path_src, full_path_dst)
symlinked = True
break
if not symlinked:
super(SymlinkOrCopyStorage, self)._save(name, content)
return name
| mit |
TeachAtTUM/edx-platform | openedx/core/djangoapps/theming/helpers.py | 10 | 11228 | """
Helpers for accessing comprehensive theming related variables.
This file is imported at startup. Imports of models or things which import models will break startup on Django 1.9+. If
you need models here, please import them inside the function which uses them.
"""
import os
import re
from logging import getLogger
from django.conf import settings
from microsite_configuration import microsite
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.theming.helpers_dirs import (
Theme,
get_project_root_name_from_settings,
get_theme_base_dirs_from_settings,
get_theme_dirs,
get_themes_unchecked
)
from openedx.core.djangoapps.request_cache.middleware import RequestCache, request_cached
logger = getLogger(__name__) # pylint: disable=invalid-name
@request_cached
def get_template_path(relative_path, **kwargs):
"""
This is a proxy function to hide microsite_configuration behind comprehensive theming.
The calculated value is cached for the lifetime of the current request.
"""
# We need to give priority to theming over microsites
# So, we apply microsite override only if there is no associated site theme
# and associated microsite is present.
if not current_request_has_associated_site_theme() and microsite.is_request_in_microsite():
relative_path = microsite.get_template_path(relative_path, **kwargs)
return relative_path
def is_request_in_themed_site():
"""
This is a proxy function to hide microsite_configuration behind comprehensive theming.
"""
# We need to give priority to theming/site-configuration over microsites
return configuration_helpers.is_site_configuration_enabled() or microsite.is_request_in_microsite()
def get_template(uri):
"""
This is a proxy function to hide microsite_configuration behind comprehensive theming.
:param uri: uri of the template
"""
# We need to give priority to theming over microsites
# So, we apply microsite template override only when there is no associated theme,
if not current_request_has_associated_site_theme():
return microsite.get_template(uri)
def get_template_path_with_theme(relative_path):
"""
Returns template path in current site's theme if it finds one there otherwise returns same path.
Example:
>> get_template_path_with_theme('header.html')
'/red-theme/lms/templates/header.html'
Parameters:
relative_path (str): template's path relative to the templates directory e.g. 'footer.html'
Returns:
(str): template path in current site's theme
"""
relative_path = os.path.normpath(relative_path)
theme = get_current_theme()
if not theme:
return relative_path
# strip `/` if present at the start of relative_path
template_name = re.sub(r'^/+', '', relative_path)
template_path = theme.template_path / template_name
absolute_path = theme.path / "templates" / template_name
if absolute_path.exists():
return str(template_path)
else:
return relative_path
def get_all_theme_template_dirs():
"""
Returns template directories for all the themes.
Example:
>> get_all_theme_template_dirs()
[
'/edx/app/edxapp/edx-platform/themes/red-theme/lms/templates/',
]
Returns:
(list): list of directories containing theme templates.
"""
themes = get_themes()
template_paths = list()
for theme in themes:
template_paths.extend(theme.template_dirs)
return template_paths
def get_project_root_name():
"""
Return root name for the current project
Example:
>> get_project_root_name()
'lms'
# from studio
>> get_project_root_name()
'cms'
Returns:
(str): component name of platform e.g lms, cms
"""
return get_project_root_name_from_settings(settings.PROJECT_ROOT)
def strip_site_theme_templates_path(uri):
"""
Remove site template theme path from the uri.
Example:
>> strip_site_theme_templates_path('/red-theme/lms/templates/header.html')
'header.html'
Arguments:
uri (str): template path from which to remove site theme path. e.g. '/red-theme/lms/templates/header.html'
Returns:
(str): template path with site theme path removed.
"""
theme = get_current_theme()
if not theme:
return uri
templates_path = "/".join([
theme.theme_dir_name,
get_project_root_name(),
"templates"
])
uri = re.sub(r'^/*' + templates_path + '/*', '', uri)
return uri
def get_current_request():
"""
Return current request instance.
Returns:
(HttpRequest): returns current request
"""
return RequestCache.get_current_request()
def get_current_site():
"""
Return current site.
Returns:
(django.contrib.sites.models.Site): returns current site
"""
request = get_current_request()
if not request:
return None
return getattr(request, 'site', None)
def get_current_site_theme():
"""
Return current site theme object. Returns None if theming is disabled.
Returns:
(ecommerce.theming.models.SiteTheme): site theme object for the current site.
"""
# Return None if theming is disabled
if not is_comprehensive_theming_enabled():
return None
request = get_current_request()
if not request:
return None
return getattr(request, 'site_theme', None)
def get_current_theme():
"""
Return current theme object. Returns None if theming is disabled.
Returns:
(ecommerce.theming.models.SiteTheme): site theme object for the current site.
"""
# Return None if theming is disabled
if not is_comprehensive_theming_enabled():
return None
site_theme = get_current_site_theme()
if not site_theme:
return None
try:
return Theme(
name=site_theme.theme_dir_name,
theme_dir_name=site_theme.theme_dir_name,
themes_base_dir=get_theme_base_dir(site_theme.theme_dir_name),
project_root=get_project_root_name()
)
except ValueError as error:
# Log exception message and return None, so that open source theme is used instead
logger.exception('Theme not found in any of the themes dirs. [%s]', error)
return None
def current_request_has_associated_site_theme():
"""
True if current request has an associated SiteTheme, False otherwise.
Returns:
True if current request has an associated SiteTheme, False otherwise
"""
request = get_current_request()
site_theme = getattr(request, 'site_theme', None)
return bool(site_theme and site_theme.id)
def get_theme_base_dir(theme_dir_name, suppress_error=False):
"""
Returns absolute path to the directory that contains the given theme.
Args:
theme_dir_name (str): theme directory name to get base path for
suppress_error (bool): if True function will return None if theme is not found instead of raising an error
Returns:
(str): Base directory that contains the given theme
"""
for themes_dir in get_theme_base_dirs():
if theme_dir_name in get_theme_dirs(themes_dir):
return themes_dir
if suppress_error:
return None
raise ValueError(
"Theme '{theme}' not found in any of the following themes dirs, \nTheme dirs: \n{dir}".format(
theme=theme_dir_name,
dir=get_theme_base_dirs(),
))
def theme_exists(theme_name, themes_dir=None):
"""
Returns True if a theme exists with the specified name.
"""
for theme in get_themes(themes_dir=themes_dir):
if theme.theme_dir_name == theme_name:
return True
return False
def get_themes(themes_dir=None):
"""
get a list of all themes known to the system.
Args:
themes_dir (str): (Optional) Path to themes base directory
Returns:
list of themes known to the system.
"""
if not is_comprehensive_theming_enabled():
return []
if themes_dir is None:
themes_dir = get_theme_base_dirs_unchecked()
return get_themes_unchecked(themes_dir, settings.PROJECT_ROOT)
def get_theme_base_dirs_unchecked():
"""
Return base directories that contains all the themes.
Example:
>> get_theme_base_dirs_unchecked()
['/edx/app/ecommerce/ecommerce/themes']
Returns:
(List of Paths): Base theme directory paths
"""
theme_dirs = getattr(settings, "COMPREHENSIVE_THEME_DIRS", None)
return get_theme_base_dirs_from_settings(theme_dirs)
def get_theme_base_dirs():
"""
Return base directories that contains all the themes.
Ensures comprehensive theming is enabled.
Example:
>> get_theme_base_dirs()
['/edx/app/ecommerce/ecommerce/themes']
Returns:
(List of Paths): Base theme directory paths
"""
# Return an empty list if theming is disabled
if not is_comprehensive_theming_enabled():
return []
return get_theme_base_dirs_unchecked()
def is_comprehensive_theming_enabled():
"""
Returns boolean indicating whether comprehensive theming functionality is enabled or disabled.
Example:
>> is_comprehensive_theming_enabled()
True
Returns:
(bool): True if comprehensive theming is enabled else False
"""
# We need to give priority to theming over microsites
if settings.ENABLE_COMPREHENSIVE_THEMING and current_request_has_associated_site_theme():
return True
# Disable theming for microsites
# Microsite configurations take priority over the default site theme.
if microsite.is_request_in_microsite():
return False
return settings.ENABLE_COMPREHENSIVE_THEMING
def get_config_value_from_site_or_settings(name, site=None, site_config_name=None):
"""
Given a configuration setting name, try to get it from the site configuration and then fall back on the settings.
If site_config_name is not specified then "name" is used as the key for both collections.
Args:
name (str): The name of the setting to get the value of.
site: The site that we are trying to fetch the value for.
site_config_name: The name of the setting within the site configuration.
Returns:
The value stored in the configuration.
"""
from openedx.core.djangoapps.site_configuration.models import SiteConfiguration
if site_config_name is None:
site_config_name = name
if site is None:
site = get_current_site()
site_configuration = None
if site is not None:
try:
site_configuration = getattr(site, "configuration", None)
except SiteConfiguration.DoesNotExist:
pass
value_from_settings = getattr(settings, name, None)
if site_configuration is not None:
return site_configuration.get_value(site_config_name, default=value_from_settings)
else:
return value_from_settings
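# Illustrative usage of the helper above (the setting name is an assumption, not
# defined in this module):
#
#   platform_name = get_config_value_from_site_or_settings(
#       'PLATFORM_NAME', site=get_current_site())
#
# The site configuration value wins when present; otherwise the value from
# django.conf.settings is returned.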
| agpl-3.0 |
tcpcloud/monitoring-for-openstack | oschecks/ceilometer.py | 4 | 2011 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Openstack Monitoring script for Sensu / Nagios
#
# Copyright © 2013-2014 eNovance <licensing@enovance.com>
#
# Author: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oschecks import utils
def _check_ceilometer_api():
ceilometer = utils.Ceilometer()
ceilometer.add_argument('-w', dest='warning', type=int, default=5,
help='Warning timeout for Ceilometer APIs calls')
ceilometer.add_argument('-c', dest='critical', type=int, default=10,
help='Critical timeout for Ceilometer APIs calls')
options, client = ceilometer.setup()
elapsed, meters = utils.timeit(client.meters.list)
if not meters:
utils.critical("Unable to contact Ceilometer API.")
if elapsed > options.critical:
utils.critical("Get meters took more than %d seconds, "
"it's too long.|response_time=%d" %
(options.critical, elapsed))
elif elapsed > options.warning:
utils.warning("Get meters took more than %d seconds, "
"it's too long.|response_time=%d" %
(options.warning, elapsed))
else:
utils.ok("Get meters, Ceilometer API is working: "
"list %d meters in %d seconds.|response_time=%d" %
(len(meters), elapsed, elapsed))
def check_ceilometer_api():
utils.safe_run(_check_ceilometer_api)
| apache-2.0 |
kumar303/zamboni | mkt/site/monitors.py | 3 | 9592 | import os
import socket
import StringIO
import tempfile
import time
import traceback
from django.conf import settings
import commonware.log
import elasticsearch
import requests
from cache_nuggets.lib import memoize
from PIL import Image
from lib.crypto import packaged, receipt
from lib.crypto.packaged import SigningError as PackageSigningError
from lib.crypto.receipt import SigningError
from lib.pay_server import client
monitor_log = commonware.log.getLogger('z.monitor')
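# Each monitor below returns a (status, results) tuple; an empty status string
# means the check passed, anything else is surfaced as the failure reason.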
def memcache():
memcache = getattr(settings, 'CACHES', {}).get('default')
memcache_results = []
status = ''
if memcache and 'memcache' in memcache['BACKEND']:
hosts = memcache['LOCATION']
using_twemproxy = False
if not isinstance(hosts, (tuple, list)):
hosts = [hosts]
for host in hosts:
ip, port = host.split(':')
if ip == '127.0.0.1':
using_twemproxy = True
try:
s = socket.socket()
s.connect((ip, int(port)))
except Exception, e:
result = False
status = 'Failed to connect to memcached (%s): %s' % (host, e)
monitor_log.critical(status)
else:
result = True
finally:
s.close()
memcache_results.append((ip, port, result))
if (not using_twemproxy and len(hosts) > 1 and
len(memcache_results) < 2):
# If the number of requested hosts is greater than 1, but less
# than 2 replied, raise an error.
            status = ('2+ memcache servers are required. '
                      '%s available') % len(memcache_results)
monitor_log.warning(status)
# If we are in debug mode, don't worry about checking for memcache.
elif settings.DEBUG:
return status, []
if not memcache_results:
status = 'Memcache is not configured'
monitor_log.info(status)
return status, memcache_results
def libraries():
# Check Libraries and versions
libraries_results = []
status = ''
try:
Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG')
libraries_results.append(('PIL+JPEG', True, 'Got it!'))
except Exception, e:
msg = "Failed to create a jpeg image: %s" % e
libraries_results.append(('PIL+JPEG', False, msg))
try:
import M2Crypto # NOQA
libraries_results.append(('M2Crypto', True, 'Got it!'))
except ImportError:
libraries_results.append(('M2Crypto', False, 'Failed to import'))
if settings.SPIDERMONKEY:
if os.access(settings.SPIDERMONKEY, os.R_OK):
libraries_results.append(('Spidermonkey is ready!', True, None))
# TODO: see if it works?
else:
msg = "You said spidermonkey was at (%s)" % settings.SPIDERMONKEY
libraries_results.append(('Spidermonkey', False, msg))
    # If settings.DEBUG is on and SPIDERMONKEY is empty,
    # record it without failing the check.
elif settings.DEBUG and not settings.SPIDERMONKEY:
msg = 'SPIDERMONKEY is empty'
libraries_results.append(('Spidermonkey', True, msg))
else:
msg = "Please set SPIDERMONKEY in your settings file."
libraries_results.append(('Spidermonkey', False, msg))
missing_libs = [l for l, s, m in libraries_results if not s]
if missing_libs:
status = 'missing libs: %s' % ",".join(missing_libs)
return status, libraries_results
def elastic():
es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)
elastic_results = None
status = ''
try:
health = es.cluster.health()
if health['status'] == 'red':
status = 'ES is red'
elastic_results = health
except elasticsearch.ElasticsearchException:
monitor_log.exception('Failed to communicate with ES')
elastic_results = {'error': traceback.format_exc()}
status = 'traceback'
return status, elastic_results
def path():
# Check file paths / permissions
rw = (settings.TMP_PATH,
settings.NETAPP_STORAGE,
settings.UPLOADS_PATH,
settings.ADDONS_PATH,
settings.GUARDED_ADDONS_PATH,
settings.ADDON_ICONS_PATH,
settings.WEBSITE_ICONS_PATH,
settings.PREVIEWS_PATH,
settings.REVIEWER_ATTACHMENTS_PATH,)
r = [os.path.join(settings.ROOT, 'locale')]
filepaths = [(path, os.R_OK | os.W_OK, "We want read + write")
for path in rw]
filepaths += [(path, os.R_OK, "We want read") for path in r]
filepath_results = []
filepath_status = True
for path, perms, notes in filepaths:
path_exists = os.path.exists(path)
path_perms = os.access(path, perms)
filepath_status = filepath_status and path_exists and path_perms
filepath_results.append((path, path_exists, path_perms, notes))
key_exists = os.path.exists(settings.WEBAPPS_RECEIPT_KEY)
key_perms = os.access(settings.WEBAPPS_RECEIPT_KEY, os.R_OK)
filepath_status = filepath_status and key_exists and key_perms
filepath_results.append(('settings.WEBAPPS_RECEIPT_KEY',
key_exists, key_perms, 'We want read'))
    status = ''
if not filepath_status:
status = 'check main status page for broken perms'
return status, filepath_results
def redis():
# Check Redis
redis_results = [None, 'REDIS_BACKEND is not set']
status = 'REDIS_BACKEND is not set'
if getattr(settings, 'REDIS_BACKEND', False):
from caching.invalidation import get_redis_backend
status = ''
try:
redis = get_redis_backend()
redis_results = redis.info()
except Exception, e:
redis_results = None
status = ('Failed to chat with redis')
monitor_log.critical('Failed to chat with redis: (%s)' % e)
return status, redis_results
# The signer check actually asks the signing server to sign something. Do this
# once per nagios check, once per web head might be a bit much. The memoize
# slows it down a bit, by caching the result for 15 seconds.
@memoize('monitors-signer', time=15)
def receipt_signer():
destination = getattr(settings, 'SIGNING_SERVER', None)
if not destination:
return '', 'Signer is not configured.'
# Just send some test data into the signer.
now = int(time.time())
not_valid = (settings.SITE_URL + '/not-valid')
data = {'detail': not_valid, 'exp': now + 3600, 'iat': now,
'iss': settings.SITE_URL,
'product': {'storedata': 'id=1', 'url': u'http://not-valid.com'},
'nbf': now, 'typ': 'purchase-receipt',
'reissue': not_valid,
'user': {'type': 'directed-identifier',
'value': u'something-not-valid'},
'verify': not_valid
}
try:
result = receipt.sign(data)
except SigningError as err:
msg = 'Error on signing (%s): %s' % (destination, err)
return msg, msg
try:
cert, rest = receipt.crack(result)
except Exception as err:
msg = 'Error on cracking receipt (%s): %s' % (destination, err)
return msg, msg
# Check that the certs used to sign the receipts are not about to expire.
limit = now + (60 * 60 * 24) # One day.
if cert['exp'] < limit:
msg = 'Cert will expire soon (%s)' % destination
return msg, msg
cert_err_msg = 'Error on checking public cert (%s): %s'
location = cert['iss']
try:
resp = requests.get(location, timeout=5, stream=False)
except Exception as err:
msg = cert_err_msg % (location, err)
return msg, msg
if not resp.ok:
msg = cert_err_msg % (location, resp.reason)
return msg, msg
cert_json = resp.json()
if not cert_json or 'jwk' not in cert_json:
msg = cert_err_msg % (location, 'Not valid JSON/JWK')
return msg, msg
return '', 'Signer working and up to date'
# Like the receipt signer above this asks the packaged app signing
# service to sign one for us.
@memoize('monitors-package-signer', time=60)
def package_signer():
destination = getattr(settings, 'SIGNED_APPS_SERVER', None)
if not destination:
return '', 'Signer is not configured.'
app_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'nagios_check_packaged_app.zip')
signed_path = tempfile.mktemp()
try:
packaged.sign_app(app_path, signed_path, None, False)
return '', 'Package signer working'
except PackageSigningError, e:
msg = 'Error on package signing (%s): %s' % (destination, e)
return msg, msg
finally:
os.unlink(signed_path)
# Not called settings to avoid conflict with django.conf.settings.
def settings_check():
required = ['APP_PURCHASE_KEY', 'APP_PURCHASE_TYP', 'APP_PURCHASE_AUD',
'APP_PURCHASE_SECRET']
for key in required:
if not getattr(settings, key):
msg = 'Missing required value %s' % key
return msg, msg
return '', 'Required settings ok'
def solitude():
try:
res = client.api.services.request.get()
except Exception as err:
return repr(err), repr(err)
auth = res.get('authenticated', None)
if auth != 'marketplace':
msg = 'Solitude authenticated as: %s' % auth
return msg, msg
return '', 'Solitude authentication ok'
| bsd-3-clause |
C00kiie/Youtube-Mp3-telegram-bot | youtube_dl/extractor/viki.py | 29 | 13668 | # coding: utf-8
from __future__ import unicode_literals
import hashlib
import hmac
import itertools
import json
import re
import time
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
parse_age_limit,
parse_iso8601,
sanitized_Request,
)
class VikiBaseIE(InfoExtractor):
_VALID_URL_BASE = r'https?://(?:www\.)?viki\.(?:com|net|mx|jp|fr)/'
_API_QUERY_TEMPLATE = '/v4/%sapp=%s&t=%s&site=www.viki.com'
_API_URL_TEMPLATE = 'http://api.viki.io%s&sig=%s'
_APP = '65535a'
_APP_VERSION = '2.2.5.1428709186'
_APP_SECRET = '-$iJ}@p7!G@SyU/je1bEyWg}upLu-6V6-Lg9VD(]siH,r.,m-r|ulZ,U4LC/SeR)'
_GEO_BYPASS = False
_NETRC_MACHINE = 'viki'
_token = None
_ERRORS = {
'geo': 'Sorry, this content is not available in your region.',
'upcoming': 'Sorry, this content is not yet available.',
# 'paywall': 'paywall',
}
def _prepare_call(self, path, timestamp=None, post_data=None):
path += '?' if '?' not in path else '&'
if not timestamp:
timestamp = int(time.time())
query = self._API_QUERY_TEMPLATE % (path, self._APP, timestamp)
if self._token:
query += '&token=%s' % self._token
sig = hmac.new(
self._APP_SECRET.encode('ascii'),
query.encode('ascii'),
hashlib.sha1
).hexdigest()
url = self._API_URL_TEMPLATE % (query, sig)
return sanitized_Request(
url, json.dumps(post_data).encode('utf-8')) if post_data else url
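    # Illustrative shape of a signed request URL built by _prepare_call (the
    # values below are made up):
    #   http://api.viki.io/v4/videos/1023585v.json?app=65535a&t=1428709186
    #       &site=www.viki.com&sig=<hmac-sha1 hexdigest of the query string>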
def _call_api(self, path, video_id, note, timestamp=None, post_data=None):
resp = self._download_json(
self._prepare_call(path, timestamp, post_data), video_id, note)
error = resp.get('error')
if error:
if error == 'invalid timestamp':
resp = self._download_json(
self._prepare_call(path, int(resp['current_timestamp']), post_data),
video_id, '%s (retry)' % note)
error = resp.get('error')
if error:
self._raise_error(resp['error'])
return resp
def _raise_error(self, error):
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, error),
expected=True)
def _check_errors(self, data):
for reason, status in data.get('blocking', {}).items():
if status and reason in self._ERRORS:
message = self._ERRORS[reason]
if reason == 'geo':
self.raise_geo_restricted(msg=message)
raise ExtractorError('%s said: %s' % (
self.IE_NAME, message), expected=True)
def _real_initialize(self):
self._login()
def _login(self):
(username, password) = self._get_login_info()
if username is None:
return
login_form = {
'login_id': username,
'password': password,
}
login = self._call_api(
'sessions.json', None,
'Logging in as %s' % username, post_data=login_form)
self._token = login.get('token')
if not self._token:
self.report_warning('Unable to get session token, login has probably failed')
@staticmethod
def dict_selection(dict_obj, preferred_key, allow_fallback=True):
if preferred_key in dict_obj:
return dict_obj.get(preferred_key)
if not allow_fallback:
return
filtered_dict = list(filter(None, [dict_obj.get(k) for k in dict_obj.keys()]))
return filtered_dict[0] if filtered_dict else None
class VikiIE(VikiBaseIE):
IE_NAME = 'viki'
_VALID_URL = r'%s(?:videos|player)/(?P<id>[0-9]+v)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/videos/1023585v-heirs-episode-14',
'info_dict': {
'id': '1023585v',
'ext': 'mp4',
'title': 'Heirs Episode 14',
'uploader': 'SBS',
'description': 'md5:c4b17b9626dd4b143dcc4d855ba3474e',
'upload_date': '20131121',
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# clip
'url': 'http://www.viki.com/videos/1067139v-the-avengers-age-of-ultron-press-conference',
'md5': '86c0b5dbd4d83a6611a79987cc7a1989',
'info_dict': {
'id': '1067139v',
'ext': 'mp4',
'title': "'The Avengers: Age of Ultron' Press Conference",
'description': 'md5:d70b2f9428f5488321bfe1db10d612ea',
'duration': 352,
'timestamp': 1430380829,
'upload_date': '20150430',
'uploader': 'Arirang TV',
'like_count': int,
'age_limit': 0,
}
}, {
'url': 'http://www.viki.com/videos/1048879v-ankhon-dekhi',
'info_dict': {
'id': '1048879v',
'ext': 'mp4',
'title': 'Ankhon Dekhi',
'duration': 6512,
'timestamp': 1408532356,
'upload_date': '20140820',
'uploader': 'Spuul',
'like_count': int,
'age_limit': 13,
},
'skip': 'Blocked in the US',
}, {
# episode
'url': 'http://www.viki.com/videos/44699v-boys-over-flowers-episode-1',
'md5': '5fa476a902e902783ac7a4d615cdbc7a',
'info_dict': {
'id': '44699v',
'ext': 'mp4',
'title': 'Boys Over Flowers - Episode 1',
'description': 'md5:b89cf50038b480b88b5b3c93589a9076',
'duration': 4204,
'timestamp': 1270496524,
'upload_date': '20100405',
'uploader': 'group8',
'like_count': int,
'age_limit': 13,
}
}, {
# youtube external
'url': 'http://www.viki.com/videos/50562v-poor-nastya-complete-episode-1',
'md5': '63f8600c1da6f01b7640eee7eca4f1da',
'info_dict': {
'id': '50562v',
'ext': 'webm',
'title': 'Poor Nastya [COMPLETE] - Episode 1',
'description': '',
'duration': 606,
'timestamp': 1274949505,
'upload_date': '20101213',
'uploader': 'ad14065n',
'uploader_id': 'ad14065n',
'like_count': int,
'age_limit': 13,
}
}, {
'url': 'http://www.viki.com/player/44699v',
'only_matching': True,
}, {
# non-English description
'url': 'http://www.viki.com/videos/158036v-love-in-magic',
'md5': '1713ae35df5a521b31f6dc40730e7c9c',
'info_dict': {
'id': '158036v',
'ext': 'mp4',
'uploader': 'I Planet Entertainment',
'upload_date': '20111122',
'timestamp': 1321985454,
'description': 'md5:44b1e46619df3a072294645c770cef36',
'title': 'Love In Magic',
'age_limit': 13,
},
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._call_api(
'videos/%s.json' % video_id, video_id, 'Downloading video JSON')
self._check_errors(video)
title = self.dict_selection(video.get('titles', {}), 'en', allow_fallback=False)
if not title:
title = 'Episode %d' % video.get('number') if video.get('type') == 'episode' else video.get('id') or video_id
container_titles = video.get('container', {}).get('titles', {})
container_title = self.dict_selection(container_titles, 'en')
title = '%s - %s' % (container_title, title)
description = self.dict_selection(video.get('descriptions', {}), 'en')
duration = int_or_none(video.get('duration'))
timestamp = parse_iso8601(video.get('created_at'))
uploader = video.get('author')
like_count = int_or_none(video.get('likes', {}).get('count'))
age_limit = parse_age_limit(video.get('rating'))
thumbnails = []
for thumbnail_id, thumbnail in video.get('images', {}).items():
thumbnails.append({
'id': thumbnail_id,
'url': thumbnail.get('url'),
})
subtitles = {}
for subtitle_lang, _ in video.get('subtitle_completions', {}).items():
subtitles[subtitle_lang] = [{
'ext': subtitles_format,
'url': self._prepare_call(
'videos/%s/subtitles/%s.%s' % (video_id, subtitle_lang, subtitles_format)),
} for subtitles_format in ('srt', 'vtt')]
result = {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'timestamp': timestamp,
'uploader': uploader,
'like_count': like_count,
'age_limit': age_limit,
'thumbnails': thumbnails,
'subtitles': subtitles,
}
streams = self._call_api(
'videos/%s/streams.json' % video_id, video_id,
'Downloading video streams JSON')
if 'external' in streams:
result.update({
'_type': 'url_transparent',
'url': streams['external']['url'],
})
return result
formats = []
for format_id, stream_dict in streams.items():
height = int_or_none(self._search_regex(
r'^(\d+)[pP]$', format_id, 'height', default=None))
for protocol, format_dict in stream_dict.items():
# rtmps URLs does not seem to work
if protocol == 'rtmps':
continue
format_url = format_dict['url']
if format_id == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
format_url, video_id, 'mp4',
entry_protocol='m3u8_native',
m3u8_id='m3u8-%s' % protocol, fatal=False)
# Despite CODECS metadata in m3u8 all video-only formats
# are actually video+audio
for f in m3u8_formats:
if f.get('acodec') == 'none' and f.get('vcodec') != 'none':
f['acodec'] = None
formats.extend(m3u8_formats)
elif format_url.startswith('rtmp'):
mobj = re.search(
r'^(?P<url>rtmp://[^/]+/(?P<app>.+?))/(?P<playpath>mp4:.+)$',
format_url)
if not mobj:
continue
formats.append({
'format_id': 'rtmp-%s' % format_id,
'ext': 'flv',
'url': mobj.group('url'),
'play_path': mobj.group('playpath'),
'app': mobj.group('app'),
'page_url': url,
})
else:
formats.append({
'url': format_url,
'format_id': '%s-%s' % (format_id, protocol),
'height': height,
})
self._sort_formats(formats)
result['formats'] = formats
return result
class VikiChannelIE(VikiBaseIE):
IE_NAME = 'viki:channel'
_VALID_URL = r'%s(?:tv|news|movies|artists)/(?P<id>[0-9]+c)' % VikiBaseIE._VALID_URL_BASE
_TESTS = [{
'url': 'http://www.viki.com/tv/50c-boys-over-flowers',
'info_dict': {
'id': '50c',
'title': 'Boys Over Flowers',
'description': 'md5:ecd3cff47967fe193cff37c0bec52790',
},
'playlist_mincount': 71,
}, {
'url': 'http://www.viki.com/tv/1354c-poor-nastya-complete',
'info_dict': {
'id': '1354c',
'title': 'Poor Nastya [COMPLETE]',
'description': 'md5:05bf5471385aa8b21c18ad450e350525',
},
'playlist_count': 127,
}, {
'url': 'http://www.viki.com/news/24569c-showbiz-korea',
'only_matching': True,
}, {
'url': 'http://www.viki.com/movies/22047c-pride-and-prejudice-2005',
'only_matching': True,
}, {
'url': 'http://www.viki.com/artists/2141c-shinee',
'only_matching': True,
}]
_PER_PAGE = 25
def _real_extract(self, url):
channel_id = self._match_id(url)
channel = self._call_api(
'containers/%s.json' % channel_id, channel_id,
'Downloading channel JSON')
self._check_errors(channel)
title = self.dict_selection(channel['titles'], 'en')
description = self.dict_selection(channel['descriptions'], 'en')
entries = []
for video_type in ('episodes', 'clips', 'movies'):
for page_num in itertools.count(1):
page = self._call_api(
'containers/%s/%s.json?per_page=%d&sort=number&direction=asc&with_paging=true&page=%d'
% (channel_id, video_type, self._PER_PAGE, page_num), channel_id,
'Downloading %s JSON page #%d' % (video_type, page_num))
for video in page['response']:
video_id = video['id']
entries.append(self.url_result(
'http://www.viki.com/videos/%s' % video_id, 'Viki'))
if not page['pagination']['next']:
break
return self.playlist_result(entries, channel_id, title, description)
| mit |
mostaphaRoudsari/Honeybee | src/Honeybee_Read Annual Result I.py | 1 | 44279 | #
# Honeybee: A Plugin for Environmental Analysis (GPL) started by Mostapha Sadeghipour Roudsari
#
# This file is part of Honeybee.
#
# Copyright (c) 2013-2020, Mostapha Sadeghipour Roudsari <mostapha@ladybug.tools>
# Honeybee is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 3 of the License,
# or (at your option) any later version.
#
# Honeybee is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Honeybee; If not, see <http://www.gnu.org/licenses/>.
#
# @license GPL-3.0+ <http://spdx.org/licenses/GPL-3.0+>
"""
Read Annual Daylight Results I [Standard Daysim Results]
-
Provided by Honeybee 0.0.66
Args:
_illFilesAddress: List of .ill files
_testPoints: List of 3d Points
        occupancyFiles_: Address of a Daysim occupancy file. You can find some examples in \Daysim\occ. Use the Honeybee Occupancy Generator to generate a custom occupancy file. You can also use EnergyPlus schedules directly. If the schedule uses continuous values, any value larger than 0.2 will be considered occupied.
lightingControlGroups_: Daysim lighting control groups. Daysim can model up to 10 lighting control groups together. Default is > cntrlType = 4, lightingPower = 250, lightingSetpoint = 300, ballastLossFactor = 20, standbyPower = 3, delayTime = 5
_DLAIllumThresholds_: Illuminance threshold for Daylight Autonomy calculation in lux. Default is set to 300 lux.
        SHDGroupI_Sensors_: Sensors for shading group I. Use the shadingGroupSensors component to prepare the inputs
        SHDGroupII_Sensors_: Sensors for shading group II. Use the shadingGroupSensors component to prepare the inputs
_runIt: set to True to run the analysis
Returns:
DLA: Daylight Autonomy > Percentage of the time during the active occupancy hours that the test point receives more daylight than the illuminance threshold.
UDLI_Less_100: Useful Daylight illuminance > Percentage of time during the active occupancy hours that the test point receives less than 100 lux.
UDLI_100_2000: Useful Daylight illuminance > Percentage of time during the active occupancy hours that the test point receives between 100 and 2000 lux.
UDLI_More_2000: Useful Daylight illuminance > Percentage of time during the active occupancy hours that the test point receives more than 2000 lux.
        CDA: Continuous Daylight Autonomy > Similar to Daylight Autonomy except that the point receives credit of illuminanceLevel/illuminanceThreshold for hours when the illuminance level is less than the threshold.
sDA: Spatial Daylight Autonomy > sDA is the percent of analysis points across the analysis area that meet or exceed _DLAIllumThresholds value (set to 300 lux for LEED) for at least 50% of the analysis period. Honeybee doesn't consider the effect of dynamic blinds in calculating sDA.
annualProfiles: A .csv file generated by Daysim that can be used as lighting schedule for annual energy simulation
"""
ghenv.Component.Name = "Honeybee_Read Annual Result I"
ghenv.Component.NickName = 'readAnnualResultsI'
ghenv.Component.Message = 'VER 0.0.66\nJUL_07_2020'
ghenv.Component.IconDisplayMode = ghenv.Component.IconDisplayMode.application
ghenv.Component.Category = "HB-Legacy"
ghenv.Component.SubCategory = "04 | Daylight | Daylight"
#compatibleHBVersion = VER 0.0.57\nNOV_03_2015
#compatibleLBVersion = VER 0.0.59\nFEB_01_2015
try: ghenv.Component.AdditionalHelpFromDocStrings = "2"
except: pass
from System import Object
import Grasshopper.Kernel as gh
from Grasshopper import DataTree
from Grasshopper.Kernel.Data import GH_Path
import Rhino as rc
import scriptcontext as sc
import os
import subprocess
import time
import shutil
"""
def testPtsStr(self, testPoint, ptsNormal):
return '%.4f'%testPoint.X + '\t' + \
'%.4f'%testPoint.Y + '\t' + \
'%.4f'%testPoint.Z + '\t' + \
'%.4f'%ptsNormal.X + '\t' + \
'%.4f'%ptsNormal.Y + '\t' + \
'%.4f'%ptsNormal.Z + '\n'
"""
def getFilelength(fileName):
with open(fileName) as inf:
for i, l in enumerate(inf):
pass
return i + 1
def executeBatchFiles(batchFileNames, maxPRuns = None, shell = False, waitingTime = 0.2):
"""Run a number of batch files in parallel and
wait to end of the analysis.
Args:
batchFileNames: List of batch files
maxPRuns: max number of files to be ran in parallel (default = 0)
shell: set to True if you do NOT want to see the cmd window while the analysis is runnig
"""
if not maxPRuns : maxPRuns = 1
maxPRuns = int(maxPRuns)
total = len(batchFileNames)
if maxPRuns < 1: maxPRuns = 1
if maxPRuns > total: maxPRuns = total
running = 0
done = False
jobs = []
pid = 0
try:
while not done:
if running < maxPRuns and pid < total:
# execute the files
jobs.append(subprocess.Popen(batchFileNames[pid].replace("\\", "/") , shell = shell))
pid+=1
time.sleep(waitingTime)
# count how many jobs are running and how many are done
running = 0
finished = 0
for job in jobs:
if job.poll() is None:
#one job is still running
running += 1
else:
finished += 1
if running == maxPRuns:
# wait for half a second
#print "waiting..."
time.sleep(waitingTime)
if finished == total:
done = True
except Exception, e:
print "Something went wrong: %s"%str(e)
def convertIllFileDaraTreeIntoSortedDictionary(illFilesAddress):
# I should move this function into Honeybee_Honeybee #BadPractice!
shadingGroupsCount = 0
shadingGroups = []
# get number of shading groups
for branch in range(illFilesAddress.BranchCount):
if illFilesAddress.Path(branch).Indices[0] not in shadingGroups:
shadingGroups.append(illFilesAddress.Path(branch).Indices[0])
shadingGroupsCount+=1
illFileSets = {}
for branch in range(illFilesAddress.BranchCount):
# sort files inside each branch if they are not sorted
fileNames = list(illFilesAddress.Branch(branch))
try:
fileNames = sorted(fileNames, key=lambda fileName: int(fileName \
.split(".")[-2] \
.strip("_down") \
.strip("_up") \
.split("_")[-1]))
except:
tmpmsg = "Can't sort .ill files based on the file names. Make sure branches are sorted correctly."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, tmpmsg)
#convert data tree to a useful dictionary
shadingGroupNumber = illFilesAddress.Path(branch).Indices[0]
if shadingGroupNumber not in illFileSets.keys():
illFileSets[shadingGroupNumber] = []
# create a separate list for each state
# the structure now is like llFileSets[shadingGroupNumber][[state 1], [state 2],..., [state n]]
illFileSets[shadingGroupNumber].append(fileNames)
return illFileSets
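# The returned dictionary maps each shading group index to one list of .ill files per
# shading state, e.g. (sketch):
#   {0: [[state0_cpu0.ill, state0_cpu1.ill], [state1_cpu0.ill, ...]], 1: [...]}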
def convertEPScheduleToDSSchedule(scheduleName, folder):
lb_preparation = sc.sticky["ladybug_Preparation"]()
HBScheduleList = sc.sticky["honeybee_ScheduleLib"].keys()
if scheduleName.upper() not in HBScheduleList:
raise ValueError("Can't find %s in EnergyPlus schedules."%(scheduleName))
heading = "# Daysim occupancy file,,,\n" + \
"# time_step 60, comment: weekdays are based on user list inputs." + \
"daylight savings time is based on user input),,\n" + \
"# month,day,time,occupancy (1=present/0=absent)\n"
readSchedules = sc.sticky["honeybee_ReadSchedules"](scheduleName, 0)
dailyValues = readSchedules.getScheduleValues()
hourlyValues = []
for values in dailyValues: hourlyValues.extend(values)
# create a temp folder inside folder will .ill files
if not os.path.isdir(folder): os.mkdir(folder)
# write the values to file
fullPath = os.path.join(folder, scheduleName.replace(" ", "_") + ".csv")
with open(fullPath, "w") as occFile:
occFile.write(heading)
for HOY, occ in enumerate(hourlyValues):
HOY += 1
d, m, t = lb_preparation.hour2Date(HOY, True)
m += 1 #month starts from 0 in Ladybug hour2Date. I should fix this at some point
            t -= .5 # shift the time back by half an hour to match Daysim's convention
if t == -.5: t = 23.5
if float(occ) >= .2: occ = 1
else: occ = 0
occLine = str(m) + "," + str(d) + "," + str(t) + "," + str(occ) + "\n"
occFile.write(occLine)
return fullPath
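# Each data line of the generated Daysim occupancy file reads "month,day,time,occupancy",
# e.g. "1,1,0.5,0" for 00:30 on January 1st with the space unoccupied.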
def main(illFilesAddress, testPts, testVecs, occFiles, lightingControlGroups, SHDGroupI_Sensors, SHDGroupII_Sensors, DLAIllumThresholds, runInBackground=False):
if sc.sticky.has_key('honeybee_release'):
try:
if not sc.sticky['honeybee_release'].isCompatible(ghenv.Component): return -1
if sc.sticky['honeybee_release'].isInputMissing(ghenv.Component): return -1
except:
warning = "You need a newer version of Honeybee to use this compoent." + \
" Use updateHoneybee component to update userObjects.\n" + \
"If you have already updated userObjects drag Honeybee_Honeybee component " + \
"into canvas and try again."
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, warning)
return -1
hb_folders = sc.sticky["honeybee_folders"]
hb_RADPath = hb_folders["RADPath"]
hb_RADLibPath = hb_folders["RADLibPath"]
hb_DSPath = hb_folders["DSPath"]
hb_DSCore = hb_folders["DSCorePath"]
hb_DSLibPath = hb_folders["DSLibPath"]
else:
msg = "You should first let Honeybee to fly first..."
return msg, None
daysimHeaderKeywords = ["project_name", "project_directory", "bin_directory", "tmp_directory", "Template_File",
"place", "latitude", "longitude", "time_zone", "site_elevation", "time_step",
"wea_data_short_file", "wea_data_short_file_units", "lower_direct_threshold", "lower_diffuse_threshold",
"output_units", "sensor_file_unit", "material_file", "geometry_file",
"radiance_source_files", "sensor_file", "viewpoint_file", "AdaptiveZoneApplies", "dgp_image_x_size", "dgp_image_y_size",
"ab", "ad", "as", "ar", "aa", "lr", "st", "sj", "lw", "dj", "ds", "dr", "dp",
"occupancy", "minimum_illuminance_level", "daylight_savings_time", "shading", "electric_lighting_system",
"sensor_file_info", "daylight_autonomy_active_RGB", "electric_lighting", "direct_sunlight_file", "thermal_simulation",
"user_profile", "PNGScheduleExists" ]
# I will remove this function later and just use WriteDS class
class genDefaultLightingControl(object):
def __init__(self, sensorPts = [], cntrlType = 4, lightingPower = 250, lightingSetpoint = 300, ballastLossFactor = 20, standbyPower = 3, delayTime = 5):
self.sensorPts = sensorPts
self.lightingControlStr = self.getLightingControlStr(cntrlType, lightingPower, lightingSetpoint, ballastLossFactor, standbyPower, delayTime)
def getLightingControlStr(self, cntrlType, lightingPower = 250, lightingSetpoint = 300, ballastLossFactor = 20, standbyPower = 3, delayTime = 5):
cntrlType += 1
# manual control
lightingControlDict = {
1 : 'manualControl',
2 : 'onlyOffSensor',
3 : 'onWhenOccupied',
4 : 'dimming',
5 : 'onlyOffSensorAndDimming',
6 : 'onWithDimming'}
lightingStr = `cntrlType` + " " + lightingControlDict[cntrlType] + " " + `lightingPower` + " 1 "
if cntrlType != 1:
lightingStr += `standbyPower` + " "
if cntrlType > 3:
lightingStr += `ballastLossFactor` + " " + `lightingSetpoint` + " "
if cntrlType != 1 and cntrlType!=4:
lightingStr += `delayTime`
lightingStr += "\n"
return lightingStr
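    # With the defaults above (cntrlType=4, i.e. Daysim control type 5) the generated
    # control line reads: "5 onlyOffSensorAndDimming 250 1 3 20 300 5"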
def isSensor(testPt, sensors):
for pt in sensors:
if pt==None: return False
if pt.DistanceTo(testPt) < sc.doc.ModelAbsoluteTolerance:
                # this is a sensor point
return True
# not a sensor
return False
msg = str.Empty
# PREPARATION/CHECKING THE INPUTS #
# number of spaces
# this component considers each branch as a separate space and will generate
# a separate heading file for each space and generate a separate set of results
numOfSpaces = testPts.BranchCount
# number of total points
numOfPts = testPts.DataCount
# set up illuminance levels for the spaces if they are not already set
if len(DLAIllumThresholds)==0: DLAIllumThresholds = [300] * numOfSpaces
# check for occupancy file
occupancyFilesFolder = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "DaysimCSVOCC\\")
if len(occFiles)!=0:
for fileCount, fileName in enumerate(occFiles):
if fileName.lower().endswith(".csv"):
try:
if not os.path.isfile(fileName):
msg = "Can't find the occupancy file: " + fileName
return msg, None
except:
msg = "Occupancy file address is not valid."
return msg, None
else:
#try:
# might be an energyplus schedule
filePath = convertEPScheduleToDSSchedule(fileName, occupancyFilesFolder)
occFiles[fileCount] = filePath
else:
daysimOccFile = os.path.join(sc.sticky["Honeybee_DefaultFolder"], "DaysimCSVOCC\\userDefinedOcc_9to17.csv")
occFiles = [daysimOccFile] * numOfSpaces
if not os.path.isfile(daysimOccFile):
msg = "Can't find the default occupancy file at: " + daysimOccFile + \
"\nYou can generate an occupancy file and connect the file address to occupancyFiles_ input."
return msg, None
# separate daylighting controls for each space
class SHDGroupSensors(object):
def __init__(self, sensorsList):
self.intSensors = sensorsList[0]
self.extSensors = sensorsList[1]
lightingControls = []
SHDGroupISensors = []
SHDGroupIISensors = []
originalIllFiles = []
testPoints = []
testVectors = []
numOfPtsInEachSpace = []
# collect the data for spaces
for branchNum in range(numOfSpaces):
ptList = list(testPts.Branch(branchNum))
testPoints.append(ptList)
numOfPtsInEachSpace.append(len(ptList))
try: testVectors.append(list(testVecs.Branch(branchNum)))
except: testVectors.append([rc.Geometry.Vector3d.ZAxis] * testPts.Branch(branchNum).Count)
try: lightingControls.append(list(lightingControlGroups.Branch(branchNum)))
except: lightingControls.append([genDefaultLightingControl()])
try: SHDGroupISensors.append(SHDGroupSensors(SHDGroupI_Sensors.Branch(branchNum)))
except: SHDGroupISensors.append(None)
try: SHDGroupIISensors.append(SHDGroupSensors((SHDGroupII_Sensors.Branch(branchNum))))
except: SHDGroupIISensors.append(None)
# create a place holder for each shading group
# sort the ill files based on their names
originalIllFilesSorted = convertIllFileDaraTreeIntoSortedDictionary(illFilesAddress)
# number of points should be the same in all the illfile lists
# that's why I just try the first list of the ill files
numOfPtsInEachFile = []
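    # Each .ill data line is treated as four date/time header fields followed by one
    # illuminance value per sensor, hence the "- 4" below.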
for illFile in originalIllFilesSorted[0][0]:
with open(illFile, "r") as illInf:
for lineCount, line in enumerate(illInf):
if not line.startswith("#"):
numOfPtsInEachFile.append(len(line.strip().split(" ")) - 4)
break
    # find the current project directory, which could be different from the old one
projectDirectory = os.path.dirname(originalIllFilesSorted[0][0][0]) + "\\"
# print numOfPtsInEachFile
#print numOfPtsInEachSpace
# make sure the number of points inside the ill file matches the number of points
# inside the point list
if sum(numOfPtsInEachFile) != numOfPts:
msg = "Number of points in ill files: " + `sum(numOfPtsInEachFile)` + \
" doesn't match the number of points in point files: " + `numOfPts`
return msg, None
    # find the heading files and create multiple ill files for the study
heaFiles = []
filePath = os.path.dirname(originalIllFilesSorted[0][0][0])
try:
files = os.listdir(filePath)
except:
msg = "Can't find the heading files (*.hea) at " + filePath
return msg, None
for fileName in files:
if fileName.EndsWith(".hea"): heaFiles.append(fileName)
# sort heading files and pt files
try: heaFiles = sorted(heaFiles, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-1]))
except: pass
# copy one of the heading files to be modified
heaFile = heaFiles[0]
with open(os.path.join(filePath, heaFile), "r") as heainf:
baseHea = heainf.readlines()
modifiedHeaBase = str.Empty
keywordsToBeRemoved = ["daylight_autonomy_active_RGB", "electric_lighting", "direct_sunlight_file", "thermal_simulation", "occupancy_profile",
"continuous_daylight_autonomy_active_RGB", "UDI_100_active_RGB", "UDI_100_2000_active_RGB", "UDI_2000_active_RGB",
"DDS_sensor_file", "DDS_file", "sensor_file_info"]
linesToBePassed = []
for lineCount, line in enumerate(baseHea):
line = line.strip()
if not lineCount in linesToBePassed:
if line.split(" ")[0] == ("sensor_file"):
modifiedHeaBase += "sensor_file [sensor_file]\n"
elif line.startswith("occupancy-file"):
modifiedHeaBase += "occupancy-file [occupancy]\n"
elif line.startswith("occupancy"):
modifiedHeaBase += "occupancy 5 [occupancy]\n"
elif line.startswith("project_name"):
projectName = line.split("project_name")[-1].strip()
modifiedHeaBase += "project_name [project_name]\n"
elif line.startswith("project_directory"):
# projectDirectory = line.split("project_directory")[-1].strip()
modifiedHeaBase += "project_directory " + projectDirectory + "\n"
elif line.startswith("tmp_directory"):
# create a place holder for the new temp file
modifiedHeaBase += "tmp_directory " + os.path.join(projectDirectory, "tmp[spaceCount]") + "\\\n"
elif line.startswith("daylight_savings_time"):
modifiedHeaBase += "daylight_savings_time 1\n"
elif line.startswith("minimum_illuminance_level"):
modifiedHeaBase += "minimum_illuminance_level [minimum_illuminance_level]\n"
elif line.split(" ")[0] == "shading":
# add the place holder for new dc and ill file names
if line.find(".ill") >= 0: line = line.replace(".ill", "[spaceCount].ill")
if line.find(".dc") >= 0: line = line.replace(".dc", "[spaceCount].dc")
shadingStr = line + "\n"
for lineC in range(lineCount + 1, len(baseHea)):
line = baseHea[lineC].strip()
if lineCount > len(baseHea) or line == str.Empty or line.startswith("=") or line.split(" ")[0] in daysimHeaderKeywords:
# good example here that I should have used the while loop instead!
break
else:
linesToBePassed.append(lineC)
# add the place holder for new dc and ill file names
if line.find(".ill") >= 0:
line = line.replace(".ill", "[spaceCount].ill")
# I'm not sure if I really need to modify the .dc files
# based on the graph on daysim page it should only look
# for the ill files and not the dc files
if line.find(".dc") >= 0:
line = line.replace(".dc", "[spaceCount].dc")
linesToBePassed.append(lineC)
shadingStr += line + "\n"
modifiedHeaBase += shadingStr
#modifiedHeaBase.append("minimum_illuminance_level [minimum_illuminance_level]\n")
elif line.split(" ")[0] == "electric_lighting_system" or line.split(" ")[0] == "user_profile":
# remove the lines related to electric lighting system as the new ones should be assigned
for lineC in range(lineCount + 1, len(baseHea)):
line = baseHea[lineC].strip()
if lineCount > len(baseHea) or line == str.Empty or line.startswith("=") or line.split(" ")[0] in daysimHeaderKeywords:
# good example here that I should have used the while loop instead!
break
else:
linesToBePassed.append(lineC)
elif line.split(" ")[0] in keywordsToBeRemoved:
pass
else:
modifiedHeaBase += line + "\n"
# clean the parts that are related to lighting control and schedule
##replace
# re-write the ill files based on the number of points in each space
# if the study is only for a single space then all the ill files should be merged
# considering the structure of .ill files and the fact that the files can be really
# huge this part can take long. It is good to consider a new name for these files so
# in case the user has already ran the study for this folder the script just use the
# available files
# generate new files for each space
# check if the files are already generated once
    # not a good idea at all - many users don't change the file name and run several
    # studies with the same name; in that case the results would always stay the same! Oops...
firstRun = True
newIllFileNamesDict = {}
for shdGroupCounter, illFileList in originalIllFilesSorted.items():
newIllFileNamesDict[shdGroupCounter] = []
for shadingStateCount in range(len(illFileList)):
for spaceCount in range(numOfSpaces):
newIllFileName = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".ill"
newDcFileName = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".dc"
newIllFileNamesDict[shdGroupCounter].append(newIllFileName) #collect ill files to calculate sDA
#if not (os.path.isfile(newIllFileName) and os.path.isfile(newDcFileName)):
# firstRun = True
# break
# open all the available ill files and put them in the dictionary
illFilesDict = {}
newIllFilesDict = {}
if firstRun:
for shdGroupCounter, illFileList in originalIllFilesSorted.items():
for shadingStateCount, shadingStateFiles in enumerate(illFileList):
# create a place holder for new .ill files for each shading group
newIllFileNamesDict[shdGroupCounter] = []
# open all the files for this shading state into memory
for counter, illFile in enumerate(illFileList[shadingStateCount]):
illfile = open(illFile, "r")
illFilesDict[counter] = illfile #put each ill file from each cpu separate/ I don't know why I have done this
# open new ill files for each space and put them in the same directory
for spaceCount in range(numOfSpaces):
newIllFileName = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".ill"
newIllFileNamesDict[shdGroupCounter].append(newIllFileName) #collect new ill file names to calculate sDA
newIllFile = open(newIllFileName, "w")
newIllFilesDict[spaceCount] = newIllFile
# all the files will have the same length of 8760 lines for the hours of the year
for line in range(8760):
# merge the line from all the source file
mergedLine = []
for illFileKey in illFilesDict.keys():
line = illFilesDict[illFileKey].readline()
if illFileKey==0:
dateInfo = line.strip().split(" ")[:4]
mergedLine.extend(line.strip().split(" ")[4:])
# write the values to the target files
for illFileKey in newIllFilesDict.keys():
line = " ".join(dateInfo + mergedLine[sum(numOfPtsInEachSpace[:illFileKey]):sum(numOfPtsInEachSpace[:illFileKey+1])])
newIllFilesDict[illFileKey].write(line + "\n")
# close all the opened files
for illFileKey in illFilesDict.keys(): illFilesDict[illFileKey].close()
for illFileKey in newIllFilesDict.keys(): newIllFilesDict[illFileKey].close()
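                # Each space now has its own .ill file that keeps the original date/time
                # columns and only the illuminance columns of that space's sensors.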
# print numOfPtsInEachSpace
# write the new .dc files for
dcFilesDict = {}
newDcFilesDict = {}
for shdGroupCounter, illFileList in originalIllFilesSorted.items():
for shadingStateCount, shadingStateFiles in enumerate(illFileList):
#illFileDict[shaidngGroupCounter]
lenOfDCFiles = []
for counter, illFile in enumerate(shadingStateFiles):
if illFile.endswith("_up.ill"):
dcFile = illFile.replace("_up.ill", ".dc")
elif illFile.endswith("_down.ill"):
dcFile = illFile.replace("_down.ill", ".dc")
else:
dcFile = illFile.replace(".ill", ".dc")
lenOfDCFile = getFilelength(dcFile) - 6 #Daysim files has 6 lines as header
lenOfDCFiles.append(lenOfDCFile)
dcfile = open(dcFile, "r")
dcFilesDict[counter] = dcfile
# open new ill files for each space and put them in the same directory
for spaceCount in range(numOfSpaces):
newDcFileName = illFileList[shadingStateCount][0].split(".ill")[0] + "_space_" + str(spaceCount) + ".dc"
newDcFile = open(newDcFileName, "w")
newDcFilesDict[spaceCount] = newDcFile
heading = str.Empty
for line in dcFilesDict[0]:
if line.startswith("#"):
#make one instance of heading
heading += line
else:
newDcFilesDict[0].write(heading)
newDcFilesDict[0].write(line)
break
pointCount = 1
spaceCount = 0
for dcFileKey in dcFilesDict.keys():
for line in dcFilesDict[dcFileKey]:
if not line.startswith("#"):
# write the line
newDcFilesDict[spaceCount].write(line)
pointCount+=1
if pointCount == sum(numOfPtsInEachSpace[:spaceCount + 1]):
# end of the file, start a new file
spaceCount += 1
try: newDcFilesDict[spaceCount].write(heading)
except: pass
# close all the opened files
for dcFileKey in dcFilesDict.keys(): dcFilesDict[dcFileKey].close()
for dcFileKey in newDcFilesDict.keys(): newDcFilesDict[dcFileKey].close()
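            # Each space now also has its own .dc file holding only the daylight
            # coefficient rows that belong to its sensors.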
heaFileNames = []
# write point files and heading files
for spaceCount in range(numOfSpaces):
tmpFolder = os.path.join(projectDirectory, "tmp_space_" + str(spaceCount))
if not os.path.isdir(tmpFolder): os.mkdir(tmpFolder)
subProjectName = projectName + "_space_" + str(spaceCount)
ptsFileName = subProjectName + ".pts"
modifiedHea = modifiedHeaBase
with open(os.path.join(filePath, ptsFileName), "w") as ptsf:
for ptCount, testPoint in enumerate(testPoints[spaceCount]):
ptNormal = testVectors[spaceCount][ptCount]
ptStr = '%.4f'%testPoint.X + '\t' + \
'%.4f'%testPoint.Y + '\t' + \
'%.4f'%testPoint.Z + '\t' + \
'%.4f'%ptNormal.X + '\t' + \
'%.4f'%ptNormal.Y + '\t' + \
'%.4f'%ptNormal.Z + '\n'
ptsf.write(ptStr)
# replace some of the values
# replace sensor file with the new file
if modifiedHea.find("[sensor_file]") >= 0:
modifiedHea = modifiedHea.replace("[sensor_file]", ptsFileName)
else:
modifiedHea += "sensor_file " + ptsFileName + "\n"
# occupancy file
try:
occFileFullPath = occFiles[spaceCount]
except:
occFileFullPath = occFiles[0]
#copy occupancy file to the folder
occFileName = os.path.basename(occFileFullPath)
targetFile = os.path.join(projectDirectory, occFileName)
if not os.path.isdir(targetFile):
shutil.copy2(occFileFullPath, targetFile)
if modifiedHea.find("[occupancy]") >= 0:
modifiedHea = modifiedHea.replace("[occupancy]", occFileName)
else:
# pass
modifiedHea += "occupancy-file " + occFileName + "\n"
modifiedHea += "occupancy 5 " + occFileName + "\n"
modifiedHea = modifiedHea.replace("[project_name]", subProjectName)
# daylight saving
if modifiedHea.find("daylight_savings_time") >= 0:
pass
else:
modifiedHea += "daylight_savings_time 1\n"
# illuminance level threshold
try: illumT = DLAIllumThresholds[spaceCount]
except: illumT = DLAIllumThresholds[0]
if modifiedHea.find("[minimum_illuminance_level]") >= 0:
modifiedHea = modifiedHea.replace("[minimum_illuminance_level]", str(illumT))
else:
modifiedHea += "minimum_illuminance_level " + str(illumT)+ "\n"
# replace the file names for advanced shadings
modifiedHea = modifiedHea.replace("[spaceCount]", "_space_" + str(spaceCount))
# add user information
modifiedHea += "user_profile 1\n" + \
"active 100 1 1\n"
try:
lghtCtrls = lightingControls[spaceCount]
lightingGroupSensors = []
except:
lghtCtrls = []
if len(lghtCtrls)!=0:
modifiedHea += "\n\nelectric_lighting_system " + str(len(lghtCtrls)) + "\n"
for lightingControl in lghtCtrls:
lightingGroupSensors.append(lightingControl.sensorPts)
lightingControlDefinition = lightingControl.lightingControlStr
modifiedHea += lightingControlDefinition
# write sensor info
modifiedHea += "\nsensor_file_info "
for pt in testPoints[spaceCount]:
sensorInfo = []
# test shading group
for groupCount, shdGroupSensor in enumerate([SHDGroupISensors[spaceCount], SHDGroupIISensors[spaceCount]]):
if shdGroupSensor!=None:
if isSensor(pt, shdGroupSensor.intSensors):
sensorInfo.append('BG' + str(groupCount+1))
if isSensor(pt, shdGroupSensor.extSensors):
sensorInfo.append('BG' + str(groupCount+1) + '_Ext')
# test lighting group
for groupCount, lightingGroupSensor in enumerate(lightingGroupSensors):
if lightingGroupSensor!=[] and isSensor(pt, lightingGroupSensor):
sensorInfo.append('LG' + str(groupCount+1))
if len(sensorInfo)==0:
modifiedHea += "0 "
elif len(sensorInfo)==1:
modifiedHea += sensorInfo[0] + " "
else:
modifiedHea += ",".join(sensorInfo) + " "
# output files
modifiedHea += "\n\n############################\n" + \
"# Daylighting Result Files #\n" + \
"############################\n"
modifiedHea += "daylight_autonomy_active_RGB " + subProjectName +"_autonomy.DA\n"
modifiedHea += "continuous_daylight_autonomy_active_RGB " + subProjectName +".CDA\n"
modifiedHea += "UDI_100_active_RGB " + subProjectName +"_less_than_100.UDI\n"
modifiedHea += "UDI_100_2000_active_RGB " + subProjectName +"_100_2000.UDI\n"
modifiedHea += "UDI_2000_active_RGB " + subProjectName + "_more_than_2000.UDI\n"
modifiedHea += "occupancy_profile " + subProjectName + "_occ_profile.csv\n"
modifiedHea += "electric_lighting " + subProjectName + "_electriclighting.htm\n"
modifiedHea += "direct_sunlight_file " + subProjectName + ".dir\n"
modifiedHea += "thermal_simulation " + subProjectName + "_intgain.csv\n"
#modifiedHea += "DDS_sensor_file " + subProjectName +".CDA\n".dds\n"
#modifiedHea += "DDS_file " + subProjectName +".sen\n"
heaFileName = subProjectName + ".hea"
heaFileNames.append(heaFileName)
with open(os.path.join(filePath, heaFileName), "w") as heaf:
heaf.write(modifiedHea)
# write batch files
batchFileNames = []
pathStr = "SET RAYPATH=.;" + hb_RADLibPath + ";" + hb_DSPath + ";" + hb_DSLibPath + ";\nPATH=" + hb_RADPath + ";" + hb_DSPath + ";" + hb_DSLibPath + ";$PATH\n"
for heaFileName in heaFileNames:
batchFileName = heaFileName.replace(".hea", ".bat")
batchFileNames.append(batchFileName)
with open(os.path.join(filePath, batchFileName), "w") as batchInf:
batchFileStr = ":: Daysim Result Calculation - Generated by Honeybee\n\n"
batchFileStr += pathStr
# gen glare profile in case there is any dynamic shading systems!
if len(originalIllFilesSorted.keys())>1:
batchFileStr += ':: Glare Profile in The Case of Dynamic Shading Calculation\n' + \
'gen_directsunlight ' + os.path.join(filePath, heaFileName) + '\n'
batchFileStr += ':: Generate the result files\n' + \
'ds_el_lighting.exe ' + os.path.join(filePath, heaFileName) + '\n'
batchInf.write(batchFileStr)
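    # Each generated batch file is essentially (sketch; paths depend on the local install):
    #   SET RAYPATH=...; PATH=...
    #   gen_directsunlight <project>_space_N.hea   (only when more than one shading group exists)
    #   ds_el_lighting.exe <project>_space_N.hea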
# write a batch file and run the study
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus == 0: ncpus = 1
#execute the batch files in parallel if there is enough CPUs!
fileNames = []
for fileName in batchFileNames:
batchFileName = os.path.join(filePath, fileName)
fileNames.append(batchFileName)
executeBatchFiles(fileNames, ncpus - 1, shell=runInBackground)
# calculate sDA
#sDADict = {}
#if len(newIllFileNamesDict.keys())!=1:
# warning = "This version of Honeybee doesn't consider dynamic blinds in sDA calculation!\n"
# w = gh.GH_RuntimeMessageLevel.Warning
# ghenv.Component.AddRuntimeMessage(w, warning)
#
#for spaceCount, spaceIllFiles in enumerate(newIllFileNamesDict[0]):
# totalOccupancyHours = 0
# sDADict[spaceCount] = 0
# try: DLAIllumThreshold = DLAIllumThresholds[spaceCount]
# except: DLAIllumThreshold = DLAIllumThresholds[0]
#
#
# # open the file to read the values
# with open(spaceIllFiles, "r") as illInf:
#
# # import occupancy profile
# try: occFile = occFiles[spaceCount]
# except: occFile = occFiles[0]
# with open(occFile, "r") as occInFile:
# occupancyLines = occInFile.readlines()
#
# # each line represnt an hour
# for lineCount, line in enumerate(illInf):
# higherThanThreshold = 0
# # check the occupancy profile
# if int(occupancyLines[lineCount + 3].split(",")[-1]) != 0:
# totalOccupancyHours += 1
# illValues = line.split(" ")[1].strip().split(" ")
#
# # check number of points that satisfy the minimum illuminance
# for sensorCount, illuminance in enumerate(illValues):
# # print float(illuminance), DLAIllumThreshold, float(illuminance) >= DLAIllumThreshold
# if float(illuminance) >= DLAIllumThreshold:
# higherThanThreshold += 1
#
# if higherThanThreshold/len(illValues) > .5:
# sDADict[spaceCount] += 1
#
# sDADict[spaceCount] = "%.2f"%((sDADict[spaceCount]/totalOccupancyHours) * 100)
# read all the results
DLALists = []
underUDLILists = []
inRangeUDLILists = []
overUDLILists = []
CDALists = []
EPLSchLists = []
htmLists = []
resultFiles = os.listdir(projectDirectory)
for fileName in resultFiles:
if fileName.endswith(".DA"): DLALists.append(os.path.join(filePath,fileName))
elif fileName.endswith(".CDA"): CDALists.append(os.path.join(filePath,fileName))
elif fileName.endswith(".htm"): htmLists.append(os.path.join(filePath,fileName))
elif fileName.endswith("_intgain.csv"): EPLSchLists.append(os.path.join(filePath,fileName))
elif fileName.endswith("less_than_100.UDI"): underUDLILists.append(os.path.join(filePath,fileName))
elif fileName.endswith("100_2000.UDI"): inRangeUDLILists.append(os.path.join(filePath,fileName))
elif fileName.endswith("more_than_2000.UDI"): overUDLILists.append(os.path.join(filePath,fileName))
# sort the lists
try: CDALists = sorted(CDALists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-1]))
except: pass
try: DLALists = sorted(DLALists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
except: pass
try: htmLists = sorted(htmLists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
except: pass
try: EPLSchLists = sorted(EPLSchLists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-2]))
except: pass
try: underUDLILists = sorted(underUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-4]))
except: pass
try: inRangeUDLILists = sorted(inRangeUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-3]))
except: pass
try: overUDLILists = sorted(overUDLILists, key=lambda fileName: int(fileName.split(".")[-2].split("_")[-4]))
except: pass
return None, [DLALists, underUDLILists, inRangeUDLILists, overUDLILists, CDALists, EPLSchLists, htmLists]
def isAllNone(dataList):
for item in dataList.AllData():
if item!=None: return False
return True
# Throwing warning if any of the mandatory input is missing
if isAllNone(_illFilesAddress) == True or isAllNone(_testPoints) == True:
msg = "Major input missing. Please check"
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, msg)
if _runIt and not isAllNone(_illFilesAddress) and not isAllNone(_testPoints):
_testPoints.SimplifyPaths()
lightingControlGroups_.SimplifyPaths()
_illFilesAddress.SimplifyPaths()
res = main(_illFilesAddress, _testPoints, ptsVectors_, occupancyFiles_, lightingControlGroups_, SHDGroupI_Sensors_, SHDGroupII_Sensors_, _DLAIllumThresholds_, _runIt > 1)
if res!= -1:
msg, results = res
if msg!=None:
w = gh.GH_RuntimeMessageLevel.Warning
ghenv.Component.AddRuntimeMessage(w, msg)
else:
DLALists, underUDLILists, inRangeUDLILists, overUDLILists, CDALists, EPLSchLists, htmLists = results
DLA = DataTree[Object]()
UDLI_Less_100 = DataTree[Object]()
UDLI_100_2000 = DataTree[Object]()
UDLI_More_2000 = DataTree[Object]()
CDA = DataTree[Object]()
annualProfiles = DataTree[Object]()
sDA = DataTree[Object]()
htmReport = DataTree[Object]()
def readDSStandardResults(filePath):
results = []
with open(filePath, "r") as inf:
for line in inf:
if not line.startswith("#"):
results.append(float(line.split("\t")[-1]))
return results
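            # The Daysim result files read here (.DA, .CDA, .UDI) are assumed to have one
            # row per sensor point, with the metric value in the last tab-separated column.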
def getsDA(DLARes, threshold = 50):
moreThan = 0
for res in DLARes:
if res >= threshold:
moreThan += 1
return "%.2f"%((moreThan/len(DLARes)) * 100)
for branchNum in range(_testPoints.BranchCount):
p = GH_Path(branchNum)
DLARes = readDSStandardResults(DLALists[branchNum])
DLA.AddRange(DLARes, p)
UDLI_Less_100.AddRange(readDSStandardResults(underUDLILists[branchNum]), p)
UDLI_100_2000.AddRange(readDSStandardResults(inRangeUDLILists[branchNum]), p)
UDLI_More_2000.AddRange(readDSStandardResults(overUDLILists[branchNum]), p)
CDA.AddRange(readDSStandardResults(CDALists[branchNum]), p)
annualProfiles.Add(EPLSchLists[branchNum], p)
sDA.Add(getsDA(DLARes), p)
htmReport.Add(htmLists[branchNum], p)
| gpl-3.0 |
grap/account-financial-tools | __unported__/account_compute_tax_amount/account_move_line.py | 7 | 3399 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2013 Camptocamp (http://www.camptocamp.com)
# All Right Reserved
#
# Author : Vincent Renaville (Camptocamp)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
import openerp.addons.decimal_precision as dp
class account_move_line(orm.Model):
_inherit = "account.move.line"
    # We set tax_amount to invisible because we recompute it in every case.
_columns = {
'tax_amount': fields.float(
'Tax/Base Amount',
digits_compute=dp.get_precision('Account'),
invisible=True,
select=True,
help="If the Tax account is a tax code account, "
"this field will contain the taxed amount. "
"If the tax account is base tax code, "
"this field will contain the basic amount (without tax)."
),
}
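    # tax_amount is kept in sync by the create/write overrides below: whenever a line
    # with a tax code is created or its debit/credit changes, it is recomputed as
    # credit - debit.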
def create(self, cr, uid, vals, context=None, check=True):
result = super(account_move_line, self).create(cr, uid, vals,
context=context,
check=check)
if result:
move_line = self.read(cr, uid, result,
['credit', 'debit', 'tax_code_id'],
context=context)
if move_line['tax_code_id']:
tax_amount = move_line['credit'] - move_line['debit']
self.write(cr, uid, [result],
{'tax_amount': tax_amount},
context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True,
update_check=True):
result = super(account_move_line, self).write(
cr, uid, ids, vals,
context=context,
check=check,
update_check=update_check
)
if result:
if ('debit' in vals) or ('credit' in vals):
move_lines = self.read(cr, uid, ids,
['credit', 'debit', 'tax_code_id'],
context=context)
for move_line in move_lines:
if move_line['tax_code_id']:
tax_amount = move_line['credit'] - move_line['debit']
self.write(cr, uid,
[move_line['id']],
{'tax_amount': tax_amount},
context=context)
return result
| agpl-3.0 |
risteon/nimble | nimble/sources/datasets.py | 1 | 1222 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
from ..composition import LabeledDataSource
from .images import ImageFileSource
from .labels import TransformMatrixSource
class KittiOdometrySource(LabeledDataSource):
"""Read images and ground truth poses of the Kitti dataset.
http://www.cvlibs.net/datasets/kitti/
Currently, this only reads the left image.
"""
def __init__(self, kitti_root_path, sequence, **kwargs):
self.seekable = True
self.parallel_possible = False
self.cached = False
self._sequence = sequence
self._sequence_folder = os.path.join(kitti_root_path, u"sequences", u"{:02d}".format(self._sequence),
u"image_2")
poses_file = os.path.join(kitti_root_path, u"poses", u"{:02d}.txt".format(self._sequence))
image_source = ImageFileSource(self._image_filename)
label_source = TransformMatrixSource(poses_file)
super(KittiOdometrySource, self).__init__(data_source=image_source, label_source=label_source, **kwargs)
def _image_filename(self, position):
return os.path.join(self._sequence_folder, "{:06}.png".format(position))
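        # e.g. position 42 of sequence 5 resolves to "<kitti_root>/sequences/05/image_2/000042.png"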
| mit |
daevaorn/sentry | src/sentry/migrations/0234_auto__add_savedsearchuserdefault__add_unique_savedsearchuserdefault_pr.py | 9 | 43642 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'SavedSearchUserDefault'
db.create_table('sentry_savedsearch_userdefault', (
('id', self.gf('sentry.db.models.fields.bounded.BoundedBigAutoField')(primary_key=True)),
('savedsearch', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.SavedSearch'])),
('project', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.Project'])),
('user', self.gf('sentry.db.models.fields.foreignkey.FlexibleForeignKey')(to=orm['sentry.User'])),
))
db.send_create_signal('sentry', ['SavedSearchUserDefault'])
# Adding unique constraint on 'SavedSearchUserDefault', fields ['project', 'user']
db.create_unique('sentry_savedsearch_userdefault', ['project_id', 'user_id'])
def backwards(self, orm):
# Removing unique constraint on 'SavedSearchUserDefault', fields ['project', 'user']
db.delete_unique('sentry_savedsearch_userdefault', ['project_id', 'user_id'])
# Deleting model 'SavedSearchUserDefault'
db.delete_table('sentry_savedsearch_userdefault')
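    # The 'models' dict below is South's frozen ORM snapshot; it is only used to build
    # the fake ORM handed to forwards()/backwards(), not executed as regular model code.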
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 1, 25, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.group': {
'Meta': {'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
JackWoot/E2E-Messenger | Server/passlib/handlers/sun_md5_crypt.py | 19 | 13964 | """passlib.handlers.sun_md5_crypt - Sun's Md5 Crypt, used on Solaris
.. warning::
This implementation may not reproduce
the original Solaris behavior in some border cases.
See documentation for details.
"""
#=============================================================================
# imports
#=============================================================================
# core
from hashlib import md5
import re
import logging; log = logging.getLogger(__name__)
from warnings import warn
# site
# pkg
from passlib.utils import h64, to_unicode
from passlib.utils.compat import b, bytes, byte_elem_value, irange, u, \
uascii_to_str, unicode, str_to_bascii
import passlib.utils.handlers as uh
# local
__all__ = [
"sun_md5_crypt",
]
#=============================================================================
# backend
#=============================================================================
# constant data used by alg - Hamlet act 3 scene 1 + null char
# exact bytes as in http://www.ibiblio.org/pub/docs/books/gutenberg/etext98/2ws2610.txt
# from Project Gutenberg.
MAGIC_HAMLET = b(
"To be, or not to be,--that is the question:--\n"
"Whether 'tis nobler in the mind to suffer\n"
"The slings and arrows of outrageous fortune\n"
"Or to take arms against a sea of troubles,\n"
"And by opposing end them?--To die,--to sleep,--\n"
"No more; and by a sleep to say we end\n"
"The heartache, and the thousand natural shocks\n"
"That flesh is heir to,--'tis a consummation\n"
"Devoutly to be wish'd. To die,--to sleep;--\n"
"To sleep! perchance to dream:--ay, there's the rub;\n"
"For in that sleep of death what dreams may come,\n"
"When we have shuffled off this mortal coil,\n"
"Must give us pause: there's the respect\n"
"That makes calamity of so long life;\n"
"For who would bear the whips and scorns of time,\n"
"The oppressor's wrong, the proud man's contumely,\n"
"The pangs of despis'd love, the law's delay,\n"
"The insolence of office, and the spurns\n"
"That patient merit of the unworthy takes,\n"
"When he himself might his quietus make\n"
"With a bare bodkin? who would these fardels bear,\n"
"To grunt and sweat under a weary life,\n"
"But that the dread of something after death,--\n"
"The undiscover'd country, from whose bourn\n"
"No traveller returns,--puzzles the will,\n"
"And makes us rather bear those ills we have\n"
"Than fly to others that we know not of?\n"
"Thus conscience does make cowards of us all;\n"
"And thus the native hue of resolution\n"
"Is sicklied o'er with the pale cast of thought;\n"
"And enterprises of great pith and moment,\n"
"With this regard, their currents turn awry,\n"
"And lose the name of action.--Soft you now!\n"
"The fair Ophelia!--Nymph, in thy orisons\n"
"Be all my sins remember'd.\n\x00" #<- apparently null at end of C string is included (test vector won't pass otherwise)
)
# NOTE: these sequences are pre-calculated iteration ranges used by X & Y loops w/in rounds function below
xr = irange(7)
_XY_ROUNDS = [
tuple((i,i,i+3) for i in xr), # xrounds 0
tuple((i,i+1,i+4) for i in xr), # xrounds 1
tuple((i,i+8,(i+11)&15) for i in xr), # yrounds 0
tuple((i,(i+9)&15, (i+12)&15) for i in xr), # yrounds 1
]
del xr
def raw_sun_md5_crypt(secret, rounds, salt):
"given secret & salt, return encoded sun-md5-crypt checksum"
global MAGIC_HAMLET
assert isinstance(secret, bytes)
assert isinstance(salt, bytes)
# validate rounds
if rounds <= 0:
rounds = 0
real_rounds = 4096 + rounds
# NOTE: spec seems to imply max 'rounds' is 2**32-1
# generate initial digest to start off round 0.
# NOTE: algorithm 'salt' includes full config string w/ trailing "$"
result = md5(secret + salt).digest()
assert len(result) == 16
# NOTE: many things in this function have been inlined (to speed up the loop
# as much as possible), to the point that this code barely resembles
# the algorithm as described in the docs. in particular:
#
# * all accesses to a given bit have been inlined using the formula
# rbitval(bit) = (rval((bit>>3) & 15) >> (bit & 7)) & 1
#
# * the calculation of coinflip value R has been inlined
#
# * the conditional division of coinflip value V has been inlined as
# a shift right of 0 or 1.
#
# * the i, i+3, etc iterations are precalculated in lists.
#
# * the round-based conditional division of x & y is now performed
# by choosing an appropriate precalculated list, so that it only
# calculates the 7 bits which will actually be used.
#
X_ROUNDS_0, X_ROUNDS_1, Y_ROUNDS_0, Y_ROUNDS_1 = _XY_ROUNDS
# NOTE: % appears to be *slightly* slower than &, so we prefer & if possible
round = 0
while round < real_rounds:
# convert last result byte string to list of byte-ints for easy access
rval = [ byte_elem_value(c) for c in result ].__getitem__
# build up X bit by bit
x = 0
xrounds = X_ROUNDS_1 if (rval((round>>3) & 15)>>(round & 7)) & 1 else X_ROUNDS_0
for i, ia, ib in xrounds:
a = rval(ia)
b = rval(ib)
v = rval((a >> (b % 5)) & 15) >> ((b>>(a&7)) & 1)
x |= ((rval((v>>3)&15)>>(v&7))&1) << i
# build up Y bit by bit
y = 0
yrounds = Y_ROUNDS_1 if (rval(((round+64)>>3) & 15)>>(round & 7)) & 1 else Y_ROUNDS_0
for i, ia, ib in yrounds:
a = rval(ia)
b = rval(ib)
v = rval((a >> (b % 5)) & 15) >> ((b>>(a&7)) & 1)
y |= ((rval((v>>3)&15)>>(v&7))&1) << i
        # extract x'th and y'th bit, xoring them together to yield "coin flip"
coin = ((rval(x>>3) >> (x&7)) ^ (rval(y>>3) >> (y&7))) & 1
# construct hash for this round
h = md5(result)
if coin:
h.update(MAGIC_HAMLET)
h.update(unicode(round).encode("ascii"))
result = h.digest()
round += 1
# encode output
return h64.encode_transposed_bytes(result, _chk_offsets)
# NOTE: same offsets as md5_crypt
_chk_offsets = (
12,6,0,
13,7,1,
14,8,2,
15,9,3,
5,10,4,
11,
)
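# Illustrative sketch (not part of the original module): calling the raw
# backend directly. As _calc_checksum() in the handler below does, the "salt"
# argument must be the full config string including the "$md5..." prefix and
# trailing "$"; the secret and salt values here are made up for demonstration.
#
#   chk = raw_sun_md5_crypt(b"secret", 5500, b"$md5,rounds=5500$jPsFZCGL$")
#   chk.decode("ascii")    # -> 22-character hash64 checksum string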
#=============================================================================
# handler
#=============================================================================
class sun_md5_crypt(uh.HasRounds, uh.HasSalt, uh.GenericHandler):
"""This class implements the Sun-MD5-Crypt password hash, and follows the :ref:`password-hash-api`.
It supports a variable-length salt, and a variable number of rounds.
The :meth:`~passlib.ifc.PasswordHash.encrypt` and :meth:`~passlib.ifc.PasswordHash.genconfig` methods accept the following optional keywords:
:type salt: str
:param salt:
Optional salt string.
If not specified, a salt will be autogenerated (this is recommended).
If specified, it must be drawn from the regexp range ``[./0-9A-Za-z]``.
:type salt_size: int
:param salt_size:
If no salt is specified, this parameter can be used to specify
the size (in characters) of the autogenerated salt.
It currently defaults to 8.
:type rounds: int
:param rounds:
Optional number of rounds to use.
Defaults to 5500, must be between 0 and 4294963199, inclusive.
:type bare_salt: bool
:param bare_salt:
Optional flag used to enable an alternate salt digest behavior
used by some hash strings in this scheme.
This flag can be ignored by most users.
Defaults to ``False``.
(see :ref:`smc-bare-salt` for details).
:type relaxed: bool
:param relaxed:
By default, providing an invalid value for one of the other
keywords will result in a :exc:`ValueError`. If ``relaxed=True``,
and the error can be corrected, a :exc:`~passlib.exc.PasslibHashWarning`
will be issued instead. Correctable errors include ``rounds``
that are too small or too large, and ``salt`` strings that are too long.
.. versionadded:: 1.6
"""
#===================================================================
# class attrs
#===================================================================
name = "sun_md5_crypt"
setting_kwds = ("salt", "rounds", "bare_salt", "salt_size")
checksum_chars = uh.HASH64_CHARS
checksum_size = 22
# NOTE: docs say max password length is 255.
# release 9u2
# NOTE: not sure if original crypt has a salt size limit,
# all instances that have been seen use 8 chars.
default_salt_size = 8
min_salt_size = 0
max_salt_size = None
salt_chars = uh.HASH64_CHARS
default_rounds = 5500 # current passlib default
min_rounds = 0
max_rounds = 4294963199 ##2**32-1-4096
    # XXX: ^ not sure what it does if past this bound... does the 32-bit int roll over?
rounds_cost = "linear"
ident_values = (u("$md5$"), u("$md5,"))
#===================================================================
# instance attrs
#===================================================================
bare_salt = False # flag to indicate legacy hashes that lack "$$" suffix
#===================================================================
# constructor
#===================================================================
def __init__(self, bare_salt=False, **kwds):
self.bare_salt = bare_salt
super(sun_md5_crypt, self).__init__(**kwds)
#===================================================================
# internal helpers
#===================================================================
@classmethod
def identify(cls, hash):
hash = uh.to_unicode_for_identify(hash)
return hash.startswith(cls.ident_values)
@classmethod
def from_string(cls, hash):
hash = to_unicode(hash, "ascii", "hash")
#
# detect if hash specifies rounds value.
# if so, parse and validate it.
# by end, set 'rounds' to int value, and 'tail' containing salt+chk
#
if hash.startswith(u("$md5$")):
rounds = 0
salt_idx = 5
elif hash.startswith(u("$md5,rounds=")):
idx = hash.find(u("$"), 12)
if idx == -1:
raise uh.exc.MalformedHashError(cls, "unexpected end of rounds")
rstr = hash[12:idx]
try:
rounds = int(rstr)
except ValueError:
raise uh.exc.MalformedHashError(cls, "bad rounds")
if rstr != unicode(rounds):
raise uh.exc.ZeroPaddedRoundsError(cls)
if rounds == 0:
# NOTE: not sure if this is forbidden by spec or not;
# but allowing it would complicate things,
# and it should never occur anyways.
raise uh.exc.MalformedHashError(cls, "explicit zero rounds")
salt_idx = idx+1
else:
raise uh.exc.InvalidHashError(cls)
#
# salt/checksum separation is kinda weird,
# to deal cleanly with some backward-compatible workarounds
# implemented by original implementation.
#
chk_idx = hash.rfind(u("$"), salt_idx)
if chk_idx == -1:
# ''-config for $-hash
salt = hash[salt_idx:]
chk = None
bare_salt = True
elif chk_idx == len(hash)-1:
if chk_idx > salt_idx and hash[-2] == u("$"):
raise uh.exc.MalformedHashError(cls, "too many '$' separators")
# $-config for $$-hash
salt = hash[salt_idx:-1]
chk = None
bare_salt = False
elif chk_idx > 0 and hash[chk_idx-1] == u("$"):
# $$-hash
salt = hash[salt_idx:chk_idx-1]
chk = hash[chk_idx+1:]
bare_salt = False
else:
# $-hash
salt = hash[salt_idx:chk_idx]
chk = hash[chk_idx+1:]
bare_salt = True
return cls(
rounds=rounds,
salt=salt,
checksum=chk,
bare_salt=bare_salt,
)
def to_string(self, withchk=True):
ss = u('') if self.bare_salt else u('$')
rounds = self.rounds
if rounds > 0:
hash = u("$md5,rounds=%d$%s%s") % (rounds, self.salt, ss)
else:
hash = u("$md5$%s%s") % (self.salt, ss)
if withchk:
chk = self.checksum
if chk:
hash = u("%s$%s") % (hash, chk)
return uascii_to_str(hash)
#===================================================================
# primary interface
#===================================================================
# TODO: if we're on solaris, check for native crypt() support.
# this will require extra testing, to make sure native crypt
# actually behaves correctly. of particular importance:
# when using ""-config, make sure to append "$x" to string.
def _calc_checksum(self, secret):
# NOTE: no reference for how sun_md5_crypt handles unicode
if isinstance(secret, unicode):
secret = secret.encode("utf-8")
config = str_to_bascii(self.to_string(withchk=False))
return raw_sun_md5_crypt(secret, self.rounds, config).decode("ascii")
#===================================================================
# eoc
#===================================================================
#=============================================================================
# eof
#=============================================================================
| gpl-2.0 |
enotx/ebook_librarian | src/classification.py | 1 | 13177 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import (QAbstractItemModel, QFile, QIODevice,
QItemSelectionModel, QModelIndex, Qt, QRect, QCoreApplication, QMetaObject)
from PyQt5 import QtWidgets
from PyQt5.uic import loadUi
from config import CLASSIFICATION_FILE_NAME
class TreeItem(object):
def __init__(self, data, parent=None):
self.parentItem = parent
self.itemData = data
self.childItems = []
def child(self, row):
return self.childItems[row]
def childCount(self):
return len(self.childItems)
def childNumber(self):
if self.parentItem != None:
return self.parentItem.childItems.index(self)
return 0
def columnCount(self):
return len(self.itemData)
def data(self, column):
return self.itemData[column]
def insertChildren(self, position, count, columns):
if position < 0 or position > len(self.childItems):
return False
for row in range(count):
data = [None for v in range(columns)]
item = TreeItem(data, self)
self.childItems.insert(position, item)
return True
def insertColumns(self, position, columns):
if position < 0 or position > len(self.itemData):
return False
for column in range(columns):
self.itemData.insert(position, None)
for child in self.childItems:
child.insertColumns(position, columns)
return True
def parent(self):
return self.parentItem
def removeChildren(self, position, count):
if position < 0 or position + count > len(self.childItems):
return False
for row in range(count):
self.childItems.pop(position)
return True
def removeColumns(self, position, columns):
if position < 0 or position + columns > len(self.itemData):
return False
for column in range(columns):
self.itemData.pop(position)
for child in self.childItems:
child.removeColumns(position, columns)
return True
def setData(self, column, value):
if column < 0 or column >= len(self.itemData):
return False
self.itemData[column] = value
return True
class TreeModel(QAbstractItemModel):
def __init__(self, headers, data, parent=None):
super(TreeModel, self).__init__(parent)
rootData = [header for header in headers]
self.rootItem = TreeItem(rootData)
self.setupModelData(data.split("\n"), self.rootItem)
def columnCount(self, parent=QModelIndex()):
return self.rootItem.columnCount()
def data(self, index, role):
if not index.isValid():
return None
if role != Qt.DisplayRole and role != Qt.EditRole:
return None
item = self.getItem(index)
return item.data(index.column())
def flags(self, index):
if not index.isValid():
return 0
return Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
def getItem(self, index):
if index.isValid():
item = index.internalPointer()
if item:
return item
return self.rootItem
def headerData(self, section, orientation, role=Qt.DisplayRole):
if orientation == Qt.Horizontal and role == Qt.DisplayRole:
return self.rootItem.data(section)
return None
def index(self, row, column, parent=QModelIndex()):
if parent.isValid() and parent.column() != 0:
return QModelIndex()
parentItem = self.getItem(parent)
childItem = parentItem.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QModelIndex()
def insertColumns(self, position, columns, parent=QModelIndex()):
self.beginInsertColumns(parent, position, position + columns - 1)
success = self.rootItem.insertColumns(position, columns)
self.endInsertColumns()
return success
def insertRows(self, position, rows, parent=QModelIndex()):
parentItem = self.getItem(parent)
self.beginInsertRows(parent, position, position + rows - 1)
success = parentItem.insertChildren(position, rows,
self.rootItem.columnCount())
self.endInsertRows()
return success
def parent(self, index):
if not index.isValid():
return QModelIndex()
childItem = self.getItem(index)
parentItem = childItem.parent()
if parentItem == self.rootItem:
return QModelIndex()
return self.createIndex(parentItem.childNumber(), 0, parentItem)
def removeColumns(self, position, columns, parent=QModelIndex()):
self.beginRemoveColumns(parent, position, position + columns - 1)
success = self.rootItem.removeColumns(position, columns)
self.endRemoveColumns()
if self.rootItem.columnCount() == 0:
self.removeRows(0, self.rowCount())
return success
def removeRows(self, position, rows, parent=QModelIndex()):
parentItem = self.getItem(parent)
self.beginRemoveRows(parent, position, position + rows - 1)
success = parentItem.removeChildren(position, rows)
self.endRemoveRows()
return success
def rowCount(self, parent=QModelIndex()):
parentItem = self.getItem(parent)
return parentItem.childCount()
def setData(self, index, value, role=Qt.EditRole):
if role != Qt.EditRole:
return False
item = self.getItem(index)
result = item.setData(index.column(), value)
if result:
self.dataChanged.emit(index, index)
return result
def setHeaderData(self, section, orientation, value, role=Qt.EditRole):
if role != Qt.EditRole or orientation != Qt.Horizontal:
return False
result = self.rootItem.setData(section, value)
if result:
self.headerDataChanged.emit(orientation, section, section)
return result
def setupModelData(self, lines, parent):
parents = [parent]
indentations = [0]
number = 0
while number < len(lines):
position = 0
while position < len(lines[number]):
if lines[number][position] != " ":
break
position += 1
lineData = lines[number][position:].trimmed()
if lineData:
# Read the column data from the rest of the line.
columnData = [s for s in lineData.split('\t') if s]
if position > indentations[-1]:
# The last child of the current parent is now the new
# parent unless the current parent has no children.
if parents[-1].childCount() > 0:
parents.append(parents[-1].child(parents[-1].childCount() - 1))
indentations.append(position)
else:
while position < indentations[-1] and len(parents) > 0:
parents.pop()
indentations.pop()
# Append a new item to the current parent's list of children.
parent = parents[-1]
parent.insertChildren(parent.childCount(), 1,
self.rootItem.columnCount())
for column in range(len(columnData)):
parent.child(parent.childCount() -1).setData(column, columnData[column])
number += 1
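# Illustrative sketch (not part of the original file): the plain-text layout
# that TreeModel.setupModelData() above parses (and that saveStatus() writes
# back to CLASSIFICATION_FILE_NAME). Each extra leading space is one level of
# nesting; multiple columns on one line would be separated by tabs:
#
#   Fiction
#    Novel
#    Short story
#   Science
#    Physics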
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
loadUi(sys.path[0] + '/ui/classification.ui', self)
self.view.header().close()
headers = ["Classification"]
# try:
# except:
file = QFile(CLASSIFICATION_FILE_NAME)
file.open(QIODevice.ReadOnly)
model = TreeModel(headers, file.readAll())
file.close()
self.view.setModel(model)
for column in range(model.columnCount()):
self.view.resizeColumnToContents(column)
self.exitAction.triggered.connect(QtWidgets.QApplication.instance().quit)
self.view.selectionModel().selectionChanged.connect(self.updateActions)
self.actionsMenu.aboutToShow.connect(self.updateActions)
self.insertRowAction.triggered.connect(self.insertRow)
self.insertColumnAction.triggered.connect(self.insertColumn)
self.removeRowAction.triggered.connect(self.removeRow)
self.removeColumnAction.triggered.connect(self.removeColumn)
self.insertChildAction.triggered.connect(self.insertChild)
        self.saveAction.triggered.connect(self.saveStatus)
self.updateActions()
def insertChild(self):
index = self.view.selectionModel().currentIndex()
model = self.view.model()
if model.columnCount(index) == 0:
if not model.insertColumn(0, index):
return
if not model.insertRow(0, index):
return
for column in range(model.columnCount(index)):
child = model.index(0, column, index)
model.setData(child, "[No data]", Qt.EditRole)
if model.headerData(column, Qt.Horizontal) is None:
model.setHeaderData(column, Qt.Horizontal, "[No header]",
Qt.EditRole)
self.view.selectionModel().setCurrentIndex(model.index(0, 0, index),
QItemSelectionModel.ClearAndSelect)
self.updateActions()
def insertColumn(self):
model = self.view.model()
column = self.view.selectionModel().currentIndex().column()
changed = model.insertColumn(column + 1)
if changed:
model.setHeaderData(column + 1, Qt.Horizontal, "[No header]",
Qt.EditRole)
self.updateActions()
return changed
def insertRow(self):
index = self.view.selectionModel().currentIndex()
model = self.view.model()
if not model.insertRow(index.row()+1, index.parent()):
return
self.updateActions()
for column in range(model.columnCount(index.parent())):
child = model.index(index.row()+1, column, index.parent())
model.setData(child, "[No data]", Qt.EditRole)
def removeColumn(self):
model = self.view.model()
column = self.view.selectionModel().currentIndex().column()
changed = model.removeColumn(column)
if changed:
self.updateActions()
return changed
def removeRow(self):
index = self.view.selectionModel().currentIndex()
model = self.view.model()
if (model.removeRow(index.row(), index.parent())):
self.updateActions()
def updateActions(self):
hasSelection = not self.view.selectionModel().selection().isEmpty()
self.removeRowAction.setEnabled(hasSelection)
self.removeColumnAction.setEnabled(hasSelection)
hasCurrent = self.view.selectionModel().currentIndex().isValid()
self.insertRowAction.setEnabled(hasCurrent)
self.insertColumnAction.setEnabled(hasCurrent)
if hasCurrent:
self.view.closePersistentEditor(self.view.selectionModel().currentIndex())
row = self.view.selectionModel().currentIndex().row()
column = self.view.selectionModel().currentIndex().column()
# if self.view.selectionModel().currentIndex().parent().isValid():
# self.statusBar().showMessage("Position: (%d,%d)" % (row, column))
# else:
# self.statusBar().showMessage("Position: (%d,%d) in top level" % (row, column))
def showitems(self, item, statuslist):
statuslist.append('')
x = self.getlevel(item)
for i in range(x):
# print (' ',end = "")
statuslist[-1] += ' '
statuslist[-1] += (str(item.itemData[0]))
# print (str(item.itemData[0]))
if item.childCount:
# print ("ok")
for i in item.childItems:
self.showitems(i, statuslist)
def getlevel(self, item):
x = 0
while item.parentItem != self.view.model().rootItem:
x += 1
item = item.parentItem
return x
    def saveStatus(self):
f = open (CLASSIFICATION_FILE_NAME, 'w')
model = self.view.model()
statuslist = []
for i in model.rootItem.childItems:
self.showitems(i,statuslist)
# print (str(i.itemData[0])[2:-1])
for i in statuslist:
if i[:2] == "b'" and i[-1] == "'":
f.write(i[2:-1]+'\n')
else:
f.write(i+'\n')
f.close()
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
| gpl-3.0 |
Jet-Streaming/gyp | pylib/gyp/easy_xml.py | 3 | 4960 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import os
def XmlToString(content, encoding='utf-8', pretty=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Visual Studio files have a lot of pre-defined structures. This function makes
it easy to represent these structures as Python data structures, instead of
having to create a lot of function calls.
Each XML element of the content is represented as a list composed of:
1. The name of the element, a string,
2. The attributes of the element, a dictionary (optional), and
3+. The content of the element, if any. Strings are simple text nodes and
lists are child elements.
Example 1:
<test/>
becomes
['test']
Example 2:
<myelement a='value1' b='value2'>
<childtype>This is</childtype>
<childtype>it!</childtype>
</myelement>
becomes
['myelement', {'a':'value1', 'b':'value2'},
['childtype', 'This is'],
['childtype', 'it!'],
]
Args:
content: The structured content to be converted.
encoding: The encoding to report on the first XML line.
pretty: True if we want pretty printing with indents and new lines.
Returns:
The XML content as a string.
"""
# We create a huge list of all the elements of the file.
xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
if pretty:
xml_parts.append('\n')
_ConstructContentList(xml_parts, content, pretty)
# Convert it to a string
return ''.join(xml_parts)
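# Illustrative sketch (not part of the original module): a small call to
# XmlToString() above and the string it should produce (pretty=False, so no
# indentation or newlines between elements):
#
#   XmlToString(['root', {'a': '1'}, ['child', 'text']])
#   # -> '<?xml version="1.0" encoding="utf-8"?><root a="1"><child>text</child></root>'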
def _ConstructContentList(xml_parts, specification, pretty, level=0):
""" Appends the XML parts corresponding to the specification.
Args:
xml_parts: A list of XML parts to be appended to.
specification: The specification of the element. See EasyXml docs.
pretty: True if we want pretty printing with indents and new lines.
level: Indentation level.
"""
# The first item in a specification is the name of the element.
if pretty:
indentation = ' ' * level
new_line = '\n'
else:
indentation = ''
new_line = ''
name = specification[0]
if not isinstance(name, str):
raise Exception('The first item of an EasyXml specification should be '
'a string. Specification was ' + str(specification))
xml_parts.append(indentation + '<' + name)
# Optionally in second position is a dictionary of the attributes.
rest = specification[1:]
if rest and isinstance(rest[0], dict):
for at, val in sorted(rest[0].iteritems()):
xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
rest = rest[1:]
if rest:
xml_parts.append('>')
all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
multi_line = not all_strings
if multi_line and new_line:
xml_parts.append(new_line)
for child_spec in rest:
# If it's a string, append a text node.
# Otherwise recurse over that child definition
if isinstance(child_spec, str):
xml_parts.append(_XmlEscape(child_spec))
else:
_ConstructContentList(xml_parts, child_spec, pretty, level + 1)
if multi_line and indentation:
xml_parts.append(indentation)
xml_parts.append('</%s>%s' % (name, new_line))
else:
xml_parts.append('/>%s' % new_line)
def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
win32=False):
""" Writes the XML content to disk, touching the file only if it has changed.
Args:
content: The structured content to be written.
path: Location of the file.
encoding: The encoding to report on the first line of the XML file.
pretty: True if we want pretty printing with indents and new lines.
"""
xml_string = XmlToString(content, encoding, pretty)
if win32 and os.linesep != '\r\n':
xml_string = xml_string.replace('\n', '\r\n')
# Get the old content
try:
f = open(path, 'r')
existing = f.read()
f.close()
except:
existing = None
# It has changed, write it
if existing != xml_string:
f = open(path, 'w')
f.write(xml_string)
f.close()
_xml_escape_map = {
'"': '"',
"'": ''',
'<': '<',
'>': '>',
'&': '&',
'\n': '
',
'\r': '
',
}
_xml_escape_re = re.compile(
"(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
def _XmlEscape(value, attr=False):
""" Escape a string for inclusion in XML."""
def replace(match):
m = match.string[match.start() : match.end()]
# don't replace single quotes in attrs
if attr and m == "'":
return m
return _xml_escape_map[m]
return _xml_escape_re.sub(replace, value)
| bsd-3-clause |
stanley89/helios-server | helios/security.py | 3 | 5315 | """
Helios Security -- mostly access control
Ben Adida (ben@adida.net)
"""
# nicely update the wrapper function
from functools import update_wrapper
from django.core.urlresolvers import reverse
from django.core.exceptions import *
from django.http import *
from django.conf import settings
from models import *
from helios_auth.security import get_user
from django.http import HttpResponseRedirect
import urllib
import helios
# current voter
def get_voter(request, user, election):
"""
return the current voter
"""
voter = None
if request.session.has_key('CURRENT_VOTER_ID'):
voter = Voter.objects.get(id=request.session['CURRENT_VOTER_ID'])
if voter.election != election:
voter = None
if not voter:
if user:
voter = Voter.get_by_election_and_user(election, user)
return voter
# a function to check if the current user is a trustee
HELIOS_TRUSTEE_UUID = 'helios_trustee_uuid'
def get_logged_in_trustee(request):
if request.session.has_key(HELIOS_TRUSTEE_UUID):
return Trustee.get_by_uuid(request.session[HELIOS_TRUSTEE_UUID])
else:
return None
def set_logged_in_trustee(request, trustee):
request.session[HELIOS_TRUSTEE_UUID] = trustee.uuid
#
# some common election checks
#
def do_election_checks(election, props):
# frozen
if props.has_key('frozen'):
frozen = props['frozen']
else:
frozen = None
# newvoters (open for registration)
if props.has_key('newvoters'):
newvoters = props['newvoters']
else:
newvoters = None
# frozen check
if frozen != None:
if frozen and not election.frozen_at:
raise PermissionDenied()
if not frozen and election.frozen_at:
raise PermissionDenied()
# open for new voters check
if newvoters != None:
if election.can_add_voters() != newvoters:
raise PermissionDenied()
def get_election_by_uuid(uuid):
if not uuid:
raise Exception("no election ID")
return Election.get_by_uuid(uuid)
# decorator for views that pertain to an election
# takes parameters:
# frozen - is the election frozen
# newvoters - does the election accept new voters
def election_view(**checks):
def election_view_decorator(func):
def election_view_wrapper(request, election_uuid=None, *args, **kw):
election = get_election_by_uuid(election_uuid)
if not election:
raise Http404
# do checks
do_election_checks(election, checks)
# if private election, only logged in voters
if election.private_p and not checks.get('allow_logins',False):
from views import password_voter_login
if not user_can_see_election(request, election):
return_url = request.get_full_path()
return HttpResponseRedirect("%s?%s" % (reverse(password_voter_login, args=[election.uuid]), urllib.urlencode({
'return_url' : return_url
})))
return func(request, election, *args, **kw)
return update_wrapper(election_view_wrapper, func)
return election_view_decorator
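# Illustrative sketch (not part of the original module): applying the
# election_view decorator above to a view. The view name and body here are
# hypothetical; the decorator looks up the election by its uuid from the URL
# and passes it to the wrapped view.
#
#   @election_view(frozen=True)
#   def my_election_view(request, election):
#       return HttpResponse(election.uuid)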
def user_can_admin_election(user, election):
if not user:
return False
# election or site administrator
return election.admin == user or user.admin_p
def user_can_see_election(request, election):
user = get_user(request)
if not election.private_p:
return True
# election is private
# but maybe this user is the administrator?
if user_can_admin_election(user, election):
return True
# or maybe this is a trustee of the election?
trustee = get_logged_in_trustee(request)
if trustee and trustee.election.uuid == election.uuid:
return True
# then this user has to be a voter
return (get_voter(request, user, election) != None)
def api_client_can_admin_election(api_client, election):
return election.api_client == api_client and api_client != None
# decorator for checking election admin access, and some properties of the election
# frozen - is the election frozen
# newvoters - does the election accept new voters
def election_admin(**checks):
def election_admin_decorator(func):
def election_admin_wrapper(request, election_uuid=None, *args, **kw):
election = get_election_by_uuid(election_uuid)
user = get_user(request)
if not user_can_admin_election(user, election):
raise PermissionDenied()
# do checks
do_election_checks(election, checks)
return func(request, election, *args, **kw)
return update_wrapper(election_admin_wrapper, func)
return election_admin_decorator
def trustee_check(func):
def trustee_check_wrapper(request, election_uuid, trustee_uuid, *args, **kwargs):
election = get_election_by_uuid(election_uuid)
trustee = Trustee.get_by_election_and_uuid(election, trustee_uuid)
if trustee == get_logged_in_trustee(request):
return func(request, election, trustee, *args, **kwargs)
else:
raise PermissionDenied()
return update_wrapper(trustee_check_wrapper, func)
def can_create_election(request):
user = get_user(request)
if not user:
return False
if helios.ADMIN_ONLY:
return user.admin_p
else:
return user.can_create_election()
def user_can_feature_election(user, election):
if not user:
return False
return user.admin_p
| apache-2.0 |
soxfmr/engineshooter | engineshooter/spiders/engine/baidu.py | 1 | 6685 | # -*- coding: utf-8 -*-
import re
import os
import scrapy
import random
import string
import tempfile
import webbrowser
from os.path import join
from urllib import urlencode
from StringIO import StringIO
from engineshooter.items import SearchResultItem
class BaiduEngine:
name = 'baidu'
BASE_URL = 'https://www.baidu.com/s?rn=50&wd={}'
CAPTCHA_URL = 'https://ipv4.google.com/sorry/index'
NOTHING_MATCHES_TAG = ('<div class="content_none"><div class="nors">', )
def __init__(self, spider):
self.spider = spider
self.keyword = ''
self.callback = None
self.maxpage = 0
self.result = []
self.intercept_status = False
self.url_next_page = None
self.request = None
self.payload = None
self.current_page = 0
def search(self, keyword, callback, maxpage=0):
self.maxpage = maxpage
self.keyword = keyword
self.callback = callback
return scrapy.Request(url=BaiduEngine.BASE_URL.format(self.keyword), callback=self.callback)
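    # Illustrative sketch (not part of the original file): how a spider might
    # drive this engine. The spider object and its parse_result callback are
    # hypothetical.
    #
    #   engine = BaiduEngine(spider)
    #   yield engine.search('keyword', callback=spider.parse_result, maxpage=3)
    #   # then, inside spider.parse_result(response):
    #   if engine.parse(response):
    #       urls = engine.get_result_url()
    #   next_request = engine.next()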
def parse(self, response):
# reset
self.request = None
self.result = []
self.url_next_page = None
self.intercept_status = False
# Nothing found
empty = True
for tag in BaiduEngine.NOTHING_MATCHES_TAG:
if tag not in response.body:
empty = False
break
if empty:
self.spider.logger.warning('Empty search result')
return False
"""
# Determine whether the captcha present
if response.status in [301, 302]:
if GoogleEngine.CAPTCHA_URL in response.headers['Location']:
self.spider.logger.info(response.headers['Location'])
self.spider.logger.warning('Captcha redirect detect, grabing the captcha...')
self.request = scrapy.Request(url = response.headers['Location'], callback = self.callback,
dont_filter = True, meta = {'route' : self.grab_captcha})
else:
if 'route' not in response.meta:
# Validate success
self.spider.logger.info('Validate success, continue for next request')
self.url_next_page = response.headers['Location']
return False
if response.status in [503] or 'route' in response.meta:
'''
self.spider.logger.warning('Validation code incorrectly, please retry')
self.request = scrapy.Request(url = response.url, callback = self.callback,
dont_filter = True, meta = {'engine' : self, 'route' : self.grab_captcha})
'''
response.meta['route'](response)
return False
"""
# Extact all of result
for item in response.css('div.result > h3.t'):
try:
result = SearchResultItem()
result['url'] = re.search('(http|https)://.+', item.css('a::attr(href)').extract_first()).group()
# Right to Left
title = u''.join([plain.extract() for plain in item.css('a::text')])
result['title'] = title.encode('utf-8')
self.result.append(result)
except Exception as e:
self.spider.logger.error('An error occured when extract the item: ' + str(e))
# Current page
current_page = response.css('strong > span.pc::text').extract_first()
if current_page:
self.current_page = int(current_page)
self.spider.logger.info('Current search index %d', self.current_page)
# Parse next page information
next_page = response.css('a.n::attr(href)').extract()
next_text = response.css('a.n::text').extract()
if next_page:
length = len(next_page)
# Stopped sending request if not next page button present
if length > 1 or '>' in next_text[0]:
if length == 2:
_, next_page = next_page
else:
next_page = next_page[0]
next_page = re.sub(r'pn=(\d+)', 'pn=%d&rn=50' % (self.current_page * 50), next_page)
self.url_next_page = response.urljoin(next_page)
        self.spider.logger.info('Extracted %d URLs from the current page', len(self.result))
self.spider.logger.info('Response parsing completed')
return True
def next(self):
if self.request:
self.spider.logger.info('Urgent request provides, sending request directly.')
return self.request
if self.maxpage > 0 and self.current_page >= self.maxpage:
self.spider.logger.info('Crawled %d pages as desire', self.maxpage)
return
if self.url_next_page == None:
self.spider.logger.info('Reached the end of page')
return
self.spider.logger.info('Sending request for next page')
return scrapy.Request(url = self.url_next_page, callback = self.callback, dont_filter = True)
def grab_captcha(self, response):
self.payload = {'q' : response.css('input[name=q]::attr(value)').extract_first().encode('utf-8'),
'continue' : response.css('input[name=continue]::attr(value)').extract_first().encode('utf-8') }
imgurl = response.urljoin(response.css('img::attr(src)').extract_first())
self.request = scrapy.Request(url=imgurl, callback=self.callback, meta = {
'route' : self.require_captcha, 'url' : response.url})
# Notify user for captcha
self.intercept_status = True
def require_captcha(self, response):
tmpdir = tempfile.gettempdir()
path = join(tmpdir, ''.join(random.choice(string.letters + string.digits) for _ in range(6)) + '.jpg')
with open(path, 'wb+') as handle:
handle.write(response.body)
webbrowser.open(path)
while True:
captcha = raw_input('Please enter the captcha: ')
if not captcha:
continue
self.payload['captcha'] = captcha
url = '{}?{}'.format(BaiduEngine.CAPTCHA_URL, urlencode(self.payload))
self.spider.logger.info(url)
self.request = scrapy.Request(url=url, dont_filter = True, meta = {'route' : self.grab_captcha})
self.spider.logger.info(self.payload)
break
os.remove(path)
def get_result_url(self):
return map(lambda item: item['url'], self.result)
def get_result(self):
return self.result
def intercept(self):
return self.intercept_status
| mit |
vkscool/nupic | tests/unit/nupic/algorithms/nab_detector_test.py | 9 | 10846 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test that the NuPIC imports used in
nab/detectors/numenta/numenta_detector.py run as expected. They are
nupic/algorithms/anomaly_likelihood and
nupic/frameworks/opf/modelfactory.ModelFactory. The intent here is not to test
functionality but rather to verify that the functions are able to run in NAB.
NAB repo: https://github.com/numenta/NAB
"""
import copy
import csv
import datetime
import os
import unittest
from nupic.algorithms import anomaly_likelihood as an
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.clamodel import CLAModel
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _getDateList(numSamples, startDatetime):
"""
Generate a sequence of sample dates starting at startDatetime and incrementing
every 5 minutes.
@param numSamples (int) number of datetimes to generate
@param startDatetime (datetime) the start (first) datetime
@return dateList (list) generated sequence of datetimes
"""
dateList = []
td = datetime.timedelta(minutes=5)
currentDate = startDatetime + td
for _ in xrange(numSamples):
dateList.append(currentDate)
currentDate = currentDate + td
return dateList
def _addSampleData(numSamples=20, spikeValue=1.0, spikePeriod=10):
"""
Add sample anomaly data to the existing/new data list. Data is constant 0.0,
where anomalies are spikes to 1.0 at an interval set by spikePeriod. The test
data is trivial, as explicit testing of functions is done in other unit tests.
@param numSamples (int) number of data entries to produce
@param spikeValue (float) value of the anomaly spikes
@param spikePeriod (int) periodicity of anomaly spikes, where one will
occur for every spikePeriod data entries
@return data (list) list of generated data entries
"""
# Generate datetimes
lastDate = datetime.datetime(2015, 4, 1)
dateList = _getDateList(numSamples, lastDate)
# Generate data with anomaly spikes
data = []
for idx, date in enumerate(dateList):
if (spikePeriod > 0) and ( (idx + 1) % spikePeriod == 0):
data.append([date, idx, spikeValue])
else:
data.append([date, idx, 0.0])
return data
def _writeToCSV(data, headers, fileName):
"""
Write list of data to CSV.
@param data (list) list of data entries, where each row is a list
@param headers (list) column headers, where each entry in list is
                        a string
  @param fileName (str) path of the output CSV file
"""
with open(fileName, "wb") as f:
writer = csv.writer(f, delimiter=",", lineterminator="\n")
writer.writerow(headers)
writer.writerows(data)
class NABTest(TestCaseBase):
def setUp(self):
# Generate sample data, save to CSV (not used now, but put in place
# for future NAB tests)
self.data = _addSampleData()
self.dataFileName = "temp_data.csv"
_writeToCSV(self.data, ["datetime", "index", "value"], self.dataFileName)
def tearDown(self):
os.remove(self.dataFileName)
def testModelCreator(self):
"""
Tests the ModelFactory.create() method in
"nupic/frameworks/opf/modelfactory.py" by creating a new model object, as
in "NAB/detectors/numenta/numenta_detector.py".
    Model parameters are the same as in NAB v0.8.
"""
# Create model as in NAB/.../numenta_detector.py
modelParams = {
"aggregationInfo": {
"days": 0,
"fields": [],
"hours": 0,
"microseconds": 0,
"milliseconds": 0,
"minutes": 0,
"months": 0,
"seconds": 0,
"weeks": 0,
"years": 0
},
"model": "CLA",
"modelParams": {
"anomalyParams": {
"anomalyCacheRecords": None,
"autoDetectThreshold": None,
"autoDetectWaitRecords": 5030
},
"clEnable": False,
"clParams": {
"alpha": 0.035828933612158,
"clVerbosity": 0,
"regionName": "CLAClassifierRegion",
"steps": "1"
},
"inferenceType": "TemporalAnomaly",
"sensorParams": {
"encoders": {
"timestamp_timeOfDay": {
"fieldname": "timestamp",
"name": "timestamp_timeOfDay",
"timeOfDay": [
21,
9.49122334747737
],
"type": "DateEncoder"
},
"timestamp_dayOfWeek": None,
"timestamp_weekend": None,
"value": {
"name": "value",
"fieldname": "value",
"numBuckets": 94.0,
"seed": 42,
"type": "RandomDistributedScalarEncoder"
}
},
"sensorAutoReset": None,
"verbosity": 0
},
"spEnable": True,
"spParams": {
"potentialPct": 0.8,
"columnCount": 2048,
"globalInhibition": 1,
"inputWidth": 0,
"maxBoost": 1.0,
"numActiveColumnsPerInhArea": 40,
"seed": 1956,
"spVerbosity": 0,
"spatialImp": "cpp",
"synPermActiveInc": 0.0015,
"synPermConnected": 0.1,
"synPermInactiveDec": 0.0005
},
"tpEnable": True,
"tpParams": {
"activationThreshold": 13,
"cellsPerColumn": 32,
"columnCount": 2048,
"globalDecay": 0.0,
"initialPerm": 0.21,
"inputWidth": 2048,
"maxAge": 0,
"maxSegmentsPerCell": 128,
"maxSynapsesPerSegment": 32,
"minThreshold": 10,
"newSynapseCount": 20,
"outputType": "normal",
"pamLength": 3,
"permanenceDec": 0.1,
"permanenceInc": 0.1,
"seed": 1960,
"temporalImp": "cpp",
"verbosity": 0
},
"trainSPNetOnlyIfRequested": False
},
"predictAheadTime": None,
"version": 1
}
sensorParams = (modelParams["modelParams"]["sensorParams"]
["encoders"]["value"])
sensorParams["resolution"] = max(0.001,
(1.2 - 0.2) / sensorParams.pop("numBuckets"))
model = ModelFactory.create(modelParams)
self.assertIs(type(model), CLAModel, msg="The created model is not a"
"CLAModel, but rather is of type %s" % type(model))
def testNABAnomalyLikelihood(self):
"""
    Tests the specific calls to nupic/algorithms/anomaly_likelihood as they're
made in "NAB/detectors/numenta/numenta_detector.py".
Note "NAB/.../numenta_detector.py" has its own class AnomalyLikelihood,
different from nupic/algorithms/anomaly_likelihood.AnomalyLikelihood, but
which calls the functions estimateAnomalyLikelihoods() and
updateAnomalyLikelihoods() from "nupic/algorithms/anomaly_likelihood.py".
"""
# AnomalyLikelihood object initial values
iteration = 0
probationaryPeriod = 4
historicalScores = []
likelihoodList = []
for dataPoint in self.data:
# Ignore the first probationaryPeriod data points
if len(historicalScores) < probationaryPeriod:
likelihood = 0.5
else:
if iteration % 4 == 0:
_, _, distribution = an.estimateAnomalyLikelihoods(
historicalScores,
skipRecords = probationaryPeriod)
likelihoods, _, distribution = an.updateAnomalyLikelihoods(
[dataPoint], distribution)
likelihood = 1.0 - likelihoods[0]
historicalScores.append(dataPoint)
iteration += 1
likelihoodList.append(likelihood)
truthLikelihoodList = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.044565462999999972, 0.044565462999999972,
0.044565462999999972, 0.044565462999999972,
0.90319951499999995, 0.90319951499999995,
0.90319951499999995, 0.90319951499999995,
0.78814460099999994, 0.78814460099999994,
0.78814460099999994, 0.78814460099999994]
for i in xrange(len(likelihoodList)):
self.assertAlmostEqual(likelihoodList[i], truthLikelihoodList[i],
msg="unequal values are at index %i" % i)
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
ruziniu/v2ex | main.py | 15 | 57245 | #!/usr/bin/env python
# coding=utf-8
import base64
import os
import re
import time
import datetime
import hashlib
import urllib
import string
import random
import pickle
import math
import logging
from google.appengine.ext import webapp
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api.labs import taskqueue
from google.appengine.api import mail
from google.appengine.ext import db
from google.appengine.ext.webapp import util
from google.appengine.ext.webapp import template
from v2ex.babel import Member
from v2ex.babel import Counter
from v2ex.babel import Section
from v2ex.babel import Node
from v2ex.babel import Topic
from v2ex.babel import Reply
from v2ex.babel import PasswordResetToken
from v2ex.babel import SYSTEM_VERSION
from v2ex.babel.security import *
from v2ex.babel.ua import *
from v2ex.babel.da import *
from v2ex.babel.l10n import *
from v2ex.babel.ext.cookies import Cookies
from v2ex.babel.ext.sessions import Session
from v2ex.babel.handlers import BaseHandler
from django.utils import simplejson as json
from v2ex.babel.ext import captcha
template.register_template_library('v2ex.templatetags.filters')
import config
class HomeHandler(webapp.RequestHandler):
def head(self):
pass
def get(self):
host = self.request.headers['Host']
if host == 'beta.v2ex.com':
return self.redirect('http://www.v2ex.com/')
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = GetSite()
template_values['canonical'] = 'http://' + site.domain + '/'
template_values['rnd'] = random.randrange(1, 100)
template_values['page_title'] = site.title
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
if member:
if member.my_home != None and len(member.my_home) > 0:
return self.redirect(member.my_home)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member:
self.response.headers['Set-Cookie'] = 'auth=' + member.auth + '; expires=' + (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d-%b-%Y %H:%M:%S GMT") + '; path=/'
template_values['member'] = member
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if (len(blocked) > 0):
template_values['blocked'] = ','.join(map(str, blocked))
if member:
recent_nodes = memcache.get('member::' + str(member.num) + '::recent_nodes')
if recent_nodes:
template_values['recent_nodes'] = recent_nodes
nodes_new = []
nodes_new = memcache.get('home_nodes_new')
if nodes_new is None:
nodes_new = []
qnew = db.GqlQuery("SELECT * FROM Node ORDER BY created DESC LIMIT 10")
if (qnew.count() > 0):
i = 0
for node in qnew:
nodes_new.append(node)
i = i + 1
memcache.set('home_nodes_new', nodes_new, 86400)
template_values['nodes_new'] = nodes_new
ignored = ['newbie', 'in', 'flamewar', 'pointless', 'tuan', '528491', 'chamber', 'autistic', 'blog', 'love', 'flood', 'beforesunrise', 'diary', 'fanfou', 'closed']
if browser['ios']:
home_rendered = memcache.get('home_rendered_mobile')
if home_rendered is None:
latest = memcache.get('q_latest_16')
if (latest):
template_values['latest'] = latest
else:
q2 = db.GqlQuery("SELECT * FROM Topic ORDER BY last_touched DESC LIMIT 16")
topics = []
for topic in q2:
if topic.node_name not in ignored:
topics.append(topic)
memcache.set('q_latest_16', topics, 600)
latest = topics
template_values['latest'] = latest
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'home_mobile.html')
home_rendered = template.render(path, template_values)
memcache.set('home_rendered_mobile', home_rendered, 600)
template_values['home'] = home_rendered
else:
home_rendered = memcache.get('home_rendered')
if home_rendered is None:
latest = memcache.get('q_latest_16')
if (latest):
template_values['latest'] = latest
else:
q2 = db.GqlQuery("SELECT * FROM Topic ORDER BY last_touched DESC LIMIT 16")
topics = []
for topic in q2:
if topic.node_name not in ignored:
topics.append(topic)
memcache.set('q_latest_16', topics, 600)
latest = topics
template_values['latest'] = latest
path = os.path.join(os.path.dirname(__file__), 'tpl', 'portion', 'home.html')
home_rendered = template.render(path, template_values)
memcache.set('home_rendered', home_rendered, 600)
template_values['home'] = home_rendered
member_total = memcache.get('member_total')
if member_total is None:
q3 = db.GqlQuery("SELECT * FROM Counter WHERE name = 'member.total'")
if (q3.count() > 0):
member_total = q3[0].value
else:
member_total = 0
memcache.set('member_total', member_total, 3600)
template_values['member_total'] = member_total
topic_total = memcache.get('topic_total')
if topic_total is None:
q4 = db.GqlQuery("SELECT * FROM Counter WHERE name = 'topic.total'")
if (q4.count() > 0):
topic_total = q4[0].value
else:
topic_total = 0
memcache.set('topic_total', topic_total, 3600)
template_values['topic_total'] = topic_total
reply_total = memcache.get('reply_total')
if reply_total is None:
q5 = db.GqlQuery("SELECT * FROM Counter WHERE name = 'reply.total'")
if (q5.count() > 0):
reply_total = q5[0].value
else:
reply_total = 0
memcache.set('reply_total', reply_total, 3600)
template_values['reply_total'] = reply_total
hottest = memcache.get('index_hottest_sidebar')
if hottest is None:
qhot = db.GqlQuery("SELECT * FROM Node ORDER BY topics DESC LIMIT 25")
hottest = u''
for node in qhot:
hottest = hottest + '<a href="/go/' + node.name + '" class="item_node">' + node.title + '</a>'
memcache.set('index_hottest_sidebar', hottest, 86400)
template_values['index_hottest_sidebar'] = hottest
c = memcache.get('index_categories')
if c is None:
c = ''
i = 0
if site.home_categories is not None:
categories = site.home_categories.split("\n")
else:
categories = []
for category in categories:
category = category.strip()
i = i + 1
c = c + '<div class="cell"><table cellpadding="0" cellspacing="0" border="0"><tr><td align="right" width="60"><span class="fade">' + category + '</span></td><td style="line-height: 200%; padding-left: 10px;">'
qx = db.GqlQuery("SELECT * FROM Node WHERE category = :1 ORDER BY topics DESC", category)
for node in qx:
c = c + '<a href="/go/' + node.name + '" style="font-size: 14px;">' + node.title + '</a> '
c = c + '</td></tr></table></div>'
memcache.set('index_categories', c, 86400)
template_values['c'] = c
if (browser['ios']):
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'index.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'index.html')
output = template.render(path, template_values)
self.response.out.write(output)
class PlanesHandler(BaseHandler):
def get(self):
c = 0
c = memcache.get('planes_c')
s = ''
s = memcache.get('planes')
if (s == None):
c = 0
s = ''
q = db.GqlQuery("SELECT * FROM Section ORDER BY nodes DESC")
if (q.count() > 0):
for section in q:
q2 = db.GqlQuery("SELECT * FROM Node WHERE section_num = :1 ORDER BY topics DESC", section.num)
n = ''
if (q2.count() > 0):
nodes = []
i = 0
for node in q2:
nodes.append(node)
i = i + 1
random.shuffle(nodes)
for node in nodes:
fs = random.randrange(12, 16)
n = n + '<a href="/go/' + node.name + '" class="item_node">' + node.title + '</a>'
c = c + 1
s = s + '<div class="sep20"></div><div class="box"><div class="cell"><div class="fr"><strong class="snow">' + section.title_alternative + u'</strong><small class="snow"> • ' + str(section.nodes) + ' nodes</small></div>' + section.title + '</div><div class="inner" align="center">' + n + '</div></div>'
memcache.set('planes', s, 86400)
memcache.set('planes_c', c, 86400)
self.values['c'] = c
self.values['s'] = s
self.values['page_title'] = self.site.title.decode('utf-8') + u' › ' + self.l10n.planes.decode('utf-8')
self.finalize(template_name='planes')
class RecentHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
template_values['system_version'] = SYSTEM_VERSION
template_values['page_title'] = site.title + u' › 最近的 50 个主题'
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member:
template_values['member'] = member
try:
blocked = pickle.loads(member.blocked.encode('utf-8'))
except:
blocked = []
if (len(blocked) > 0):
template_values['blocked'] = ','.join(map(str, blocked))
latest = memcache.get('q_recent_50')
if (latest):
template_values['latest'] = latest
else:
q2 = db.GqlQuery("SELECT * FROM Topic ORDER BY last_touched DESC LIMIT 16,50")
topics = []
IGNORED_RECENT = ['flamewar', 'pointless', 'in', 'autistic', 'chamber', 'flood', 'diary', 'fanfou']
for topic in q2:
if topic.node_name not in IGNORED_RECENT:
topics.append(topic)
memcache.set('q_recent_50', topics, 80)
template_values['latest'] = topics
template_values['latest_total'] = len(topics)
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'recent.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'recent.html')
output = template.render(path, template_values)
expires_date = datetime.datetime.utcnow() + datetime.timedelta(minutes=2)
expires_str = expires_date.strftime("%d %b %Y %H:%M:%S GMT")
self.response.headers.add_header("Expires", expires_str)
self.response.headers['Cache-Control'] = 'max-age=120, must-revalidate'
self.response.out.write(output)
class UAHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['ua'] = os.environ['HTTP_USER_AGENT']
template_values['page_title'] = site.title + u' › 用户代理字符串'
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'ua.html')
output = template.render(path, template_values)
self.response.out.write(output)
class SigninHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
member = False
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 登入'
template_values['system_version'] = SYSTEM_VERSION
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
errors = 0
template_values['errors'] = errors
template_values['next'] = self.request.referer
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'signin.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'signin.html')
output = template.render(path, template_values)
self.response.out.write(output)
def post(self):
site = GetSite()
member = False
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 登入'
template_values['system_version'] = SYSTEM_VERSION
u = self.request.get('u').strip()
p = self.request.get('p').strip()
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
errors = 0
error_messages = ['', '请输入用户名和密码', '你输入的用户名或密码不正确']
if (len(u) > 0 and len(p) > 0):
p_sha1 = hashlib.sha1(p).hexdigest()
if '@' in u:
q = db.GqlQuery("SELECT * FROM Member WHERE email = :1 AND password = :2", u.lower(), p_sha1)
else:
q = db.GqlQuery("SELECT * FROM Member WHERE username_lower = :1 AND password = :2", u.lower(), p_sha1)
if (q.count() == 1):
member = q[0]
self.response.headers['Set-Cookie'] = 'auth=' + member.auth + '; expires=' + (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d-%b-%Y %H:%M:%S GMT") + '; path=/'
next = self.request.get('next').strip()
host = self.request.host + '/'
if next.rfind(host)>0 and not next.rfind('/sign'):
self.redirect(next)
else:
self.redirect('/')
else:
errors = 2
else:
errors = 1
template_values['u'] = u
template_values['p'] = p
template_values['errors'] = errors
template_values['error_message'] = error_messages[errors]
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'signin.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'signin.html')
output = template.render(path, template_values)
self.response.out.write(output)
class SignupHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
member = False
chtml = captcha.displayhtml(
public_key = config.recaptcha_public_key,
use_ssl = False,
error = None)
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 注册'
template_values['system_version'] = SYSTEM_VERSION
template_values['errors'] = 0
template_values['captchahtml'] = chtml
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'signup.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'signup.html')
output = template.render(path, template_values)
self.response.out.write(output)
def post(self):
site = GetSite()
member = False
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 注册'
template_values['system_version'] = SYSTEM_VERSION
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
errors = 0
# Verification: username
member_username_error = 0
member_username_error_messages = ['',
l10n.username_empty,
l10n.username_too_long,
l10n.username_too_short,
l10n.username_invalid,
l10n.username_taken]
member_username = self.request.get('username').strip()
# Special cases
if 'vpn' in member_username:
return self.redirect('http://www.v2ex.com/')
if (len(member_username) == 0):
errors = errors + 1
member_username_error = 1
else:
if (len(member_username) > 16):
errors = errors + 1
member_username_error = 2
else:
if (len(member_username) < 3):
errors = errors + 1
member_username_error = 3
else:
if (re.search('^[a-zA-Z0-9\_]+$', member_username)):
q = db.GqlQuery('SELECT __key__ FROM Member WHERE username_lower = :1', member_username.lower())
if (q.count() > 0):
errors = errors + 1
member_username_error = 5
else:
errors = errors + 1
member_username_error = 4
template_values['member_username'] = member_username
template_values['member_username_error'] = member_username_error
template_values['member_username_error_message'] = member_username_error_messages[member_username_error]
# Verification: password
member_password_error = 0
member_password_error_messages = ['',
u'请输入你的密码',
u'密码长度不能超过 32 个字符'
]
member_password = self.request.get('password').strip()
if (len(member_password) == 0):
errors = errors + 1
member_password_error = 1
else:
if (len(member_password) > 32):
errors = errors + 1
member_password_error = 2
template_values['member_password'] = member_password
template_values['member_password_error'] = member_password_error
template_values['member_password_error_message'] = member_password_error_messages[member_password_error]
# Verification: email
member_email_error = 0
member_email_error_messages = ['',
u'请输入你的电子邮件地址',
u'电子邮件地址长度不能超过 32 个字符',
u'你输入的电子邮件地址不符合规则',
u'抱歉这个电子邮件地址已经有人注册过了']
member_email = self.request.get('email').strip()
if (len(member_email) == 0):
errors = errors + 1
member_email_error = 1
else:
if (len(member_email) > 32):
errors = errors + 1
member_email_error = 2
else:
p = re.compile(r"(?:^|\s)[-a-z0-9_.+]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}(?:\s|$)", re.IGNORECASE)
if (p.search(member_email)):
q = db.GqlQuery('SELECT __key__ FROM Member WHERE email = :1', member_email.lower())
if (q.count() > 0):
errors = errors + 1
member_email_error = 4
else:
errors = errors + 1
member_email_error = 3
template_values['member_email'] = member_email
template_values['member_email_error'] = member_email_error
template_values['member_email_error_message'] = member_email_error_messages[member_email_error]
# Verification: reCAPTCHA
challenge = self.request.get('recaptcha_challenge_field')
response = self.request.get('recaptcha_response_field')
remoteip = os.environ['REMOTE_ADDR']
cResponse = captcha.submit(
challenge,
response,
config.recaptcha_private_key,
remoteip)
if cResponse.is_valid:
logging.info('reCAPTCHA verification passed')
template_values['recaptcha_error'] = 0
else:
errors = errors + 1
error = cResponse.error_code
chtml = captcha.displayhtml(
public_key = config.recaptcha_public_key,
use_ssl = False,
error = cResponse.error_code)
template_values['captchahtml'] = chtml
template_values['recaptcha_error'] = 1
template_values['recaptcha_error_message'] = '请重新输入 reCAPTCHA 验证码'
template_values['errors'] = errors
if (errors == 0):
member = Member()
q = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'member.max')
if (q.count() == 1):
counter = q[0]
counter.value = counter.value + 1
else:
counter = Counter()
counter.name = 'member.max'
counter.value = 1
q2 = db.GqlQuery('SELECT * FROM Counter WHERE name = :1', 'member.total')
if (q2.count() == 1):
counter2 = q2[0]
counter2.value = counter2.value + 1
else:
counter2 = Counter()
counter2.name = 'member.total'
counter2.value = 1
member.num = counter.value
member.username = member_username
member.username_lower = member_username.lower()
member.password = hashlib.sha1(member_password).hexdigest()
member.email = member_email.lower()
member.auth = hashlib.sha1(str(member.num) + ':' + member.password).hexdigest()
member.l10n = site.l10n
member.newbie = 1
member.noob = 0
if member.num == 1:
member.level = 0
else:
member.level = 1000
member.put()
counter.put()
counter2.put()
self.response.headers['Set-Cookie'] = 'auth=' + member.auth + '; expires=' + (datetime.datetime.now() + datetime.timedelta(days=365)).strftime("%a, %d-%b-%Y %H:%M:%S GMT") + '; path=/'
memcache.delete('member_total')
self.redirect('/')
else:
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'signup.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'signup.html')
output = template.render(path, template_values)
self.response.out.write(output)
class SignoutHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
member = False
template_values = {}
template_values['site'] = site
template_values['page_title'] = site.title + u' › 登出'
template_values['system_version'] = SYSTEM_VERSION
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
cookies = Cookies(self, max_age = 86400, path = '/')
del cookies['auth']
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'signout.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'signout.html')
output = template.render(path, template_values)
self.response.out.write(output)
class ForgotHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['rnd'] = random.randrange(1, 100)
template_values['site'] = site
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › 重新设置密码'
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'forgot.html')
output = template.render(path, template_values)
self.response.out.write(output)
def post(self):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['rnd'] = random.randrange(1, 100)
template_values['site'] = site
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › 重新设置密码'
# Verification: username & email
username = self.request.get('username').strip().lower()
email = self.request.get('email').strip().lower()
q = db.GqlQuery("SELECT * FROM Member WHERE username_lower = :1 AND email = :2", username, email)
if q.count() == 1:
one = q[0]
q2 = db.GqlQuery("SELECT * FROM PasswordResetToken WHERE timestamp > :1 AND email = :2", (int(time.time()) - 86400), email)
if q2.count() > 2:
error_message = '你不能在 24 小时内进行超过 2 次的密码重设操作。'
template_values['errors'] = 1
template_values['error_message'] = error_message
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'forgot.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
token = ''.join([str(random.randint(0, 9)) for i in range(32)])
prt = PasswordResetToken()
prt.token = token
prt.member = one
prt.email = one.email
prt.timestamp = int(time.time())
prt.put()
mail_template_values = {}
mail_template_values['site'] = site
mail_template_values['one'] = one
mail_template_values['host'] = self.request.headers['Host']
mail_template_values['token'] = token
mail_template_values['ua'] = self.request.headers['User-Agent']
mail_template_values['ip'] = self.request.remote_addr
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mail', 'reset_password.txt')
output = template.render(path, mail_template_values)
result = mail.send_mail(sender="v2ex.livid@me.com",
to=one.email,
subject="=?UTF-8?B?" + base64.b64encode((u"[" + site.title + u"] 重新设置密码").encode('utf-8')) + "?=",
body=output)
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'forgot_sent.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
error_message = '无法找到匹配的用户名和邮箱记录'
template_values['errors'] = 1
template_values['error_message'] = error_message
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'forgot.html')
output = template.render(path, template_values)
self.response.out.write(output)
class PasswordResetHandler(BaseHandler):
def get(self, token):
site = GetSite()
template_values = {}
template_values['site'] = site
member = False
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
token = str(token.strip().lower())
q = db.GqlQuery("SELECT * FROM PasswordResetToken WHERE token = :1 AND valid = 1", token)
if q.count() == 1:
prt = q[0]
template_values['page_title'] = site.title + u' › 重新设置密码'
template_values['token'] = prt
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'reset_password.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'token_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
def post(self, token):
site = GetSite()
template_values = {}
template_values['site'] = site
member = False
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
token = str(token.strip().lower())
q = db.GqlQuery("SELECT * FROM PasswordResetToken WHERE token = :1 AND valid = 1", token)
if q.count() == 1:
prt = q[0]
template_values['page_title'] = site.title + u' › 重新设置密码'
template_values['token'] = prt
# Verification
errors = 0
new_password = str(self.request.get('new_password').strip())
new_password_again = str(self.request.get('new_password_again').strip())
            if new_password == '' or new_password_again == '':
errors = errors + 1
error_message = '请输入两次新密码'
if errors == 0:
if new_password != new_password_again:
errors = errors + 1
error_message = '两次输入的新密码不一致'
if errors == 0:
if len(new_password) > 32:
errors = errors + 1
error_message = '新密码长度不能超过 32 个字符'
if errors == 0:
q2 = db.GqlQuery("SELECT * FROM Member WHERE num = :1", prt.member.num)
one = q2[0]
one.password = hashlib.sha1(new_password).hexdigest()
one.auth = hashlib.sha1(str(one.num) + ':' + one.password).hexdigest()
one.put()
prt.valid = 0
prt.put()
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'reset_password_ok.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
template_values['errors'] = errors
template_values['error_message'] = error_message
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'reset_password.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'token_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
class NodeGraphHandler(BaseHandler):
def get(self, node_name):
site = GetSite()
browser = detect(self.request)
self.session = Session()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
if member:
template_values['member'] = member
can_create = False
can_manage = False
if site.topic_create_level > 999:
if member:
can_create = True
else:
if member:
if member.level <= site.topic_create_level:
can_create = True
if member:
if member.level == 0:
can_manage = True
template_values['can_create'] = can_create
template_values['can_manage'] = can_manage
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
node = GetKindByName('Node', node_name)
template_values['node'] = node
if node:
template_values['feed_link'] = '/feed/' + node.name + '.xml'
template_values['feed_title'] = site.title + u' › ' + node.title
template_values['canonical'] = 'http://' + site.domain + '/go/' + node.name
if node.parent_node_name is None:
siblings = []
else:
siblings = db.GqlQuery("SELECT * FROM Node WHERE parent_node_name = :1 AND name != :2", node.parent_node_name, node.name)
template_values['siblings'] = siblings
if member:
favorited = member.hasFavorited(node)
template_values['favorited'] = favorited
recent_nodes = memcache.get('member::' + str(member.num) + '::recent_nodes')
recent_nodes_ids = memcache.get('member::' + str(member.num) + '::recent_nodes_ids')
if recent_nodes and recent_nodes_ids:
if (node.num in recent_nodes_ids) is not True:
recent_nodes.insert(0, node)
recent_nodes_ids.insert(0, node.num)
memcache.set('member::' + str(member.num) + '::recent_nodes', recent_nodes, 7200)
memcache.set('member::' + str(member.num) + '::recent_nodes_ids', recent_nodes_ids, 7200)
else:
recent_nodes = []
recent_nodes.append(node)
recent_nodes_ids = []
recent_nodes_ids.append(node.num)
memcache.set('member::' + str(member.num) + '::recent_nodes', recent_nodes, 7200)
memcache.set('member::' + str(member.num) + '::recent_nodes_ids', recent_nodes_ids, 7200)
template_values['recent_nodes'] = recent_nodes
template_values['page_title'] = site.title + u' › ' + node.title
else:
template_values['page_title'] = site.title + u' › 节点未找到'
section = False
if node:
section = GetKindByNum('Section', node.section_num)
template_values['section'] = section
if browser['ios']:
if (node):
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'node_graph.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'node_not_found.html')
else:
if (node):
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node_graph.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
class NodeHandler(webapp.RequestHandler):
def get(self, node_name):
site = GetSite()
browser = detect(self.request)
self.session = Session()
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
template_values['system_version'] = SYSTEM_VERSION
member = CheckAuth(self)
if member:
template_values['member'] = member
can_create = False
can_manage = False
if site.topic_create_level > 999:
if member:
can_create = True
else:
if member:
if member.level <= site.topic_create_level:
can_create = True
if member:
if member.level == 0:
can_manage = True
template_values['can_create'] = can_create
template_values['can_manage'] = can_manage
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
node = GetKindByName('Node', node_name)
template_values['node'] = node
pagination = False
pages = 1
page = 1
page_size = 15
start = 0
has_more = False
more = 1
has_previous = False
previous = 1
if node:
template_values['feed_link'] = '/feed/' + node.name + '.xml'
template_values['feed_title'] = site.title + u' › ' + node.title
template_values['canonical'] = 'http://' + site.domain + '/go/' + node.name
if member:
favorited = member.hasFavorited(node)
template_values['favorited'] = favorited
recent_nodes = memcache.get('member::' + str(member.num) + '::recent_nodes')
recent_nodes_ids = memcache.get('member::' + str(member.num) + '::recent_nodes_ids')
if recent_nodes and recent_nodes_ids:
if (node.num in recent_nodes_ids) is not True:
recent_nodes.insert(0, node)
recent_nodes_ids.insert(0, node.num)
memcache.set('member::' + str(member.num) + '::recent_nodes', recent_nodes, 7200)
memcache.set('member::' + str(member.num) + '::recent_nodes_ids', recent_nodes_ids, 7200)
else:
recent_nodes = []
recent_nodes.append(node)
recent_nodes_ids = []
recent_nodes_ids.append(node.num)
memcache.set('member::' + str(member.num) + '::recent_nodes', recent_nodes, 7200)
memcache.set('member::' + str(member.num) + '::recent_nodes_ids', recent_nodes_ids, 7200)
template_values['recent_nodes'] = recent_nodes
template_values['page_title'] = site.title + u' › ' + node.title
# Pagination
if node.topics > page_size:
pagination = True
else:
pagination = False
if pagination:
if node.topics % page_size == 0:
pages = int(node.topics / page_size)
else:
pages = int(node.topics / page_size) + 1
page = self.request.get('p')
if (page == '') or (page is None):
page = 1
else:
page = int(page)
if page > pages:
page = pages
else:
if page < 1:
page = 1
if page < pages:
has_more = True
more = page + 1
if page > 1:
has_previous = True
previous = page - 1
start = (page - 1) * page_size
template_values['canonical'] = 'http://' + site.domain + '/go/' + node.name + '?p=' + str(page)
else:
template_values['page_title'] = site.title + u' › 节点未找到'
template_values['pagination'] = pagination
template_values['pages'] = pages
template_values['page'] = page
template_values['page_size'] = page_size
template_values['has_more'] = has_more
template_values['more'] = more
template_values['has_previous'] = has_previous
template_values['previous'] = previous
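        # Worked example of the pagination above (comment added for clarity, not
        # in the original): a node with 47 topics and page_size 15 gives pages = 4;
        # requesting p=2 yields start = 15, has_previous = True and has_more = True.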
section = False
if node:
section = GetKindByNum('Section', node.section_num)
template_values['section'] = section
topics = False
if node:
q3 = db.GqlQuery("SELECT * FROM Topic WHERE node_num = :1 ORDER BY last_touched DESC LIMIT " + str(start) + ", " + str(page_size), node.num)
topics = q3
template_values['latest'] = topics
if browser['ios']:
if (node):
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'node.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'node_not_found.html')
else:
if (node):
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'node_not_found.html')
output = template.render(path, template_values)
self.response.out.write(output)
class NodeApiHandler(webapp.RequestHandler):
def get(self, node_name):
site = GetSite()
node = GetKindByName('Node', node_name)
if node:
template_values = {}
template_values['site'] = site
template_values['node'] = node
path = os.path.join(os.path.dirname(__file__), 'tpl', 'api', 'node.json')
self.response.headers['Content-type'] = 'application/json;charset=UTF-8'
output = template.render(path, template_values)
self.response.out.write(output)
else:
self.error(404)
class SearchHandler(webapp.RequestHandler):
def get(self, q):
site = GetSite()
q = urllib.unquote(q)
template_values = {}
template_values['site'] = site
member = CheckAuth(self)
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
if member:
template_values['member'] = member
template_values['page_title'] = site.title + u' › 搜索 ' + q.decode('utf-8')
template_values['q'] = q
if config.fts_enabled is not True:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'search_unavailable.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
if re.findall('^([a-zA-Z0-9\_]+)$', q):
node = GetKindByName('Node', q.lower())
if node is not None:
template_values['node'] = node
# Fetch result
q_lowered = q.lower()
q_md5 = hashlib.md5(q_lowered).hexdigest()
topics = memcache.get('q::' + q_md5)
if topics is None:
try:
if os.environ['SERVER_SOFTWARE'] == 'Development/1.0':
fts = u'http://127.0.0.1:20000/search?q=' + str(urllib.quote(q_lowered))
else:
fts = u'http://' + config.fts_server + '/search?q=' + str(urllib.quote(q_lowered))
response = urlfetch.fetch(fts, headers = {"Authorization" : "Basic %s" % base64.b64encode(config.fts_username + ':' + config.fts_password)})
if response.status_code == 200:
results = json.loads(response.content)
topics = []
for num in results:
topics.append(GetKindByNum('Topic', num))
template_values['topics'] = topics
memcache.set('q::' + q_md5, topics, 86400)
except:
template_values['topics'] = []
else:
template_values['topics'] = topics
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'search.html')
output = template.render(path, template_values)
self.response.out.write(output)
class DispatcherHandler(webapp.RequestHandler):
def post(self):
referer = self.request.headers['Referer']
q = self.request.get('q').strip()
if len(q) > 0:
self.redirect('/q/' + q)
else:
self.redirect(referer)
class RouterHandler(webapp.RequestHandler):
def get(self, path):
if path.find('/') != -1:
# Page
parts = path.split('/')
if len(parts) == 2:
minisite_name = parts[0]
if parts[1] == '':
page_name = 'index.html'
else:
page_name = parts[1]
minisite = GetKindByName('Minisite', minisite_name)
if minisite is not False:
page = memcache.get(path)
if page is None:
q = db.GqlQuery("SELECT * FROM Page WHERE name = :1 AND minisite = :2", page_name, minisite)
if q.count() == 1:
page = q[0]
memcache.set(path, page, 864000)
if page.mode == 1:
# Dynamic embedded page
template_values = {}
site = GetSite()
template_values['site'] = site
member = CheckAuth(self)
if member:
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['rnd'] = random.randrange(1, 100)
template_values['page'] = page
template_values['minisite'] = page.minisite
template_values['page_title'] = site.title + u' › ' + page.minisite.title.decode('utf-8') + u' › ' + page.title.decode('utf-8')
taskqueue.add(url='/hit/page/' + str(page.key()))
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'page.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
# Static standalone page
taskqueue.add(url='/hit/page/' + str(page.key()))
expires_date = datetime.datetime.utcnow() + datetime.timedelta(days=10)
expires_str = expires_date.strftime("%d %b %Y %H:%M:%S GMT")
self.response.headers.add_header("Expires", expires_str)
self.response.headers['Cache-Control'] = 'max-age=864000, must-revalidate'
self.response.headers['Content-Type'] = page.content_type
self.response.out.write(page.content)
else:
minisite_name = parts[0]
page_name = 'index.html'
minisite = GetKindByName('Minisite', minisite_name)
if minisite is not False:
page = memcache.get(path)
if page is None:
q = db.GqlQuery("SELECT * FROM Page WHERE name = :1 AND minisite = :2", page_name, minisite)
if q.count() == 1:
page = q[0]
memcache.set(path, page, 864000)
if page.mode == 1:
# Dynamic embedded page
template_values = {}
site = GetSite()
template_values['site'] = site
member = CheckAuth(self)
if member:
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['rnd'] = random.randrange(1, 100)
template_values['page'] = page
template_values['minisite'] = page.minisite
template_values['page_title'] = site.title + u' › ' + page.minisite.title.decode('utf-8') + u' › ' + page.title.decode('utf-8')
taskqueue.add(url='/hit/page/' + str(page.key()))
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'page.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
# Static standalone page
taskqueue.add(url='/hit/page/' + str(page.key()))
expires_date = datetime.datetime.utcnow() + datetime.timedelta(days=10)
expires_str = expires_date.strftime("%d %b %Y %H:%M:%S GMT")
self.response.headers.add_header("Expires", expires_str)
self.response.headers['Cache-Control'] = 'max-age=864000, must-revalidate'
self.response.headers['Content-Type'] = page.content_type
self.response.out.write(page.content)
else:
# Site
page = memcache.get(path + '/index.html')
if page:
taskqueue.add(url='/hit/page/' + str(page.key()))
if page.mode == 1:
# Dynamic embedded page
template_values = {}
site = GetSite()
template_values['site'] = site
member = CheckAuth(self)
if member:
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['rnd'] = random.randrange(1, 100)
template_values['page'] = page
template_values['minisite'] = page.minisite
template_values['page_title'] = site.title + u' › ' + page.minisite.title.decode('utf-8') + u' › ' + page.title.decode('utf-8')
taskqueue.add(url='/hit/page/' + str(page.key()))
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'page.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
expires_date = datetime.datetime.utcnow() + datetime.timedelta(days=10)
expires_str = expires_date.strftime("%d %b %Y %H:%M:%S GMT")
self.response.headers.add_header("Expires", expires_str)
self.response.headers['Cache-Control'] = 'max-age=864000, must-revalidate'
self.response.headers['Content-Type'] = page.content_type
self.response.out.write(page.content)
else:
minisite_name = path
minisite = GetKindByName('Minisite', minisite_name)
q = db.GqlQuery("SELECT * FROM Page WHERE name = :1 AND minisite = :2", 'index.html', minisite)
if q.count() == 1:
page = q[0]
memcache.set(path + '/index.html', page, 864000)
if page.mode == 1:
# Dynamic embedded page
template_values = {}
site = GetSite()
template_values['site'] = site
member = CheckAuth(self)
if member:
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
template_values['rnd'] = random.randrange(1, 100)
template_values['page'] = page
template_values['minisite'] = page.minisite
template_values['page_title'] = site.title + u' › ' + page.minisite.title.decode('utf-8') + u' › ' + page.title.decode('utf-8')
taskqueue.add(url='/hit/page/' + str(page.key()))
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'page.html')
output = template.render(path, template_values)
self.response.out.write(output)
else:
# Static standalone page
taskqueue.add(url='/hit/page/' + str(page.key()))
expires_date = datetime.datetime.utcnow() + datetime.timedelta(days=10)
expires_str = expires_date.strftime("%d %b %Y %H:%M:%S GMT")
self.response.headers.add_header("Expires", expires_str)
self.response.headers['Cache-Control'] = 'max-age=864000, must-revalidate'
self.response.headers['Content-Type'] = page.content_type
self.response.out.write(page.content)
class ChangesHandler(webapp.RequestHandler):
def get(self):
site = GetSite()
browser = detect(self.request)
template_values = {}
template_values['site'] = site
template_values['rnd'] = random.randrange(1, 100)
template_values['system_version'] = SYSTEM_VERSION
template_values['page_title'] = site.title + u' › 全站最新更改记录'
member = CheckAuth(self)
template_values['member'] = member
l10n = GetMessages(self, member, site)
template_values['l10n'] = l10n
topic_total = memcache.get('topic_total')
if topic_total is None:
q2 = db.GqlQuery("SELECT * FROM Counter WHERE name = 'topic.total'")
if (q2.count() > 0):
topic_total = q2[0].value
else:
topic_total = 0
memcache.set('topic_total', topic_total, 600)
template_values['topic_total'] = topic_total
page_size = 60
pages = 1
if topic_total > page_size:
if (topic_total % page_size) > 0:
pages = int(math.floor(topic_total / page_size)) + 1
else:
pages = int(math.floor(topic_total / page_size))
try:
page_current = int(self.request.get('p'))
if page_current < 1:
page_current = 1
if page_current > pages:
page_current = pages
except:
page_current = 1
page_start = (page_current - 1) * page_size
template_values['pages'] = pages
template_values['page_current'] = page_current
i = 1
ps = []
while i <= pages:
ps.append(i)
i = i + 1
template_values['ps'] = ps
latest = memcache.get('q_changes_' + str(page_current))
if (latest):
template_values['latest'] = latest
else:
q1 = db.GqlQuery("SELECT * FROM Topic ORDER BY last_touched DESC LIMIT " + str(page_start) + "," + str(page_size))
topics = []
for topic in q1:
topics.append(topic)
memcache.set('q_changes_' + str(page_current), topics, 120)
template_values['latest'] = topics
template_values['latest_total'] = len(topics)
if browser['ios']:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'mobile', 'changes.html')
else:
path = os.path.join(os.path.dirname(__file__), 'tpl', 'desktop', 'changes.html')
output = template.render(path, template_values)
self.response.out.write(output)
def main():
application = webapp.WSGIApplication([
('/', HomeHandler),
('/planes/?', PlanesHandler),
('/recent', RecentHandler),
('/ua', UAHandler),
('/signin', SigninHandler),
('/signup', SignupHandler),
('/signout', SignoutHandler),
('/forgot', ForgotHandler),
('/reset/([0-9]+)', PasswordResetHandler),
('/go/([a-zA-Z0-9]+)/graph', NodeGraphHandler),
('/go/([a-zA-Z0-9]+)', NodeHandler),
('/n/([a-zA-Z0-9]+).json', NodeApiHandler),
('/q/(.*)', SearchHandler),
('/_dispatcher', DispatcherHandler),
('/changes', ChangesHandler),
('/(.*)', RouterHandler)
],
debug=True)
util.run_wsgi_app(application)
if __name__ == '__main__':
main()
| bsd-3-clause |
wileeam/airflow | airflow/providers/microsoft/azure/operators/adls_list.py | 5 | 2549 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Iterable
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
from airflow.utils.decorators import apply_defaults
class AzureDataLakeStorageListOperator(BaseOperator):
"""
List all files from the specified path
This operator returns a python list with the names of files which can be used by
`xcom` in the downstream tasks.
:param path: The Azure Data Lake path to find the objects. Supports glob
strings (templated)
:type path: str
:param azure_data_lake_conn_id: The connection ID to use when
connecting to Azure Data Lake Storage.
:type azure_data_lake_conn_id: str
**Example**:
The following Operator would list all the Parquet files from ``folder/output/``
folder in the specified ADLS account ::
adls_files = AzureDataLakeStorageListOperator(
task_id='adls_files',
path='folder/output/*.parquet',
azure_data_lake_conn_id='azure_data_lake_default'
)
"""
template_fields = ('path',) # type: Iterable[str]
ui_color = '#901dd2'
@apply_defaults
def __init__(self,
path,
azure_data_lake_conn_id='azure_data_lake_default',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.path = path
self.azure_data_lake_conn_id = azure_data_lake_conn_id
def execute(self, context):
hook = AzureDataLakeHook(
azure_data_lake_conn_id=self.azure_data_lake_conn_id
)
self.log.info('Getting list of ADLS files in path: %s', self.path)
return hook.list(path=self.path)
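# Illustrative (hypothetical) downstream usage, not part of this module: because
# execute() returns the file list, Airflow pushes it to XCom, so a later task could
# retrieve it with something like
#
#   files = context['ti'].xcom_pull(task_ids='adls_files')
#
# where 'adls_files' is the task_id assumed in the docstring example above.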
| apache-2.0 |
nhuthep91/aBasic | plugins/ti.alloy/plugin.py | 1729 | 5251 | import os, sys, subprocess, hashlib
import subprocess
def check_output(*popenargs, **kwargs):
r"""Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
>>> check_output(['/usr/bin/python', '--version'])
Python 2.6.2
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def compile(config):
paths = {}
binaries = ["alloy","node"]
dotAlloy = os.path.abspath(os.path.join(config['project_dir'], 'build', '.alloynewcli'))
if os.path.exists(dotAlloy):
print "[DEBUG] build/.alloynewcli file found, skipping plugin..."
os.remove(dotAlloy)
else:
for binary in binaries:
try:
# see if the environment variable is defined
paths[binary] = os.environ["ALLOY_" + ("NODE_" if binary == "node" else "") + "PATH"]
except KeyError as ex:
# next try PATH, and then our guess paths
if sys.platform == "darwin" or sys.platform.startswith('linux'):
userPath = os.environ["HOME"]
guessPaths = [
"/usr/local/bin/"+binary,
"/opt/local/bin/"+binary,
userPath+"/local/bin/"+binary,
"/opt/bin/"+binary,
"/usr/bin/"+binary,
"/usr/local/share/npm/bin/"+binary
]
try:
binaryPath = check_output(["which",binary], stderr=subprocess.STDOUT).strip()
print "[DEBUG] %s installed at '%s'" % (binary,binaryPath)
except:
print "[WARN] Couldn't find %s on your PATH:" % binary
print "[WARN] %s" % os.environ["PATH"]
print "[WARN]"
print "[WARN] Checking for %s in a few default locations:" % binary
for p in guessPaths:
sys.stdout.write("[WARN] %s -> " % p)
if os.path.exists(p):
binaryPath = p
print "FOUND"
break
else:
print "not found"
binaryPath = None
if binaryPath is None:
print "[ERROR] Couldn't find %s" % binary
sys.exit(1)
else:
paths[binary] = binaryPath
# no guesses on windows, just use the PATH
elif sys.platform == "win32":
paths["alloy"] = "alloy.cmd"
f = os.path.abspath(os.path.join(config['project_dir'], 'app'))
if os.path.exists(f):
print "[INFO] alloy app found at %s" % f
rd = os.path.abspath(os.path.join(config['project_dir'], 'Resources'))
devicefamily = 'none'
simtype = 'none'
version = '0'
deploytype = 'development'
if config['platform']==u'ios':
version = config['iphone_version']
devicefamily = config['devicefamily']
deploytype = config['deploytype']
if config['platform']==u'android':
builder = config['android_builder']
version = builder.tool_api_level
deploytype = config['deploy_type']
if config['platform']==u'mobileweb':
builder = config['mobileweb_builder']
deploytype = config['deploytype']
cfg = "platform=%s,version=%s,simtype=%s,devicefamily=%s,deploytype=%s," % (config['platform'],version,simtype,devicefamily,deploytype)
if sys.platform == "win32":
cmd = [paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
else:
cmd = [paths["node"], paths["alloy"], "compile", f, "--no-colors", "--config", cfg]
print "[INFO] Executing Alloy compile:"
print "[INFO] %s" % " ".join(cmd)
try:
print check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
if hasattr(ex, 'output'):
print ex.output
print "[ERROR] Alloy compile failed"
retcode = 1
if hasattr(ex, 'returncode'):
retcode = ex.returncode
sys.exit(retcode)
except EnvironmentError as ex:
print "[ERROR] Unexpected error with Alloy compiler plugin: %s" % ex.strerror
sys.exit(2)
| apache-2.0 |
mclaughlin6464/pdnn | models/dnn_reg.py | 1 | 11677 | '''
@Author Sean McLaughlin
This is my copy of the dnn module. I'm adding some features so I can use DNN for regression rather than just classification.
'''
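# Illustrative usage sketch (assumptions flagged, not part of the original module):
# a DNN_REG is built from a pdnn-style network configuration object and fine-tuned
# with SGD, e.g.
#
#   numpy_rng = numpy.random.RandomState(123)
#   net = DNN_REG(numpy_rng, cfg=cfg)   # cfg: a network config with n_ins, n_outs, etc. (assumed)
#   train_fn, valid_fn = net.build_finetune_functions((train_x, train_y),
#                                                     (valid_x, valid_y), batch_size=256)
#
# Unlike the classification DNN, the targets self.y form a real-valued T.matrix.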
import cPickle
import gzip
import os
import sys
import time
import collections
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
from layers.regressions_sgd import Regression
from layers.mlp import HiddenLayer, DropoutHiddenLayer, _dropout_from_layer
from models.dnn import DNN
from io_func import smart_open
from io_func.model_io import _nnet2file, _file2nnet
class DNN_REG(object):
def __init__(self, numpy_rng, theano_rng=None,
cfg = None, # the network configuration
dnn_shared = None, shared_layers=[], input = None):
self.layers = []
self.params = []
self.delta_params = []
self.cfg = cfg
self.n_ins = cfg.n_ins; self.n_outs = cfg.n_outs
self.hidden_layers_sizes = cfg.hidden_layers_sizes
self.hidden_layers_number = len(self.hidden_layers_sizes)
self.activation = cfg.activation
self.do_maxout = cfg.do_maxout; self.pool_size = cfg.pool_size
self.max_col_norm = cfg.max_col_norm
self.l1_reg = cfg.l1_reg
self.l2_reg = cfg.l2_reg
self.non_updated_layers = cfg.non_updated_layers
if not theano_rng:
theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
# allocate symbolic variables for the data
        if input is None:
#TODO CHANGE BACK
self.x = T.matrix('x')
else:
self.x = input
self.y = T.matrix('y')
for i in xrange(self.hidden_layers_number):
# construct the hidden layer
if i == 0:
input_size = self.n_ins
layer_input = self.x
else:
input_size = self.hidden_layers_sizes[i - 1]
layer_input = self.layers[-1].output
W = None; b = None
if (i in shared_layers) :
W = dnn_shared.layers[i].W; b = dnn_shared.layers[i].b
if self.do_maxout == True:
hidden_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=self.hidden_layers_sizes[i] * self.pool_size,
W = W, b = b,
activation = (lambda x: 1.0*x),
do_maxout = True, pool_size = self.pool_size)
else:
hidden_layer = HiddenLayer(rng=numpy_rng,
input=layer_input,
n_in=input_size,
n_out=self.hidden_layers_sizes[i],
W = W, b = b,
activation=self.activation)
# add the layer to our list of layers
self.layers.append(hidden_layer)
# if the layer index is included in self.non_updated_layers, parameters of this layer will not be updated
if (i not in self.non_updated_layers):
self.params.extend(hidden_layer.params)
self.delta_params.extend(hidden_layer.delta_params)
        # We now need to add a regression layer on top of the MLP
self.regLayer = Regression(
input= self.layers[-1].output if self.hidden_layers_number>0 else self.x,
n_in=self.hidden_layers_sizes[-1] if self.hidden_layers_number>0 else self.n_ins, n_out=self.n_outs)
#print self.hidden_layers_sizes[-1]
#print self.n_outs
if self.n_outs > 0:
self.layers.append(self.regLayer)
self.params.extend(self.regLayer.params)
self.delta_params.extend(self.regLayer.delta_params)
# compute the cost for second phase of training,
# defined as the negative log likelihood
self.finetune_cost = self.regLayer.negative_log_likelihood(self.y)
self.errors = self.finetune_cost
if self.l1_reg is not None:
for i in xrange(self.hidden_layers_number):
W = self.layers[i].W
self.finetune_cost += self.l1_reg * (abs(W).sum())
if self.l2_reg is not None:
for i in xrange(self.hidden_layers_number):
W = self.layers[i].W
self.finetune_cost += self.l2_reg * T.sqr(W).sum()
def build_finetune_functions(self, train_shared_xy, valid_shared_xy, batch_size):
#print len(self.layers)
#print [T.shape(l.W)[0] for l in self.layers]
(train_set_x, train_set_y) = train_shared_xy
(valid_set_x, valid_set_y) = valid_shared_xy
#print T.shape(train_set_x), T.shape(train_set_y)
index = T.lscalar('index') # index to a [mini]batch
learning_rate = T.fscalar('learning_rate')
momentum = T.fscalar('momentum')
#theano.printing.pydotprint(self.finetune_cost, outfile="finetune_cost.png", var_with_name_simple=True)
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
#theano.printing.pydotprint(gparams, outfile="gparams.png", var_with_name_simple=True)
# compute list of fine-tuning updates
#updates = collections.OrderedDict()
updates = theano.compat.python2x.OrderedDict()
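        # Descriptive note (added): the two loops below implement classical momentum,
        # i.e. delta <- momentum * delta - learning_rate * grad, then param <- param + delta.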
for dparam, gparam in zip(self.delta_params, gparams):
updates[dparam] = momentum * dparam - gparam*learning_rate
for dparam, param in zip(self.delta_params, self.params):
updates[param] = param + updates[dparam]
if self.max_col_norm is not None:
for i in xrange(self.hidden_layers_number):
W = self.layers[i].W
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
desired_norms = T.clip(col_norms, 0, self.max_col_norm)
updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))
#theano.printing.pydotprint(self.errors, outfile="errors.png", var_with_name_simple=True)
train_fn = theano.function(inputs=[index, theano.Param(learning_rate, default = 0.0001),
theano.Param(momentum, default = 0.5)],
outputs=self.errors,
updates=updates,
givens={
self.x: train_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: train_set_y[index * batch_size:
(index + 1) * batch_size]})
#theano.printing.pydotprint(train_fn , outfile="train_fn.png", var_with_name_simple=True)
valid_fn = theano.function(inputs=[index],
outputs=self.errors,
givens={
self.x: valid_set_x[index * batch_size:
(index + 1) * batch_size],
self.y: valid_set_y[index * batch_size:
(index + 1) * batch_size]})
return train_fn, valid_fn
def build_extract_feat_function(self, output_layer):
feat = T.matrix('feat')
out_da = theano.function([feat], self.layers[output_layer].output, updates = None, givens={self.x:feat}, on_unused_input='warn')
return out_da
def build_finetune_functions_kaldi(self, train_shared_xy, valid_shared_xy):
(train_set_x, train_set_y) = train_shared_xy
(valid_set_x, valid_set_y) = valid_shared_xy
index = T.lscalar('index') # index to a [mini]batch
learning_rate = T.fscalar('learning_rate')
momentum = T.fscalar('momentum')
# compute the gradients with respect to the model parameters
gparams = T.grad(self.finetune_cost, self.params)
# compute list of fine-tuning updates
updates = collections.OrderedDict()
for dparam, gparam in zip(self.delta_params, gparams):
updates[dparam] = momentum * dparam - gparam*learning_rate
for dparam, param in zip(self.delta_params, self.params):
updates[param] = param + updates[dparam]
if self.max_col_norm is not None:
for i in xrange(self.hidden_layers_number):
W = self.layers[i].W
if W in updates:
updated_W = updates[W]
col_norms = T.sqrt(T.sum(T.sqr(updated_W), axis=0))
desired_norms = T.clip(col_norms, 0, self.max_col_norm)
updates[W] = updated_W * (desired_norms / (1e-7 + col_norms))
train_fn = theano.function(inputs=[theano.Param(learning_rate, default = 0.0001),
theano.Param(momentum, default = 0.5)],
outputs=self.errors,
updates=updates,
givens={self.x: train_set_x, self.y: train_set_y})
valid_fn = theano.function(inputs=[],
outputs=self.errors,
givens={self.x: valid_set_x, self.y: valid_set_y})
return train_fn, valid_fn
def write_model_to_raw(self, file_path):
# output the model to tmp_path; this format is readable by PDNN
_nnet2file(self.layers, filename=file_path)
def write_model_to_kaldi(self, file_path, with_softmax = True):
# determine whether it's BNF based on layer sizes
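        # a bottleneck (BNF) layer is one that is narrower than both of its
        # neighbours; if such a layer exists, only the layers up to and
        # including it are exported, otherwise the whole network is written out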
        output_layer_number = -1
for layer_index in range(1, self.hidden_layers_number - 1):
cur_layer_size = self.hidden_layers_sizes[layer_index]
prev_layer_size = self.hidden_layers_sizes[layer_index-1]
next_layer_size = self.hidden_layers_sizes[layer_index+1]
if cur_layer_size < prev_layer_size and cur_layer_size < next_layer_size:
                output_layer_number = layer_index + 1
                break
layer_number = len(self.layers)
if output_layer_number == -1:
output_layer_number = layer_number
fout = smart_open(file_path, 'wb')
for i in xrange(output_layer_number):
activation_text = '<' + self.cfg.activation_text + '>'
if i == (layer_number-1) and with_softmax: # we assume that the last layer is a softmax layer
activation_text = '<softmax>'
W_mat = self.layers[i].W.get_value()
b_vec = self.layers[i].b.get_value()
input_size, output_size = W_mat.shape
W_layer = []; b_layer = ''
for rowX in xrange(output_size):
W_layer.append('')
for x in xrange(input_size):
for t in xrange(output_size):
W_layer[t] = W_layer[t] + str(W_mat[x][t]) + ' '
for x in xrange(output_size):
b_layer = b_layer + str(b_vec[x]) + ' '
fout.write('<affinetransform> ' + str(output_size) + ' ' + str(input_size) + '\n')
fout.write('[' + '\n')
for x in xrange(output_size):
fout.write(W_layer[x].strip() + '\n')
fout.write(']' + '\n')
fout.write('[ ' + b_layer.strip() + ' ]' + '\n')
if activation_text == '<maxout>':
fout.write(activation_text + ' ' + str(output_size/self.pool_size) + ' ' + str(output_size) + '\n')
else:
fout.write(activation_text + ' ' + str(output_size) + ' ' + str(output_size) + '\n')
fout.close()
| apache-2.0 |
nevercast/home-assistant | homeassistant/components/sensor/sabnzbd.py | 3 | 4100 | """
homeassistant.components.sensor.sabnzbd
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Monitors SABnzbd NZB client API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.sabnzbd/
"""
from datetime import timedelta
import logging
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
REQUIREMENTS = ['https://github.com/jamespcole/home-assistant-nzb-clients/'
'archive/616cad59154092599278661af17e2a9f2cf5e2a9.zip'
'#python-sabnzbd==0.1']
SENSOR_TYPES = {
'current_status': ['Status', ''],
'speed': ['Speed', 'MB/s'],
'queue_size': ['Queue', 'MB'],
'queue_remaining': ['Left', 'MB'],
'disk_size': ['Disk', 'GB'],
'disk_free': ['Disk Free', 'GB'],
}
_LOGGER = logging.getLogger(__name__)
_THROTTLED_REFRESH = None
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the SABnzbd sensors. """
from pysabnzbd import SabnzbdApi, SabnzbdApiException
api_key = config.get("api_key")
base_url = config.get("base_url")
name = config.get("name", "SABnzbd")
if not base_url:
_LOGGER.error('Missing config variable base_url')
return False
if not api_key:
_LOGGER.error('Missing config variable api_key')
return False
sab_api = SabnzbdApi(base_url, api_key)
try:
sab_api.check_available()
except SabnzbdApiException:
_LOGGER.exception("Connection to SABnzbd API failed.")
return False
# pylint: disable=global-statement
global _THROTTLED_REFRESH
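    # wrap the queue refresh so that, however many sensors ask for an update,
    # the SABnzbd API is hit at most once per second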
_THROTTLED_REFRESH = Throttle(timedelta(seconds=1))(sab_api.refresh_queue)
dev = []
for variable in config['monitored_variables']:
if variable['type'] not in SENSOR_TYPES:
_LOGGER.error('Sensor type: "%s" does not exist', variable['type'])
else:
dev.append(SabnzbdSensor(variable['type'], sab_api, name))
add_devices(dev)
class SabnzbdSensor(Entity):
""" Represents an SABnzbd sensor. """
def __init__(self, sensor_type, sabnzb_client, client_name):
self._name = SENSOR_TYPES[sensor_type][0]
self.sabnzb_client = sabnzb_client
self.type = sensor_type
self.client_name = client_name
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
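        """ Returns the name of the sensor. """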
return self.client_name + ' ' + self._name
@property
def state(self):
""" Returns the state of the device. """
return self._state
@property
def unit_of_measurement(self):
""" Unit of measurement of this entity, if any. """
return self._unit_of_measurement
def refresh_sabnzbd_data(self):
""" Calls the throttled SABnzbd refresh method. """
if _THROTTLED_REFRESH is not None:
from pysabnzbd import SabnzbdApiException
try:
_THROTTLED_REFRESH()
except SabnzbdApiException:
_LOGGER.exception(
self.name + " Connection to SABnzbd API failed."
)
def update(self):
self.refresh_sabnzbd_data()
if self.sabnzb_client.queue:
if self.type == 'current_status':
self._state = self.sabnzb_client.queue.get('status')
elif self.type == 'speed':
mb_spd = float(self.sabnzb_client.queue.get('kbpersec')) / 1024
self._state = round(mb_spd, 1)
elif self.type == 'queue_size':
self._state = self.sabnzb_client.queue.get('mb')
elif self.type == 'queue_remaining':
self._state = self.sabnzb_client.queue.get('mbleft')
elif self.type == 'disk_size':
self._state = self.sabnzb_client.queue.get('diskspacetotal1')
elif self.type == 'disk_free':
self._state = self.sabnzb_client.queue.get('diskspace1')
else:
self._state = 'Unknown'
| mit |
creativcoder/servo | tests/wpt/css-tests/tools/wptserve/wptserve/constants.py | 141 | 4619 | from . import utils
content_types = utils.invert_dict({"text/html": ["htm", "html"],
"application/json": ["json"],
"application/xhtml+xml": ["xht", "xhtm", "xhtml"],
"application/xml": ["xml"],
"application/x-xpinstall": ["xpi"],
"text/javascript": ["js"],
"text/css": ["css"],
"text/plain": ["txt", "md"],
"image/svg+xml": ["svg"],
"image/gif": ["gif"],
"image/jpeg": ["jpg", "jpeg"],
"image/png": ["png"],
"image/bmp": ["bmp"],
"text/event-stream": ["event_stream"],
"text/cache-manifest": ["manifest"],
"video/mp4": ["mp4", "m4v"],
"audio/mp4": ["m4a"],
"audio/mpeg": ["mp3"],
"video/webm": ["webm"],
"audio/webm": ["weba"],
"video/ogg": ["ogg", "ogv"],
"audio/ogg": ["oga"],
"audio/x-wav": ["wav"],
"text/vtt": ["vtt"],})
response_codes = {
100: ('Continue', 'Request received, please continue'),
101: ('Switching Protocols',
'Switching to new protocol; obey Upgrade header'),
200: ('OK', 'Request fulfilled, document follows'),
201: ('Created', 'Document created, URL follows'),
202: ('Accepted',
'Request accepted, processing continues off-line'),
203: ('Non-Authoritative Information', 'Request fulfilled from cache'),
204: ('No Content', 'Request fulfilled, nothing follows'),
205: ('Reset Content', 'Clear input form for further input.'),
206: ('Partial Content', 'Partial content follows.'),
300: ('Multiple Choices',
'Object has several resources -- see URI list'),
301: ('Moved Permanently', 'Object moved permanently -- see URI list'),
302: ('Found', 'Object moved temporarily -- see URI list'),
303: ('See Other', 'Object moved -- see Method and URL list'),
304: ('Not Modified',
'Document has not changed since given time'),
305: ('Use Proxy',
'You must use proxy specified in Location to access this '
'resource.'),
307: ('Temporary Redirect',
'Object moved temporarily -- see URI list'),
400: ('Bad Request',
'Bad request syntax or unsupported method'),
401: ('Unauthorized',
'No permission -- see authorization schemes'),
402: ('Payment Required',
'No payment -- see charging schemes'),
403: ('Forbidden',
'Request forbidden -- authorization will not help'),
404: ('Not Found', 'Nothing matches the given URI'),
405: ('Method Not Allowed',
'Specified method is invalid for this resource.'),
406: ('Not Acceptable', 'URI not available in preferred format.'),
407: ('Proxy Authentication Required', 'You must authenticate with '
'this proxy before proceeding.'),
408: ('Request Timeout', 'Request timed out; try again later.'),
409: ('Conflict', 'Request conflict.'),
410: ('Gone',
'URI no longer exists and has been permanently removed.'),
411: ('Length Required', 'Client must specify Content-Length.'),
412: ('Precondition Failed', 'Precondition in headers is false.'),
413: ('Request Entity Too Large', 'Entity is too large.'),
414: ('Request-URI Too Long', 'URI is too long.'),
415: ('Unsupported Media Type', 'Entity body in unsupported format.'),
416: ('Requested Range Not Satisfiable',
'Cannot satisfy request range.'),
417: ('Expectation Failed',
'Expect condition could not be satisfied.'),
500: ('Internal Server Error', 'Server got itself in trouble'),
501: ('Not Implemented',
'Server does not support this operation'),
502: ('Bad Gateway', 'Invalid responses from another server/proxy.'),
503: ('Service Unavailable',
'The server cannot process the request due to a high load'),
504: ('Gateway Timeout',
'The gateway server did not receive a timely response'),
505: ('HTTP Version Not Supported', 'Cannot fulfill request.'),
}
| mpl-2.0 |
jGaboardi/LP_MIP | Gurobi_Dual_Standard.py | 1 | 3689 | '''
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
'''
# Building a Standard Dual Linear Programming Problem
# in Python/Gurobi[gurobipy]
'''
Adapted from:
Daskin, M. S.
1995
Network and Discrete Location: Models, Algorithms, and Applications
Hoboken, NJ, USA: John Wiley & Sons, Inc.
'''
# Imports
import numpy as np
import gurobipy as gbp
import datetime as dt
def GbpDualStd():
# Constants
Aij = np.random.randint(5, 50, 25)
Aij = Aij.reshape(5,5)
AijSum = np.sum(Aij)
Cj = np.random.randint(10, 20, 5)
CjSum = np.sum(Cj)
Bi = np.random.randint(10, 20, 5)
BiSum = np.sum(Bi)
# Matrix Shape
rows = range(len(Aij))
cols = range(len(Aij[0]))
# Instantiate Model
mDual_Standard_GUROBI = gbp.Model(' -- Standard Dual Linear Programming Problem -- ')
# Set Focus to Optimality
gbp.setParam('MIPFocus', 2)
# Decision Variables
desc_var = []
for orig in rows:
desc_var.append([])
desc_var[orig].append(mDual_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='u'+str(orig+1)))
# Slack Variables
slack_var = []
for dest in cols:
slack_var.append([])
slack_var[dest].append(mDual_Standard_GUROBI.addVar(vtype=gbp.GRB.CONTINUOUS,
name='t'+str(dest+1)))
# Update Model
mDual_Standard_GUROBI.update()
#Objective Function
mDual_Standard_GUROBI.setObjective(gbp.quicksum(Bi[orig]*desc_var[orig][0]
for orig in rows),
gbp.GRB.MAXIMIZE)
# Constraints
for dest in cols:
mDual_Standard_GUROBI.addConstr(gbp.quicksum(Aij[orig][dest]*desc_var[orig][0]
for orig in rows) +
slack_var[dest][0] -
Cj[dest] == 0)
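    # The model built above is the standard-form dual:
    #   max  Bi'u   subject to   Aij'u + t = Cj
    # with u and t continuous (gurobipy variables are non-negative by default).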
# Optimize
try:
mDual_Standard_GUROBI.optimize()
except Exception as e:
print ' ################################################################'
print ' < ISSUE : ', e, ' >'
print ' ################################################################'
# Write LP file
mDual_Standard_GUROBI.write('LP.lp')
print '\n*************************************************************************'
print ' | Decision Variables'
for v in mDual_Standard_GUROBI.getVars():
print ' | ', v.VarName, '=', v.x
print '*************************************************************************'
val = mDual_Standard_GUROBI.objVal
print ' | Objective Value ------------------ ', val
print ' | Aij Sum -------------------------- ', AijSum
print ' | Cj Sum --------------------------- ', CjSum
print ' | Bi Sum --------------------------- ', BiSum
print ' | Matrix Dimensions ---------------- ', Aij.shape
print ' | Date/Time ------------------------ ', dt.datetime.now()
print '*************************************************************************'
print '-- Gurobi Standard Dual Linear Programming Problem --'
try:
    GbpDualStd()
print '\nJames Gaboardi, 2015'
except Exception as e:
print ' ################################################################'
print ' < ISSUE : ', e, ' >'
print ' ################################################################' | lgpl-3.0 |
OpenMined/PySyft | packages/syft/src/syft/lib/pandas/categorical_dtype.py | 1 | 1173 | # third party
import pandas as pd
# syft relative
from ...generate_wrapper import GenerateWrapper
from ...lib.python.list import List
from ...lib.python.primitive_factory import PrimitiveFactory
from ...proto.lib.pandas.categorical_pb2 import (
PandasCategoricalDtype as PandasCategoricalDtype_PB,
)
def object2proto(obj: pd.CategoricalDtype) -> PandasCategoricalDtype_PB:
    # pd.Index is not an integrated syft type, so convert obj.categories to a List
pd_cat_list = PrimitiveFactory.generate_primitive(value=obj.categories.tolist())
cat_list_proto = pd_cat_list._object2proto()
return PandasCategoricalDtype_PB(
id=cat_list_proto.id, categories=cat_list_proto, ordered=obj.ordered
)
def proto2object(proto: PandasCategoricalDtype_PB) -> pd.CategoricalDtype:
categories = List._proto2object(proto.categories).upcast()
ordered = proto.ordered
return pd.CategoricalDtype(categories=categories, ordered=ordered)
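# Registers the conversion pair above so syft can (de)serialize
# pd.CategoricalDtype objects transparently.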
GenerateWrapper(
wrapped_type=pd.CategoricalDtype,
import_path="pandas.CategoricalDtype",
protobuf_scheme=PandasCategoricalDtype_PB,
type_object2proto=object2proto,
type_proto2object=proto2object,
)
| apache-2.0 |
boltnev/iktomi | iktomi/forms/fields.py | 3 | 14203 | # -*- coding: utf-8 -*-
import logging
import six
import cgi
import re
from . import convs, widgets
from ..utils import cached_property
from collections import OrderedDict
from .perms import FieldPerm
logger = logging.getLogger(__name__)
__all__ = ['BaseField', 'Field', 'FieldBlock', 'FieldSet', 'FieldList', 'FileField']
class BaseField(object):
'''
    Simple container class whose subclasses represent various parts of a Form.
    Encapsulates the converter, various field attributes, and methods for
    data access control.
'''
# obsolete parameters from previous versions
_obsolete = frozenset(['default', 'get_default', 'template', 'media',
'render_type', 'render', 'required'])
#: :class:`FieldPerm` instance determining field's access permissions.
    #: Can be set by field inheritance or through the constructor.
perm_getter = FieldPerm()
# defaults
#: :class:`Converter` instance determining field's convertation method
conv = convs.Char()
#: :class:`Widget` instance determining field's render method
widget = widgets.TextInput
#: Unicode label of the field
label = None
#: Short description of the field
hint = None
help = ''
def __init__(self, name, conv=None, parent=None, permissions=None, **kwargs):
if self._obsolete & set(kwargs):
raise TypeError(
'Obsolete parameters are used: {}'.format(
list(self._obsolete & set(kwargs))))
kwargs.update(
parent=parent,
name=name,
conv=(conv or self.conv)(field=self),
widget=(kwargs.get('widget') or self.widget)(field=self),
)
if permissions is not None:
kwargs['perm_getter'] = FieldPerm(permissions)
self._init_kwargs = kwargs
self.__dict__.update(kwargs)
def __call__(self, **kwargs):
'''
Creates current object's copy with extra constructor arguments passed.
'''
params = dict(self._init_kwargs, **kwargs)
return self.__class__(**params)
@property
def multiple(self):
return self.conv.multiple
@property
def env(self):
return self.parent.env
@property
def form(self):
return self.parent.form
@property
def input_name(self):
'''
        Name of the field's input element, generated taking into account
        possible nesting of fields. The input name is to be used in templates
        as the value of the Input (Select, etc.) element's Name attribute and
        the Label element's For attribute.
'''
return self.parent.prefix + self.name
@property
def error(self):
'''
String description of validation error in this field during last accept.
`None` if there is no error.
'''
return self.form.errors.get(self.input_name)
@property
def help_message(self):
return self.help or self.form.get_help(self.input_name)
@cached_property
def clean_value(self):
'''
Current field's converted value from form's python_data.
'''
# XXX cached_property is used only for set initial state
# this property should be set every time field data
# has been changed, for instance, in accept method
python_data = self.parent.python_data
if self.name in python_data:
return python_data[self.name]
return self.get_initial()
@property
def id(self):
if self.form.id:
            # We use template names in lists for replacement, so we must use
            # the form id here to ensure unique IDs.
return '{}-{}'.format(self.form.id, self.input_name)
return self.input_name
def from_python(self, value):
return self.conv.from_python(value)
@cached_property
def permissions(self):
'''
Field's access permissions. By default, is filled from perm_getter.
'''
return self.perm_getter.get_perms(self)
@cached_property
def writable(self):
return 'w' in self.permissions
@cached_property
def readable(self):
return 'r' in self.permissions
@cached_property
def field_names(self):
return [self.name]
def load_initial(self, initial, raw_data):
value = initial.get(self.name, self.get_initial())
self.set_raw_value(raw_data,
self.from_python(value))
return {self.name: value}
def __repr__(self):
args = ', '.join([k+'='+repr(v)
for k, v in self._init_kwargs.items()
if k not in ['widget', 'conv', 'parent']])
return '{}({})'.format(self.__class__.__name__, args)
class Field(BaseField):
'''
Atomic field
'''
conv = convs.Char()
_null_value = ''
def get_initial(self):
if hasattr(self, 'initial'):
return self.initial
if self.multiple:
return []
return None
@property
def raw_value(self):
if self.multiple:
return self.form.raw_data.getall(self.input_name)
else:
return self.form.raw_data.get(self.input_name, '')
def set_raw_value(self, raw_data, value):
if self.multiple:
try:
del raw_data[self.input_name]
except KeyError:
pass
for v in value:
raw_data.add(self.input_name, v)
else:
raw_data[self.input_name] = value
def _check_value_type(self, values):
if not self.multiple:
values = [values]
for value in values:
if not isinstance(value, six.string_types):
self.form.errors[self.input_name] = 'Given value has incompatible type'
return False
return True
def accept(self):
'''Extracts raw value from form's raw data and passes it to converter'''
value = self.raw_value
if not self._check_value_type(value):
# XXX should this be silent or TypeError?
value = [] if self.multiple else self._null_value
self.clean_value = self.conv.accept(value)
return {self.name: self.clean_value}
class AggregateField(BaseField):
@property
def python_data(self):
'''Representation of aggregate value as dictionary.'''
try:
value = self.clean_value
except LookupError:
# XXX is this necessary?
value = self.get_initial()
return self.from_python(value)
class FieldSet(AggregateField):
'''
Container field aggregating a couple of other different fields
'''
conv = convs.Converter()
widget = widgets.FieldSetWidget()
fields = []
def __init__(self, name, conv=None, fields=None, **kwargs):
fields = fields if fields is not None else self.fields
if kwargs.get('parent'):
conv = (conv or self.conv)(field=self)
fields = [field(parent=self) for field in fields]
kwargs.update(
name=name,
conv=conv,
fields=fields,
)
BaseField.__init__(self, **kwargs)
@property
def prefix(self):
return self.input_name+'.'
def get_field(self, name):
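        '''
        Returns a child field by name. Dotted names like 'address.city'
        descend into nested fields; FieldBlock children are searched
        transparently.
        '''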
names = name.split('.', 1)
for field in self.fields:
if isinstance(field, FieldBlock):
result = field.get_field(name)
if result is not None:
return result
if field.name == names[0]:
if len(names) > 1:
return field.get_field(names[1])
return field
return None
def get_initial(self):
field_names = sum([x.field_names for x in self.fields], [])
result = dict((name, self.get_field(name).get_initial())
for name in field_names)
return self.conv.accept(result, silent=True)
def set_raw_value(self, raw_data, value):
# fills in raw_data multidict, resulting keys are field's absolute names
assert isinstance(value, dict), \
'To set raw value on {!r} need dict, got {!r}'\
.format(self.input_name, value)
if not value:
# Field set can be optional
return
field_names = sum([x.field_names for x in self.fields], [])
for field_name in field_names:
subvalue = value[field_name]
field = self.get_field(field_name)
field.set_raw_value(raw_data, field.from_python(subvalue))
def accept(self):
'''
        Accepts all child fields, collects the resulting values into a dict
        and passes that dict to the converter. Returns the result of the
        converter as a separate value in the parent's `python_data`.
'''
result = dict(self.python_data)
for field in self.fields:
if field.writable:
result.update(field.accept())
else:
# readonly field
field.set_raw_value(self.form.raw_data,
field.from_python(result[field.name]))
self.clean_value = self.conv.accept(result)
return {self.name: self.clean_value}
class FieldBlock(FieldSet):
'''
    Anonymous FieldSet whose values are accepted as if they were children
    of the FieldBlock's parent.
FieldBlock is used to logically organize fields and do validation
of group of fields without naming that group and without dedicating
result of accept to separate object.
'''
conv = convs.FieldBlockConv()
widget = widgets.FieldBlockWidget()
prefix = ''
def __init__(self, title, fields=[], **kwargs):
kwargs.update(
title=title,
fields=fields,
)
kwargs.setdefault('name', '') # XXX generate unique name
FieldSet.__init__(self, **kwargs)
@cached_property
def prefix(self):
return self.parent.prefix
def accept(self):
'''
        Acts as `Field.accept` but returns the result of every child field
        as a value in the parent's `python_data`.
'''
result = FieldSet.accept(self)
self.clean_value = result[self.name]
return self.clean_value
def load_initial(self, initial, raw_data):
result = {}
for field in self.fields:
result.update(field.load_initial(initial, raw_data))
return result
@cached_property
def field_names(self):
result = []
for field in self.fields:
result += field.field_names
return result
@property
def python_data(self):
# we need only subfield values in python data
result = {}
for field_name in self.field_names:
if field_name in self.parent.python_data:
result[field_name] = self.parent.python_data[field_name]
return result
class FieldList(AggregateField):
'''
Container aggregating an ordered set of similar fields
'''
order = True
conv = convs.List()
widget = widgets.FieldListWidget()
    _digit_re = re.compile(r'\d+$')
def __init__(self, name, conv=None, field=Field(None),
parent=None, **kwargs):
if parent:
conv = (conv or self.conv)(field=self)
field = field(parent=self)
kwargs.update(
parent=parent,
name=name,
conv=conv,
field=field,
)
BaseField.__init__(self, **kwargs)
@property
def prefix(self):
# NOTE: There was '-' instead of '.' and get_field('list-1') was broken
return self.input_name+'.'
def get_initial(self):
return []
def get_field(self, name):
names = name.split('.', 1)
if not self._digit_re.match(names[0]):
# XXX is this needed?
return None
field = self.field(name=names[0])
if len(names) > 1:
return field.get_field(names[1])
return field
@property
def indices_input_name(self):
return self.input_name+'-indices'
def accept(self):
old = self.python_data
result = OrderedDict()
for index in self.form.raw_data.getall(self.indices_input_name):
try:
#XXX: we do not convert index to int, just check it.
# is it good idea?
int(index)
except ValueError:
logger.warning('Got incorrect index from form: %r', index)
continue
#TODO: describe this
field = self.field(name=str(index))
if not field.writable:
# readonly field
if index in old:
result[field.name] = old[field.name]
else:
result.update(field.accept())
self.clean_value = self.conv.accept(result)
return {self.name: self.clean_value}
def set_raw_value(self, raw_data, value):
indices = []
for index in range(1, len(value)+1):
index = str(index)
subvalue = value[index]
subfield = self.field(name=index)
subfield.set_raw_value(raw_data, subfield.from_python(subvalue))
indices.append(index)
try:
del raw_data[self.indices_input_name]
except KeyError:
pass
for index in indices:
raw_data.add(self.indices_input_name, index)
class FileField(Field):
'''
    The simplest file field
'''
_null_value = None
conv = convs.SimpleFile()
def set_raw_value(self, raw_data, value):
pass
def _check_value_type(self, values):
if not self.multiple:
values = [values]
for value in values:
if not isinstance(value, cgi.FieldStorage) and \
value and \
not hasattr(value, 'read'): # XXX is this right?
self.form.errors[self.input_name] = 'Given value is not file'
return False
return True
| mit |
catkin/xylem | xylem/load_url.py | 1 | 2512 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper to download content from url."""
from __future__ import unicode_literals
import socket
import time
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
import cgi
from xylem.exception import raise_from
from xylem.exception import XylemError
from xylem.text_utils import to_str
class DownloadFailure(XylemError):
"""Failure downloading data for I/O or other reasons."""
def load_url(url, retry=2, retry_period=1, timeout=10):
"""Load a given url with retries, retry_periods, and timeouts.
:param str url: URL to load and return contents of
:param int retry: number of times to retry the url on 503 or timeout
:param float retry_period: time to wait between retries in seconds
:param float timeout: timeout for opening the URL in seconds
    :returns: loaded data as string
:rtype: str
:raises DownloadFailure: if loading fails even after retries
"""
retry = max(retry, 0) # negative retry count causes infinite loop
while True:
try:
req = urlopen(url, timeout=timeout)
except HTTPError as e:
if e.code == 503 and retry:
retry -= 1
time.sleep(retry_period)
else:
raise_from(DownloadFailure, "Failed to load url '{0}'.".
format(url), e)
except URLError as e:
if isinstance(e.reason, socket.timeout) and retry:
retry -= 1
time.sleep(retry_period)
else:
raise_from(DownloadFailure, "Failed to load url '{0}'.".
format(url), e)
else:
break
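    # decode the body using the charset from the Content-Type header,
    # falling back to UTF-8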
_, params = cgi.parse_header(req.headers.get('Content-Type', ''))
encoding = params.get('charset', 'utf-8')
data = req.read()
return to_str(data, encoding=encoding)
| apache-2.0 |
firebitsbr/infernal-twin | build/pillow/Scripts/painter.py | 11 | 2037 | #!/usr/bin/env python
#
# The Python Imaging Library
# $Id$
#
# this demo script illustrates pasting into an already displayed
# photoimage. note that the current version of Tk updates the whole
# image every time we paste, so to get decent performance, we split
# the image into a set of tiles.
#
try:
from tkinter import Tk, Canvas, NW
except ImportError:
from Tkinter import Tk, Canvas, NW
from PIL import Image, ImageTk
import sys
#
# painter widget
class PaintCanvas(Canvas):
def __init__(self, master, image):
Canvas.__init__(self, master, width=image.size[0], height=image.size[1])
# fill the canvas
self.tile = {}
self.tilesize = tilesize = 32
xsize, ysize = image.size
for x in range(0, xsize, tilesize):
for y in range(0, ysize, tilesize):
box = x, y, min(xsize, x+tilesize), min(ysize, y+tilesize)
tile = ImageTk.PhotoImage(image.crop(box))
self.create_image(x, y, image=tile, anchor=NW)
self.tile[(x, y)] = box, tile
self.image = image
self.bind("<B1-Motion>", self.paint)
def paint(self, event):
xy = event.x - 10, event.y - 10, event.x + 10, event.y + 10
im = self.image.crop(xy)
# process the image in some fashion
im = im.convert("L")
self.image.paste(im, xy)
self.repair(xy)
def repair(self, box):
# update canvas
dx = box[0] % self.tilesize
dy = box[1] % self.tilesize
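        # snap the damaged box back to the tile grid, then re-paste every
        # tile it overlaps from the master image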
for x in range(box[0]-dx, box[2]+1, self.tilesize):
for y in range(box[1]-dy, box[3]+1, self.tilesize):
try:
xy, tile = self.tile[(x, y)]
tile.paste(self.image.crop(xy))
except KeyError:
pass # outside the image
self.update_idletasks()
#
# main
root = Tk()
im = Image.open(sys.argv[1])
if im.mode != "RGB":
im = im.convert("RGB")
PaintCanvas(root, im).pack()
root.mainloop()
| gpl-3.0 |
Neamar/django | django/core/files/storage.py | 281 | 13339 | import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject
from django.utils.inspect import func_supports_parameter
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content)
if func_supports_parameter(self.get_available_name, 'max_length'):
name = self.get_available_name(name, max_length=max_length)
else:
warnings.warn(
'Backwards compatibility for storage backends without '
'support for the `max_length` argument in '
'Storage.get_available_name() will be removed in Django 1.10.',
RemovedInDjango110Warning, stacklevel=2
)
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_text(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
# Entire file_root was truncated in attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
if location is None:
location = settings.MEDIA_ROOT
self.base_location = location
self.location = abspathu(self.base_location)
if base_url is None:
base_url = settings.MEDIA_URL
elif not base_url.endswith('/'):
base_url += '/'
self.base_url = base_url
self.file_permissions_mode = (
file_permissions_mode if file_permissions_mode is not None
else settings.FILE_UPLOAD_PERMISSIONS
)
self.directory_permissions_mode = (
directory_permissions_mode if directory_permissions_mode is not None
else settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS
)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
return name
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# Note that there is a race between os.path.exists and os.remove:
# if os.remove fails with ENOENT, the file was removed
# concurrently, and we can continue normally.
if os.path.exists(name):
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urljoin(self.base_url, filepath_to_uri(name))
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
| bsd-3-clause |
Mirantis/swift-encrypt | swift/common/db_replicator.py | 3 | 29348 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import random
import math
import time
import shutil
import uuid
import errno
import re
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import simplejson
import swift.common.db
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_ip
from swift.common import ring
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted, ConnectionTimeout
from swift.common.daemon import Daemon
from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
HTTPAccepted, HTTPBadRequest
DEBUG_TIMINGS_THRESHOLD = 10
def quarantine_db(object_file, server_type):
"""
In the case that a corrupt file is found, move it to a quarantined area to
allow replication to fix it.
:param object_file: path to corrupt file
:param server_type: type of file that is corrupt
('container' or 'account')
"""
object_dir = os.path.dirname(object_file)
quarantine_dir = os.path.abspath(
os.path.join(object_dir, '..', '..', '..', '..', 'quarantined',
server_type + 's', os.path.basename(object_dir)))
try:
renamer(object_dir, quarantine_dir)
except OSError, e:
if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
raise
quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex)
renamer(object_dir, quarantine_dir)
def roundrobin_datadirs(datadirs):
"""
Generator to walk the data dirs in a round robin manner, evenly
hitting each device on the system, and yielding any .db files
found (in their proper places). The partitions within each data
dir are walked randomly, however.
:param datadirs: a list of (path, node_id) to walk
:returns: A generator of (partition, path_to_db_file, node_id)
"""
def walk_datadir(datadir, node_id):
partitions = os.listdir(datadir)
random.shuffle(partitions)
for partition in partitions:
part_dir = os.path.join(datadir, partition)
if not os.path.isdir(part_dir):
continue
suffixes = os.listdir(part_dir)
for suffix in suffixes:
suff_dir = os.path.join(part_dir, suffix)
if not os.path.isdir(suff_dir):
continue
hashes = os.listdir(suff_dir)
for hsh in hashes:
hash_dir = os.path.join(suff_dir, hsh)
if not os.path.isdir(hash_dir):
continue
object_file = os.path.join(hash_dir, hsh + '.db')
if os.path.exists(object_file):
yield (partition, object_file, node_id)
its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs]
while its:
for it in its:
try:
yield it.next()
except StopIteration:
its.remove(it)
class ReplConnection(BufferedHTTPConnection):
"""
Helper to simplify REPLICATEing to a remote server.
"""
def __init__(self, node, partition, hash_, logger):
""
self.logger = logger
self.node = node
BufferedHTTPConnection.__init__(self, '%(ip)s:%(port)s' % node)
self.path = '/%s/%s/%s' % (node['device'], partition, hash_)
def replicate(self, *args):
"""
Make an HTTP REPLICATE request
:param args: list of json-encodable objects
:returns: httplib response object
"""
try:
body = simplejson.dumps(args)
self.request('REPLICATE', self.path, body,
{'Content-Type': 'application/json'})
response = self.getresponse()
response.data = response.read()
return response
except (Exception, Timeout):
self.logger.exception(
_('ERROR reading HTTP response from %s'), self.node)
return None
class Replicator(Daemon):
"""
Implements the logic for directing db replication.
"""
def __init__(self, conf):
self.conf = conf
self.logger = get_logger(conf, log_route='replicator')
self.root = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.port = int(conf.get('bind_port', self.default_port))
concurrency = int(conf.get('concurrency', 8))
self.cpool = GreenPool(size=concurrency)
swift_dir = conf.get('swift_dir', '/etc/swift')
self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
self.per_diff = int(conf.get('per_diff', 1000))
self.max_diffs = int(conf.get('max_diffs') or 100)
self.interval = int(conf.get('interval') or
conf.get('run_pause') or 30)
self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
swift.common.db.DB_PREALLOCATION = \
config_true_value(conf.get('db_preallocation', 'f'))
self._zero_stats()
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.recon_replicator = '%s.recon' % self.server_type
self.rcache = os.path.join(self.recon_cache_path,
self.recon_replicator)
self.extract_device_re = re.compile('%s%s([^%s]+)' % (
self.root, os.path.sep, os.path.sep))
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
'remove': 0, 'empty': 0, 'remote_merge': 0,
'start': time.time(), 'diff_capped': 0}
def _report_stats(self):
"""Report the current stats to the logs."""
self.logger.info(
_('Attempted to replicate %(count)d dbs in %(time).5f seconds '
'(%(rate).5f/s)'),
{'count': self.stats['attempted'],
'time': time.time() - self.stats['start'],
'rate': self.stats['attempted'] /
(time.time() - self.stats['start'] + 0.0000001)})
self.logger.info(_('Removed %(remove)d dbs') % self.stats)
self.logger.info(_('%(success)s successes, %(failure)s failures')
% self.stats)
dump_recon_cache(
{'replication_stats': self.stats,
'replication_time': time.time() - self.stats['start'],
'replication_last': time.time()},
self.rcache, self.logger)
self.logger.info(' '.join(['%s:%s' % item for item in
self.stats.items() if item[0] in
('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl',
'empty', 'diff_capped')]))
def _rsync_file(self, db_file, remote_file, whole_file=True):
"""
Sync a single file using rsync. Used by _rsync_db to handle syncing.
:param db_file: file to be synced
:param remote_file: remote location to sync the DB file to
:param whole-file: if True, uses rsync's --whole-file flag
:returns: True if the sync was successful, False otherwise
"""
popen_args = ['rsync', '--quiet', '--no-motd',
'--timeout=%s' % int(math.ceil(self.node_timeout)),
'--contimeout=%s' % int(math.ceil(self.conn_timeout))]
if whole_file:
popen_args.append('--whole-file')
popen_args.extend([db_file, remote_file])
proc = subprocess.Popen(popen_args)
proc.communicate()
if proc.returncode != 0:
self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
{'code': proc.returncode, 'args': popen_args})
return proc.returncode == 0
def _rsync_db(self, broker, device, http, local_id,
replicate_method='complete_rsync', replicate_timeout=None):
"""
Sync a whole db using rsync.
:param broker: DB broker object of DB to be synced
:param device: device to sync to
:param http: ReplConnection object
:param local_id: unique ID of the local database replica
:param replicate_method: remote operation to perform after rsync
:param replicate_timeout: timeout to wait in seconds
"""
device_ip = rsync_ip(device['ip'])
if self.vm_test_mode:
remote_file = '%s::%s%s/%s/tmp/%s' % (
device_ip, self.server_type, device['port'], device['device'],
local_id)
else:
remote_file = '%s::%s/%s/tmp/%s' % (
device_ip, self.server_type, device['device'], local_id)
mtime = os.path.getmtime(broker.db_file)
if not self._rsync_file(broker.db_file, remote_file):
return False
# perform block-level sync if the db was modified during the first sync
if os.path.exists(broker.db_file + '-journal') or \
os.path.getmtime(broker.db_file) > mtime:
# grab a lock so nobody else can modify it
with broker.lock():
if not self._rsync_file(broker.db_file, remote_file, False):
return False
with Timeout(replicate_timeout or self.node_timeout):
response = http.replicate(replicate_method, local_id)
return response and response.status >= 200 and response.status < 300
def _usync_db(self, point, broker, http, remote_id, local_id):
"""
Sync a db by sending all records since the last sync.
:param point: synchronization high water mark between the replicas
:param broker: database broker object
:param http: ReplConnection object for the remote server
:param remote_id: database id for the remote replica
:param local_id: database id for the local replica
:returns: boolean indicating completion and success
"""
self.stats['diff'] += 1
self.logger.increment('diffs')
self.logger.debug(_('Syncing chunks with %s'), http.host)
sync_table = broker.get_syncs()
objects = broker.get_items_since(point, self.per_diff)
diffs = 0
while len(objects) and diffs < self.max_diffs:
diffs += 1
with Timeout(self.node_timeout):
response = http.replicate('merge_items', objects, local_id)
if not response or response.status >= 300 or response.status < 200:
if response:
self.logger.error(_('ERROR Bad response %(status)s from '
'%(host)s'),
{'status': response.status,
'host': http.host})
return False
point = objects[-1]['ROWID']
objects = broker.get_items_since(point, self.per_diff)
if objects:
self.logger.debug(_(
'Synchronization for %s has fallen more than '
'%s rows behind; moving on and will try again next pass.') %
(broker.db_file, self.max_diffs * self.per_diff))
self.stats['diff_capped'] += 1
self.logger.increment('diff_caps')
else:
with Timeout(self.node_timeout):
response = http.replicate('merge_syncs', sync_table)
if response and response.status >= 200 and response.status < 300:
broker.merge_syncs([{'remote_id': remote_id,
'sync_point': point}],
incoming=False)
return True
return False
def _in_sync(self, rinfo, info, broker, local_sync):
"""
Determine whether or not two replicas of a databases are considered
to be in sync.
:param rinfo: remote database info
:param info: local database info
:param broker: database broker object
:param local_sync: cached last sync point between replicas
:returns: boolean indicating whether or not the replicas are in sync
"""
if max(rinfo['point'], local_sync) >= info['max_row']:
self.stats['no_change'] += 1
self.logger.increment('no_changes')
return True
if rinfo['hash'] == info['hash']:
self.stats['hashmatch'] += 1
self.logger.increment('hashmatches')
broker.merge_syncs([{'remote_id': rinfo['id'],
'sync_point': rinfo['point']}],
incoming=False)
return True
def _http_connect(self, node, partition, db_file):
"""
Make an http_connection using ReplConnection
:param node: node dictionary from the ring
:param partition: partition partition to send in the url
:param db_file: DB file
:returns: ReplConnection object
"""
return ReplConnection(node, partition,
os.path.basename(db_file).split('.', 1)[0],
self.logger)
def _repl_to_node(self, node, broker, partition, info):
"""
Replicate a database to a node.
:param node: node dictionary from the ring to be replicated to
:param broker: DB broker for the DB to be replication
:param partition: partition on the node to replicate to
:param info: DB info as a dictionary of {'max_row', 'hash', 'id',
'created_at', 'put_timestamp', 'delete_timestamp',
'metadata'}
:returns: True if successful, False otherwise
"""
with ConnectionTimeout(self.conn_timeout):
http = self._http_connect(node, partition, broker.db_file)
if not http:
self.logger.error(
_('ERROR Unable to connect to remote server: %s'), node)
return False
with Timeout(self.node_timeout):
response = http.replicate(
'sync', info['max_row'], info['hash'], info['id'],
info['created_at'], info['put_timestamp'],
info['delete_timestamp'], info['metadata'])
if not response:
return False
elif response.status == HTTP_NOT_FOUND: # completely missing, rsync
self.stats['rsync'] += 1
self.logger.increment('rsyncs')
return self._rsync_db(broker, node, http, info['id'])
elif response.status == HTTP_INSUFFICIENT_STORAGE:
raise DriveNotMounted()
elif response.status >= 200 and response.status < 300:
rinfo = simplejson.loads(response.data)
local_sync = broker.get_sync(rinfo['id'], incoming=False)
if self._in_sync(rinfo, info, broker, local_sync):
return True
            # if the remote db has fewer than half as many rows as the local
            # db, rsync the whole db over and then do a remote merge.
if rinfo['max_row'] / float(info['max_row']) < 0.5:
self.stats['remote_merge'] += 1
self.logger.increment('remote_merges')
return self._rsync_db(broker, node, http, info['id'],
replicate_method='rsync_then_merge',
replicate_timeout=(info['count'] / 2000))
# else send diffs over to the remote server
return self._usync_db(max(rinfo['point'], local_sync),
broker, http, rinfo['id'], info['id'])
def _replicate_object(self, partition, object_file, node_id):
"""
Replicate the db, choosing method based on whether or not it
already exists on peers.
:param partition: partition to be replicated to
:param object_file: DB file name to be replicated
:param node_id: node id of the node to be replicated to
"""
start_time = time.time()
self.logger.debug(_('Replicating db %s'), object_file)
self.stats['attempted'] += 1
self.logger.increment('attempts')
try:
broker = self.brokerclass(object_file, pending_timeout=30)
broker.reclaim(time.time() - self.reclaim_age,
time.time() - (self.reclaim_age * 2))
info = broker.get_replication_info()
full_info = broker.get_info()
except (Exception, Timeout), e:
if 'no such table' in str(e):
self.logger.error(_('Quarantining DB %s'), object_file)
quarantine_db(broker.db_file, broker.db_type)
else:
self.logger.exception(_('ERROR reading db %s'), object_file)
self.stats['failure'] += 1
self.logger.increment('failures')
return
# The db is considered deleted if the delete_timestamp value is greater
# than the put_timestamp, and there are no objects.
delete_timestamp = 0
try:
delete_timestamp = float(info['delete_timestamp'])
except ValueError:
pass
put_timestamp = 0
try:
put_timestamp = float(info['put_timestamp'])
except ValueError:
pass
if delete_timestamp < (time.time() - self.reclaim_age) and \
delete_timestamp > put_timestamp and \
info['count'] in (None, '', 0, '0'):
if self.report_up_to_date(full_info):
self.delete_db(object_file)
self.logger.timing_since('timing', start_time)
return
responses = []
nodes = self.ring.get_part_nodes(int(partition))
shouldbehere = bool([n for n in nodes if n['id'] == node_id])
# See Footnote [1] for an explanation of the repl_nodes assignment.
i = 0
while i < len(nodes) and nodes[i]['id'] != node_id:
i += 1
repl_nodes = nodes[i + 1:] + nodes[:i]
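        # repl_nodes now holds every other primary node for this partition,
        # starting just after this node and wrapping around; the local node
        # itself is excluded.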
more_nodes = self.ring.get_more_nodes(int(partition))
for node in repl_nodes:
success = False
try:
success = self._repl_to_node(node, broker, partition, info)
except DriveNotMounted:
repl_nodes.append(more_nodes.next())
self.logger.error(_('ERROR Remote drive not mounted %s'), node)
except (Exception, Timeout):
self.logger.exception(_('ERROR syncing %(file)s with node'
' %(node)s'),
{'file': object_file, 'node': node})
self.stats['success' if success else 'failure'] += 1
self.logger.increment('successes' if success else 'failures')
responses.append(success)
if not shouldbehere and all(responses):
# If the db shouldn't be on this node and has been successfully
# synced to all of its peers, it can be removed.
self.delete_db(object_file)
self.logger.timing_since('timing', start_time)
def delete_db(self, object_file):
hash_dir = os.path.dirname(object_file)
suf_dir = os.path.dirname(hash_dir)
with lock_parent_directory(object_file):
shutil.rmtree(hash_dir, True)
try:
os.rmdir(suf_dir)
except OSError, err:
if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
self.logger.exception(
_('ERROR while trying to clean up %s') % suf_dir)
self.stats['remove'] += 1
device_name = self.extract_device(object_file)
self.logger.increment('removes.' + device_name)
def extract_device(self, object_file):
"""
Extract the device name from an object path. Returns "UNKNOWN" if the
path could not be extracted successfully for some reason.
:param object_file: the path to a database file.
"""
match = self.extract_device_re.match(object_file)
if match:
return match.groups()[0]
return "UNKNOWN"
def report_up_to_date(self, full_info):
return True
def run_once(self, *args, **kwargs):
"""Run a replication pass once."""
self._zero_stats()
dirs = []
ips = whataremyips()
if not ips:
self.logger.error(_('ERROR Failed to get my own IPs?'))
return
for node in self.ring.devs:
if node and node['ip'] in ips and node['port'] == self.port:
if self.mount_check and not os.path.ismount(
os.path.join(self.root, node['device'])):
self.logger.warn(
_('Skipping %(device)s as it is not mounted') % node)
continue
unlink_older_than(
os.path.join(self.root, node['device'], 'tmp'),
time.time() - self.reclaim_age)
datadir = os.path.join(self.root, node['device'], self.datadir)
if os.path.isdir(datadir):
dirs.append((datadir, node['id']))
self.logger.info(_('Beginning replication run'))
for part, object_file, node_id in roundrobin_datadirs(dirs):
self.cpool.spawn_n(
self._replicate_object, part, object_file, node_id)
self.cpool.waitall()
self.logger.info(_('Replication run OVER'))
self._report_stats()
def run_forever(self, *args, **kwargs):
"""
Replicate dbs under the given root in an infinite loop.
"""
sleep(random.random() * self.interval)
while True:
begin = time.time()
try:
self.run_once()
except (Exception, Timeout):
self.logger.exception(_('ERROR trying to replicate'))
elapsed = time.time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
class ReplicatorRpc(object):
"""Handle Replication RPC calls. TODO(redbo): document please :)"""
def __init__(self, root, datadir, broker_class, mount_check=True,
logger=None):
self.root = root
self.datadir = datadir
self.broker_class = broker_class
self.mount_check = mount_check
self.logger = logger or get_logger({}, log_route='replicator-rpc')
def dispatch(self, replicate_args, args):
if not hasattr(args, 'pop'):
return HTTPBadRequest(body='Invalid object type')
op = args.pop(0)
drive, partition, hsh = replicate_args
if self.mount_check and \
not os.path.ismount(os.path.join(self.root, drive)):
return Response(status='507 %s is not mounted' % drive)
db_file = os.path.join(self.root, drive,
storage_directory(self.datadir, partition, hsh),
hsh + '.db')
if op == 'rsync_then_merge':
return self.rsync_then_merge(drive, db_file, args)
if op == 'complete_rsync':
return self.complete_rsync(drive, db_file, args)
else:
# someone might be about to rsync a db to us,
# make sure there's a tmp dir to receive it.
mkdirs(os.path.join(self.root, drive, 'tmp'))
if not os.path.exists(db_file):
return HTTPNotFound()
return getattr(self, op)(self.broker_class(db_file), args)
def sync(self, broker, args):
(remote_sync, hash_, id_, created_at, put_timestamp,
delete_timestamp, metadata) = args
timemark = time.time()
try:
info = broker.get_replication_info()
except (Exception, Timeout), e:
if 'no such table' in str(e):
self.logger.error(_("Quarantining DB %s") % broker.db_file)
quarantine_db(broker.db_file, broker.db_type)
return HTTPNotFound()
raise
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for info: %.02fs') %
timespan)
if metadata:
timemark = time.time()
broker.update_metadata(simplejson.loads(metadata))
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'update_metadata: %.02fs') % timespan)
if info['put_timestamp'] != put_timestamp or \
info['created_at'] != created_at or \
info['delete_timestamp'] != delete_timestamp:
timemark = time.time()
broker.merge_timestamps(
created_at, put_timestamp, delete_timestamp)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_timestamps: %.02fs') % timespan)
timemark = time.time()
info['point'] = broker.get_sync(id_)
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for get_sync: '
'%.02fs') % timespan)
if hash_ == info['hash'] and info['point'] < remote_sync:
timemark = time.time()
broker.merge_syncs([{'remote_id': id_,
'sync_point': remote_sync}])
info['point'] = remote_sync
timespan = time.time() - timemark
if timespan > DEBUG_TIMINGS_THRESHOLD:
self.logger.debug(_('replicator-rpc-sync time for '
'merge_syncs: %.02fs') % timespan)
return Response(simplejson.dumps(info))
def merge_syncs(self, broker, args):
broker.merge_syncs(args[0])
return HTTPAccepted()
def merge_items(self, broker, args):
broker.merge_items(args[0], args[1])
return HTTPAccepted()
def complete_rsync(self, drive, db_file, args):
old_filename = os.path.join(self.root, drive, 'tmp', args[0])
if os.path.exists(db_file):
return HTTPNotFound()
if not os.path.exists(old_filename):
return HTTPNotFound()
broker = self.broker_class(old_filename)
broker.newid(args[0])
renamer(old_filename, db_file)
return HTTPNoContent()
def rsync_then_merge(self, drive, db_file, args):
old_filename = os.path.join(self.root, drive, 'tmp', args[0])
if not os.path.exists(db_file) or not os.path.exists(old_filename):
return HTTPNotFound()
new_broker = self.broker_class(old_filename)
existing_broker = self.broker_class(db_file)
point = -1
objects = existing_broker.get_items_since(point, 1000)
while len(objects):
new_broker.merge_items(objects)
point = objects[-1]['ROWID']
objects = existing_broker.get_items_since(point, 1000)
sleep()
new_broker.newid(args[0])
renamer(old_filename, db_file)
return HTTPNoContent()
# Footnote [1]:
# This orders the nodes so that, given nodes a b c, a will contact b then c,
# b will contact c then a, and c will contact a then b -- in other words, each
# node will always contact the next node in the list first.
# This helps in the case where databases are all way out of sync, so each
# node is likely to be sending to a different node than it's receiving from,
# rather than two nodes talking to each other, starving out the third.
# If the third didn't even have a copy and the first two nodes were way out
# of sync, such starvation would mean the third node wouldn't get any copy
# until the first two nodes finally got in sync, which could take a while.
# This new ordering ensures such starvation doesn't occur, making the data
# more durable.
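# A small illustration of the rotation described above (a sketch, not part of
# the original module):
#
#   >>> nodes = ['a', 'b', 'c']
#   >>> [nodes[i + 1:] + nodes[:i] for i in range(3)]
#   [['b', 'c'], ['c', 'a'], ['a', 'b']]
#
# i.e. node a syncs to b then c, node b to c then a, and node c to a then b,
# which is exactly the repl_nodes ordering computed in _replicate_object.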
| apache-2.0 |
acshan/odoo | openerp/addons/base/res/__init__.py | 384 | 1261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
import res_lang
import ir_property
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ruschelp/cortex-vfx | python/IECoreMaya/TemporaryAttributeValues.py | 12 | 4259 | ##########################################################################
#
# Copyright (c) 2009-2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds
import maya.OpenMaya
import IECore
import StringUtil
## A context manager for controlling attribute values in with statements. It
# sets attributes to requested values on entering the block and resets them to
# their previous values on exiting the block.
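# A minimal usage sketch (illustrative only -- "pSphere1" and the helper
# function are hypothetical, assuming a scene that contains such a node):
#
#	with TemporaryAttributeValues( { "pSphere1.visibility" : False } ) :
#		# inside the block the attribute holds the temporary value
#		doSomethingWhileHidden()
#	# on exiting the block the previous value is restored automatically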
class TemporaryAttributeValues :
def __init__( self, attributeAndValues = {}, **kw ) :
self.__attributesAndValues = attributeAndValues
self.__attributesAndValues.update( kw )
def __enter__( self ) :
handlers = {
"enum" : self.__simpleAttrHandler,
"bool" : self.__simpleAttrHandler,
"float" : self.__simpleAttrHandler,
"long" : self.__simpleAttrHandler,
"short" : self.__simpleAttrHandler,
"float2" : IECore.curry( self.__numeric2AttrHandler, attributeType="float2" ),
"long2" : IECore.curry( self.__numeric2AttrHandler, attributeType="long2" ),
"short2" : IECore.curry( self.__numeric2AttrHandler, attributeType="short2" ),
"float3" : IECore.curry( self.__numeric3AttrHandler, attributeType="float3" ),
"long3" : IECore.curry( self.__numeric3AttrHandler, attributeType="long3" ),
"short3" : IECore.curry( self.__numeric3AttrHandler, attributeType="short3" ),
"string" : self.__stringAttrHandler,
}
self.__restoreCommands = []
for attr, value in self.__attributesAndValues.items() :
# check we can handle this type
attrType = maya.cmds.getAttr( attr, type=True )
handler = handlers.get( attrType, None )
if not handler :
raise TypeError( "Attribute \"%s\" has unsupported type \"%s\"." % ( attr, attrType ) )
# store a command to restore the attribute value later
origValue = maya.cmds.getAttr( attr )
if isinstance( origValue, list ) and isinstance( origValue[0], tuple ) :
origValue = origValue[0]
self.__restoreCommands.append( IECore.curry( handler, attr, origValue ) )
# and change the attribute value
handler( attr, value )
def __exit__( self, type, value, traceBack ) :
for cmd in self.__restoreCommands :
cmd()
def __simpleAttrHandler( self, attr, value ) :
maya.cmds.setAttr( attr, value )
def __numeric2AttrHandler( self, attr, value, attributeType ) :
maya.cmds.setAttr( attr, value[0], value[1], type=attributeType )
def __numeric3AttrHandler( self, attr, value, attributeType ) :
maya.cmds.setAttr( attr, value[0], value[1], value[2], type=attributeType )
def __stringAttrHandler( self, attr, value ) :
maya.cmds.setAttr( attr, value, type="string" )
| bsd-3-clause |
jeremiahmarks/sl4a | python/src/Lib/test/test_netrc.py | 99 | 1116 |
import netrc, os, unittest, sys
from test import test_support
TEST_NETRC = """
machine foo login log1 password pass1 account acct1
macdef macro1
line1
line2

macdef macro2
line3
line4

default login log2 password pass2
"""
temp_filename = test_support.TESTFN
class NetrcTestCase(unittest.TestCase):
def setUp (self):
mode = 'w'
if sys.platform not in ['cygwin']:
mode += 't'
fp = open(temp_filename, mode)
fp.write(TEST_NETRC)
fp.close()
self.netrc = netrc.netrc(temp_filename)
def tearDown (self):
del self.netrc
os.unlink(temp_filename)
def test_case_1(self):
self.assert_(self.netrc.macros == {'macro1':['line1\n', 'line2\n'],
'macro2':['line3\n', 'line4\n']}
)
self.assert_(self.netrc.hosts['foo'] == ('log1', 'acct1', 'pass1'))
self.assert_(self.netrc.hosts['default'] == ('log2', None, 'pass2'))
def test_main():
test_support.run_unittest(NetrcTestCase)
if __name__ == "__main__":
test_main()
| apache-2.0 |
elmerdpadilla/iv | addons/hw_escpos/escpos/exceptions.py | 151 | 1974 | """ ESC/POS exception classes """
import os
class Error(Exception):
""" Base class for ESC/POS errors """
def __init__(self, msg, status=None):
Exception.__init__(self)
self.msg = msg
self.resultcode = 1
if status is not None:
self.resultcode = status
def __str__(self):
return self.msg
# Result/Exit codes
# 0 = success
# 10 = No Barcode type defined
# 20 = Barcode size values are out of range
# 30 = Barcode text not supplied
# 40 = Image height is too large
# 50 = No string supplied to be printed
# 60 = Invalid pin to send Cash Drawer pulse
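# Hypothetical usage sketch (the surrounding driver code is assumed and is not
# part of this module); callers can map a failure back to the exit codes above:
#
#   try:
#       raise BarcodeSizeError("width out of range")
#   except Error, e:
#       print "escpos error %d: %s" % (e.resultcode, e)
#       # -> "escpos error 20: Barcode size is out of range"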
class BarcodeTypeError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 10
def __str__(self):
return "No Barcode type is defined"
class BarcodeSizeError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 20
def __str__(self):
return "Barcode size is out of range"
class BarcodeCodeError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 30
def __str__(self):
return "Code was not supplied"
class ImageSizeError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 40
def __str__(self):
return "Image height is longer than 255px and can't be printed"
class TextError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 50
def __str__(self):
return "Text string must be supplied to the text() method"
class CashDrawerError(Error):
def __init__(self, msg=""):
Error.__init__(self, msg)
self.msg = msg
self.resultcode = 60
def __str__(self):
return "Valid pin must be set to send pulse"
| agpl-3.0 |
nuagenetworks/vspk-python | vspk/v5_0/nuvirtualfirewallrule.py | 1 | 40053 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from bambou import NURESTObject
class NUVirtualFirewallRule(NURESTObject):
""" Represents a VirtualFirewallRule in the VSD
Notes:
Virtual firewall rules define intent based security policy entries to control traffic between source/destinations in the network. Virtual firewall rules are inherently stateful and are enforced as Ingress/Egress stateful ACLs in Nuage policy enforcement points
"""
__rest_name__ = "virtualfirewallrule"
__resource_name__ = "virtualfirewallrules"
## Constants
CONST_NETWORK_TYPE_NETWORK_MACRO_GROUP = "NETWORK_MACRO_GROUP"
CONST_NETWORK_TYPE_ENTERPRISE_NETWORK = "ENTERPRISE_NETWORK"
CONST_LOCATION_TYPE_ZONE = "ZONE"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
CONST_ACTION_FORWARD = "FORWARD"
CONST_NETWORK_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_LOCATION_TYPE_UNDERLAY_INTERNET_POLICYGROUP = "UNDERLAY_INTERNET_POLICYGROUP"
CONST_LOCATION_TYPE_ANY = "ANY"
CONST_LOCATION_TYPE_PGEXPRESSION = "PGEXPRESSION"
CONST_ACTION_DROP = "DROP"
CONST_LOCATION_TYPE_ENTERPRISE_NETWORK = "ENTERPRISE_NETWORK"
CONST_NETWORK_TYPE_ANY = "ANY"
CONST_LOCATION_TYPE_POLICYGROUP = "POLICYGROUP"
CONST_NETWORK_TYPE_SUBNET = "SUBNET"
CONST_LOCATION_TYPE_NETWORK_MACRO_GROUP = "NETWORK_MACRO_GROUP"
CONST_NETWORK_TYPE_ZONE = "ZONE"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE_GROUP = "L4_SERVICE_GROUP"
CONST_LOCATION_TYPE_SUBNET = "SUBNET"
CONST_POLICY_STATE_DRAFT = "DRAFT"
CONST_ASSOCIATED_TRAFFIC_TYPE_L4_SERVICE = "L4_SERVICE"
CONST_WEB_FILTER_TYPE_WEB_DOMAIN_NAME = "WEB_DOMAIN_NAME"
CONST_POLICY_STATE_LIVE = "LIVE"
CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_WEB_FILTER_TYPE_WEB_CATEGORY = "WEB_CATEGORY"
CONST_NETWORK_TYPE_PGEXPRESSION = "PGEXPRESSION"
CONST_NETWORK_TYPE_INTERNET_POLICYGROUP = "INTERNET_POLICYGROUP"
def __init__(self, **kwargs):
""" Initializes a VirtualFirewallRule instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> virtualfirewallrule = NUVirtualFirewallRule(id=u'xxxx-xxx-xxx-xxx', name=u'VirtualFirewallRule')
>>> virtualfirewallrule = NUVirtualFirewallRule(data=my_dict)
"""
super(NUVirtualFirewallRule, self).__init__()
# Read/Write Attributes
self._acl_template_name = None
self._icmp_code = None
self._icmp_type = None
self._ipv6_address_override = None
self._dscp = None
self._last_updated_by = None
self._action = None
self._address_override = None
self._web_filter_id = None
self._web_filter_type = None
self._description = None
self._destination_port = None
self._network_id = None
self._network_type = None
self._mirror_destination_id = None
self._flow_logging_enabled = None
self._enterprise_name = None
self._entity_scope = None
self._location_id = None
self._location_type = None
self._policy_state = None
self._domain_name = None
self._source_port = None
self._priority = None
self._protocol = None
self._associated_egress_entry_id = None
self._associated_ingress_entry_id = None
self._associated_l7_application_signature_id = None
self._associated_live_entity_id = None
self._associated_live_template_id = None
self._associated_traffic_type = None
self._associated_traffic_type_id = None
self._stateful = None
self._stats_id = None
self._stats_logging_enabled = None
self._ether_type = None
self._overlay_mirror_destination_id = None
self._external_id = None
self.expose_attribute(local_name="acl_template_name", remote_name="ACLTemplateName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_code", remote_name="ICMPCode", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="icmp_type", remote_name="ICMPType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="ipv6_address_override", remote_name="IPv6AddressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="dscp", remote_name="DSCP", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="action", remote_name="action", attribute_type=str, is_required=True, is_unique=False, choices=[u'DROP', u'FORWARD'])
self.expose_attribute(local_name="address_override", remote_name="addressOverride", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="web_filter_id", remote_name="webFilterID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="web_filter_type", remote_name="webFilterType", attribute_type=str, is_required=False, is_unique=False, choices=[u'WEB_CATEGORY', u'WEB_DOMAIN_NAME'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="destination_port", remote_name="destinationPort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_id", remote_name="networkID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="network_type", remote_name="networkType", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'ENTERPRISE_NETWORK', u'INTERNET_POLICYGROUP', u'NETWORK_MACRO_GROUP', u'PGEXPRESSION', u'POLICYGROUP', u'SUBNET', u'ZONE'])
self.expose_attribute(local_name="mirror_destination_id", remote_name="mirrorDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="flow_logging_enabled", remote_name="flowLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_name", remote_name="enterpriseName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="location_type", remote_name="locationType", attribute_type=str, is_required=True, is_unique=False, choices=[u'ANY', u'ENTERPRISE_NETWORK', u'NETWORK_MACRO_GROUP', u'PGEXPRESSION', u'POLICYGROUP', u'SUBNET', u'UNDERLAY_INTERNET_POLICYGROUP', u'ZONE'])
self.expose_attribute(local_name="policy_state", remote_name="policyState", attribute_type=str, is_required=False, is_unique=False, choices=[u'DRAFT', u'LIVE'])
self.expose_attribute(local_name="domain_name", remote_name="domainName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="source_port", remote_name="sourcePort", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="priority", remote_name="priority", attribute_type=int, is_required=False, is_unique=False)
self.expose_attribute(local_name="protocol", remote_name="protocol", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_egress_entry_id", remote_name="associatedEgressEntryID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_ingress_entry_id", remote_name="associatedIngressEntryID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_l7_application_signature_id", remote_name="associatedL7ApplicationSignatureID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_live_entity_id", remote_name="associatedLiveEntityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_live_template_id", remote_name="associatedLiveTemplateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_traffic_type", remote_name="associatedTrafficType", attribute_type=str, is_required=False, is_unique=False, choices=[u'L4_SERVICE', u'L4_SERVICE_GROUP'])
self.expose_attribute(local_name="associated_traffic_type_id", remote_name="associatedTrafficTypeID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stateful", remote_name="stateful", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_id", remote_name="statsID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="stats_logging_enabled", remote_name="statsLoggingEnabled", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="ether_type", remote_name="etherType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="overlay_mirror_destination_id", remote_name="overlayMirrorDestinationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
# Fetchers
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def acl_template_name(self):
""" Get acl_template_name value.
Notes:
The name of the parent template for this rule entry
This attribute is named `ACLTemplateName` in VSD API.
"""
return self._acl_template_name
@acl_template_name.setter
def acl_template_name(self, value):
""" Set acl_template_name value.
Notes:
The name of the parent template for this rule entry
This attribute is named `ACLTemplateName` in VSD API.
"""
self._acl_template_name = value
@property
def icmp_code(self):
""" Get icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP.
This attribute is named `ICMPCode` in VSD API.
"""
return self._icmp_code
@icmp_code.setter
def icmp_code(self, value):
""" Set icmp_code value.
Notes:
The ICMP Code when protocol selected is ICMP.
This attribute is named `ICMPCode` in VSD API.
"""
self._icmp_code = value
@property
def icmp_type(self):
""" Get icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP.
This attribute is named `ICMPType` in VSD API.
"""
return self._icmp_type
@icmp_type.setter
def icmp_type(self, value):
""" Set icmp_type value.
Notes:
The ICMP Type when protocol selected is ICMP.
This attribute is named `ICMPType` in VSD API.
"""
self._icmp_type = value
@property
def ipv6_address_override(self):
""" Get ipv6_address_override value.
Notes:
Overrides the source IPV6 for Ingress and destination IPV6 for Egress, macentries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
return self._ipv6_address_override
@ipv6_address_override.setter
def ipv6_address_override(self, value):
""" Set ipv6_address_override value.
Notes:
Overrides the source IPV6 for Ingress and destination IPV6 for Egress, macentries will use this address as the match criteria.
This attribute is named `IPv6AddressOverride` in VSD API.
"""
self._ipv6_address_override = value
@property
def dscp(self):
""" Get dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
return self._dscp
@dscp.setter
def dscp(self, value):
""" Set dscp value.
Notes:
DSCP match condition to be set in the rule. It is either * or from 0-63
This attribute is named `DSCP` in VSD API.
"""
self._dscp = value
@property
def last_updated_by(self):
""" Get last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
""" Set last_updated_by value.
Notes:
ID of the user who last updated the object.
This attribute is named `lastUpdatedBy` in VSD API.
"""
self._last_updated_by = value
@property
def action(self):
""" Get action value.
Notes:
The action of the rule, DROP or FORWARD. Possible values are DROP, FORWARD.
"""
return self._action
@action.setter
def action(self, value):
""" Set action value.
Notes:
The action of the rule, DROP or FORWARD. Possible values are DROP, FORWARD.
"""
self._action = value
@property
def address_override(self):
""" Get address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, macentries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
return self._address_override
@address_override.setter
def address_override(self, value):
""" Set address_override value.
Notes:
Overrides the source IP for Ingress and destination IP for Egress, macentries will use this address as the match criteria.
This attribute is named `addressOverride` in VSD API.
"""
self._address_override = value
@property
def web_filter_id(self):
""" Get web_filter_id value.
Notes:
ID of web filter
This attribute is named `webFilterID` in VSD API.
"""
return self._web_filter_id
@web_filter_id.setter
def web_filter_id(self, value):
""" Set web_filter_id value.
Notes:
ID of web filter
This attribute is named `webFilterID` in VSD API.
"""
self._web_filter_id = value
@property
def web_filter_type(self):
""" Get web_filter_type value.
Notes:
Indicates type of web filter being set
This attribute is named `webFilterType` in VSD API.
"""
return self._web_filter_type
@web_filter_type.setter
def web_filter_type(self, value):
""" Set web_filter_type value.
Notes:
Indicates type of web filter being set
This attribute is named `webFilterType` in VSD API.
"""
self._web_filter_type = value
@property
def description(self):
""" Get description value.
Notes:
Description of the rule entry
"""
return self._description
@description.setter
def description(self, value):
""" Set description value.
Notes:
Description of the rule entry
"""
self._description = value
@property
def destination_port(self):
""" Get destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or a single port number or a port range like 1,2.. or 1 - 10
This attribute is named `destinationPort` in VSD API.
"""
return self._destination_port
@destination_port.setter
def destination_port(self, value):
""" Set destination_port value.
Notes:
The destination port to be matched if protocol is UDP or TCP. Value should be either * or a single port number or a port range like 1,2.. or 1 - 10
This attribute is named `destinationPort` in VSD API.
"""
self._destination_port = value
@property
def network_id(self):
""" Get network_id value.
Notes:
The ID of the destination endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `networkID` in VSD API.
"""
return self._network_id
@network_id.setter
def network_id(self, value):
""" Set network_id value.
Notes:
The ID of the destination endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `networkID` in VSD API.
"""
self._network_id = value
@property
def network_type(self):
""" Get network_type value.
Notes:
Type of the destination endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `networkType` in VSD API.
"""
return self._network_type
@network_type.setter
def network_type(self, value):
""" Set network_type value.
Notes:
Type of the destination endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `networkType` in VSD API.
"""
self._network_type = value
@property
def mirror_destination_id(self):
""" Get mirror_destination_id value.
Notes:
Destination ID of the mirror destination object.
This attribute is named `mirrorDestinationID` in VSD API.
"""
return self._mirror_destination_id
@mirror_destination_id.setter
def mirror_destination_id(self, value):
""" Set mirror_destination_id value.
Notes:
Destination ID of the mirror destination object.
This attribute is named `mirrorDestinationID` in VSD API.
"""
self._mirror_destination_id = value
@property
def flow_logging_enabled(self):
""" Get flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
return self._flow_logging_enabled
@flow_logging_enabled.setter
def flow_logging_enabled(self, value):
""" Set flow_logging_enabled value.
Notes:
Is flow logging enabled for this particular template
This attribute is named `flowLoggingEnabled` in VSD API.
"""
self._flow_logging_enabled = value
@property
def enterprise_name(self):
""" Get enterprise_name value.
Notes:
The name of the enterprise for the domain's parent
This attribute is named `enterpriseName` in VSD API.
"""
return self._enterprise_name
@enterprise_name.setter
def enterprise_name(self, value):
""" Set enterprise_name value.
Notes:
The name of the enterprise for the domain's parent
This attribute is named `enterpriseName` in VSD API.
"""
self._enterprise_name = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def location_id(self):
""" Get location_id value.
Notes:
The ID of the source endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `locationID` in VSD API.
"""
return self._location_id
@location_id.setter
def location_id(self, value):
""" Set location_id value.
Notes:
The ID of the source endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `locationID` in VSD API.
"""
self._location_id = value
@property
def location_type(self):
""" Get location_type value.
Notes:
Type of the source endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `locationType` in VSD API.
"""
return self._location_type
@location_type.setter
def location_type(self, value):
""" Set location_type value.
Notes:
Type of the source endpoint (Subnet/Zone/PortGroup/PolicyGroupExpression/NetworkMacro/Internet Policy Group/Enterprise Network)
This attribute is named `locationType` in VSD API.
"""
self._location_type = value
@property
def policy_state(self):
""" Get policy_state value.
Notes:
State of the policy.
This attribute is named `policyState` in VSD API.
"""
return self._policy_state
@policy_state.setter
def policy_state(self, value):
""" Set policy_state value.
Notes:
State of the policy.
This attribute is named `policyState` in VSD API.
"""
self._policy_state = value
@property
def domain_name(self):
""" Get domain_name value.
Notes:
The name of the domain/domain template for the Rule TemplateName.
This attribute is named `domainName` in VSD API.
"""
return self._domain_name
@domain_name.setter
def domain_name(self, value):
""" Set domain_name value.
Notes:
The name of the domain/domain template for the Rule TemplateName.
This attribute is named `domainName` in VSD API.
"""
self._domain_name = value
@property
def source_port(self):
""" Get source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value should be either * or a single port number or a port range like 1,2.. or 1 - 10
This attribute is named `sourcePort` in VSD API.
"""
return self._source_port
@source_port.setter
def source_port(self, value):
""" Set source_port value.
Notes:
Source port to be matched if protocol is UDP or TCP. Value should be either * or a single port number or a port range like 1,2.. or 1 - 10
This attribute is named `sourcePort` in VSD API.
"""
self._source_port = value
@property
def priority(self):
""" Get priority value.
Notes:
The priority of the rule entry that determines the order of entries
"""
return self._priority
@priority.setter
def priority(self, value):
""" Set priority value.
Notes:
The priority of the rule entry that determines the order of entries
"""
self._priority = value
@property
def protocol(self):
""" Get protocol value.
Notes:
Protocol number that must be matched
"""
return self._protocol
@protocol.setter
def protocol(self, value):
""" Set protocol value.
Notes:
Protocol number that must be matched
"""
self._protocol = value
@property
def associated_egress_entry_id(self):
""" Get associated_egress_entry_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedEgressEntryID` in VSD API.
"""
return self._associated_egress_entry_id
@associated_egress_entry_id.setter
def associated_egress_entry_id(self, value):
""" Set associated_egress_entry_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedEgressEntryID` in VSD API.
"""
self._associated_egress_entry_id = value
@property
def associated_ingress_entry_id(self):
""" Get associated_ingress_entry_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedIngressEntryID` in VSD API.
"""
return self._associated_ingress_entry_id
@associated_ingress_entry_id.setter
def associated_ingress_entry_id(self, value):
""" Set associated_ingress_entry_id value.
Notes:
In the draft mode, the ACL entry refers to this LiveEntity. In non-drafted mode, this is null.
This attribute is named `associatedIngressEntryID` in VSD API.
"""
self._associated_ingress_entry_id = value
@property
def associated_l7_application_signature_id(self):
""" Get associated_l7_application_signature_id value.
Notes:
The UUID of the associated L7 Application Signature
This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
"""
return self._associated_l7_application_signature_id
@associated_l7_application_signature_id.setter
def associated_l7_application_signature_id(self, value):
""" Set associated_l7_application_signature_id value.
Notes:
The UUID of the associated L7 Application Signature
This attribute is named `associatedL7ApplicationSignatureID` in VSD API.
"""
self._associated_l7_application_signature_id = value
@property
def associated_live_entity_id(self):
""" Get associated_live_entity_id value.
Notes:
In the draft mode, the rule entry refers to this LiveEntity. In live mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
return self._associated_live_entity_id
@associated_live_entity_id.setter
def associated_live_entity_id(self, value):
""" Set associated_live_entity_id value.
Notes:
In the draft mode, the rule entry refers to this LiveEntity. In live mode, this is null.
This attribute is named `associatedLiveEntityID` in VSD API.
"""
self._associated_live_entity_id = value
@property
def associated_live_template_id(self):
""" Get associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
return self._associated_live_template_id
@associated_live_template_id.setter
def associated_live_template_id(self, value):
""" Set associated_live_template_id value.
Notes:
In the draft mode, the ACL entity refers to this live entity parent. In non-drafted mode, this is null
This attribute is named `associatedLiveTemplateID` in VSD API.
"""
self._associated_live_template_id = value
@property
def associated_traffic_type(self):
""" Get associated_traffic_type value.
Notes:
                This property reflects the type of traffic in case a rule entry is created using a Service or Service Group. In case a protocol and port are specified for the ACL entry, this property has to be empty (null). Supported values are L4_SERVICE, L4_SERVICE_GROUP and empty.
This attribute is named `associatedTrafficType` in VSD API.
"""
return self._associated_traffic_type
@associated_traffic_type.setter
def associated_traffic_type(self, value):
""" Set associated_traffic_type value.
Notes:
                This property reflects the type of traffic in case a rule entry is created using a Service or Service Group. In case a protocol and port are specified for the ACL entry, this property has to be empty (null). Supported values are L4_SERVICE, L4_SERVICE_GROUP and empty.
This attribute is named `associatedTrafficType` in VSD API.
"""
self._associated_traffic_type = value
@property
def associated_traffic_type_id(self):
""" Get associated_traffic_type_id value.
Notes:
                If a traffic type is specified as Service or Service Group, then the associated ID of the Service / Service Group should be specified here
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
return self._associated_traffic_type_id
@associated_traffic_type_id.setter
def associated_traffic_type_id(self, value):
""" Set associated_traffic_type_id value.
Notes:
                If a traffic type is specified as Service or Service Group, then the associated ID of the Service / Service Group should be specified here
This attribute is named `associatedTrafficTypeID` in VSD API.
"""
self._associated_traffic_type_id = value
@property
def stateful(self):
""" Get stateful value.
Notes:
True means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. False means that there is no corresponding rule created by OVS in the network.
"""
return self._stateful
@stateful.setter
def stateful(self, value):
""" Set stateful value.
Notes:
True means that this ACL entry is stateful, so there will be a corresponding rule that will be created by OVS in the network. False means that there is no corresponding rule created by OVS in the network.
"""
self._stateful = value
@property
def stats_id(self):
""" Get stats_id value.
Notes:
The statsID that is created in the VSD and identifies this Rule Template Entry. This is auto-generated by VSD
This attribute is named `statsID` in VSD API.
"""
return self._stats_id
@stats_id.setter
def stats_id(self, value):
""" Set stats_id value.
Notes:
The statsID that is created in the VSD and identifies this Rule Template Entry. This is auto-generated by VSD
This attribute is named `statsID` in VSD API.
"""
self._stats_id = value
@property
def stats_logging_enabled(self):
""" Get stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
return self._stats_logging_enabled
@stats_logging_enabled.setter
def stats_logging_enabled(self, value):
""" Set stats_logging_enabled value.
Notes:
Is stats logging enabled for this particular template
This attribute is named `statsLoggingEnabled` in VSD API.
"""
self._stats_logging_enabled = value
@property
def ether_type(self):
""" Get ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
return self._ether_type
@ether_type.setter
def ether_type(self, value):
""" Set ether_type value.
Notes:
Ether type of the packet to be matched. etherType can be * or a valid hexadecimal value
This attribute is named `etherType` in VSD API.
"""
self._ether_type = value
@property
def overlay_mirror_destination_id(self):
""" Get overlay_mirror_destination_id value.
Notes:
ID of the overlay mirror destination
This attribute is named `overlayMirrorDestinationID` in VSD API.
"""
return self._overlay_mirror_destination_id
@overlay_mirror_destination_id.setter
def overlay_mirror_destination_id(self, value):
""" Set overlay_mirror_destination_id value.
Notes:
ID of the overlay mirror destination
This attribute is named `overlayMirrorDestinationID` in VSD API.
"""
self._overlay_mirror_destination_id = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| bsd-3-clause |
jomolinare/kobocat | onadata/apps/stats/views.py | 3 | 1326 | from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404
from django.shortcuts import render
from onadata.apps.logger.models import XForm
from onadata.apps.stats.utils import get_form_submissions_per_day
@login_required
def stats(request, username=None, id_string=None):
if id_string:
xform = get_object_or_404(
XForm, user=request.user, id_string__exact=id_string)
data = {
'xform': xform,
'context.submission_stats': get_form_submissions_per_day(xform)
}
else:
data = {'xforms': XForm.objects.filter(user=request.user)}
return render(request, 'form-stats.html', data)
@staff_member_required
def submissions(request):
stats = {}
stats['submission_count'] = {}
stats['submission_count']['total_submission_count'] = 0
users = User.objects.all()
for user in users:
stats['submission_count'][user.username] = 0
stats['submission_count'][user.username] += user.instances.count()
stats['submission_count'][
'total_submission_count'] += user.instances.count()
return render(request, "submissions.html", {'stats': stats})
| bsd-2-clause |
rangma/p2pool | p2pool/test/test_p2p.py | 269 | 2724 | import random
from twisted.internet import defer, endpoints, protocol, reactor
from twisted.trial import unittest
from p2pool import networks, p2p
from p2pool.bitcoin import data as bitcoin_data
from p2pool.util import deferral
class Test(unittest.TestCase):
@defer.inlineCallbacks
def test_sharereq(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
def handle_share_hashes(self, hashes, peer):
peer.get_shares(
hashes=[hashes[0]],
parents=5,
stops=[],
).chainDeferred(self.df)
df = defer.Deferred()
n = MyNode(df)
n.start()
try:
yield df
finally:
yield n.stop()
@defer.inlineCallbacks
def test_tx_limit(self):
class MyNode(p2p.Node):
def __init__(self, df):
p2p.Node.__init__(self, lambda: None, 29333, networks.nets['bitcoin'], {}, set([('127.0.0.1', 9333)]), 0, 0, 0, 0)
self.df = df
self.sent_time = 0
@defer.inlineCallbacks
def got_conn(self, conn):
p2p.Node.got_conn(self, conn)
yield deferral.sleep(.5)
new_mining_txs = dict(self.mining_txs_var.value)
for i in xrange(3):
huge_tx = dict(
version=0,
tx_ins=[],
tx_outs=[dict(
value=0,
script='x'*900000,
)],
lock_time=i,
)
new_mining_txs[bitcoin_data.hash256(bitcoin_data.tx_type.pack(huge_tx))] = huge_tx
self.mining_txs_var.set(new_mining_txs)
self.sent_time = reactor.seconds()
def lost_conn(self, conn, reason):
self.df.callback(None)
try:
p2p.Protocol.max_remembered_txs_size *= 10
df = defer.Deferred()
n = MyNode(df)
n.start()
yield df
if not (n.sent_time <= reactor.seconds() <= n.sent_time + 1):
                raise ValueError('node did not disconnect within 1 second of receiving too much tx data')
yield n.stop()
finally:
p2p.Protocol.max_remembered_txs_size //= 10
| gpl-3.0 |
lshain-android-source/external-chromium_org | third_party/tlslite/tlslite/utils/keyfactory.py | 361 | 8791 | """Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
"""Parse an XML-format key.
The XML format used here is specific to tlslite and cryptoIDlib. The
format can store the public component of a key, or the public and
private components. For example::
<publicKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
</publicKey>
<privateKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
<d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
<p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
<q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
<dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
<dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
<qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
</privateKey>
@type s: str
@param s: A string containing an XML public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the private
key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will be
discarded, so this function will always return a public key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "python":
key = Python_RSAKey.parseXML(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
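# Hedged usage sketch (the file name is illustrative): loading a private key
# produced by "openssl genrsa 2048 > key.pem" typically looks like
#
#   s = open("key.pem").read()
#   key = parsePEMKey(s, private=True)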
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse an XML or PEM-formatted public key.
@type s: str
@param s: A string containing an XML or PEM-encoded public or private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, public=True)
except:
return parseXMLKey(s, public=True)
def parsePrivateKey(s):
"""Parse an XML or PEM-formatted private key.
@type s: str
@param s: A string containing an XML or PEM-encoded private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, private=True)
except:
return parseXMLKey(s, private=True)
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
| bsd-3-clause |
abergeron/pylearn2 | pylearn2/training_algorithms/learning_rule.py | 32 | 17648 | """
A module containing different learning rules for use with the SGD training
algorithm.
"""
import numpy as np
import warnings
from theano.compat import six
from theano import config
from theano import tensor as T
from pylearn2.compat import OrderedDict
from pylearn2.space import NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.utils import sharedX
from pylearn2.utils import wraps
from pylearn2.monitor import Monitor
class LearningRule():
"""
A pylearn2 learning rule is an object which computes new parameter values
given (1) a learning rate (2) current parameter values and (3) the current
estimated gradient.
"""
def add_channels_to_monitor(self, monitor, monitoring_dataset):
"""
Method called by the training algorithm, which allows LearningRules to
add monitoring channels.
Parameters
----------
monitor : pylearn2.monitor.Monitor
Monitor object, to which the rule should register additional
monitoring channels.
monitoring_dataset : pylearn2.datasets.dataset.Dataset or dict
Dataset instance or dictionary whose values are Dataset objects.
"""
pass
def get_updates(self, learning_rate, grads, lr_scalers=None):
"""
Provides the symbolic (theano) description of the updates needed to
perform this learning rule.
Parameters
----------
learning_rate : float
Learning rate coefficient.
grads : dict
A dictionary mapping from the model's parameters to their
gradients.
lr_scalers : dict
A dictionary mapping from the model's parameters to a learning
rate multiplier.
Returns
-------
updates : OrderdDict
A dictionary mapping from the old model parameters, to their new
values after a single iteration of the learning rule.
Notes
-----
e.g. for standard SGD, one would return `sgd_rule_updates` defined
below. Note that such a `LearningRule` object is not implemented, as
these updates are implemented by default when the `learning_rule`
parameter of sgd.SGD.__init__ is None.
.. code-block:: python
sgd_rule_updates = OrderedDict()
for (param, grad) in grads.iteritems():
                sgd_rule_updates[param] = (param - learning_rate *
                                           lr_scalers.get(param, 1.) * grad)
"""
raise NotImplementedError(str(type(self)) + " does not implement "
"get_updates.")
class Momentum(LearningRule):
"""
Implements momentum as described in Section 9 of
"A Practical Guide to Training Restricted Boltzmann Machines",
Geoffrey Hinton.
Parameters are updated by the formula:
inc := momentum * inc - learning_rate * d cost / d param
param := param + inc
Parameters
----------
init_momentum : float
Initial value for the momentum coefficient. It remains fixed during
training unless used with a `MomentumAdjustor`
extension.
nesterov_momentum: bool
Use the accelerated momentum technique described in:
"Advances in Optimizing Recurrent Networks", Yoshua Bengio, et al.
"""
def __init__(self, init_momentum, nesterov_momentum=False):
assert init_momentum >= 0.
assert init_momentum < 1.
self.momentum = sharedX(init_momentum, 'momentum')
self.nesterov_momentum = nesterov_momentum
def add_channels_to_monitor(self, monitor, monitoring_dataset):
"""
Activates monitoring of the momentum.
Parameters
----------
monitor : pylearn2.monitor.Monitor
Monitor object, to which the rule should register additional
monitoring channels.
monitoring_dataset : pylearn2.datasets.dataset.Dataset or dict
Dataset instance or dictionary whose values are Dataset objects.
"""
monitor.add_channel(
name='momentum',
ipt=None,
val=self.momentum,
data_specs=(NullSpace(), ''),
dataset=monitoring_dataset)
def get_updates(self, learning_rate, grads, lr_scalers=None):
"""
Provides the updates for learning with gradient descent + momentum.
Parameters
----------
learning_rate : float
Learning rate coefficient.
grads : dict
A dictionary mapping from the model's parameters to their
gradients.
lr_scalers : dict
A dictionary mapping from the model's parameters to a learning
rate multiplier.
"""
updates = OrderedDict()
for (param, grad) in six.iteritems(grads):
vel = sharedX(param.get_value() * 0.)
assert param.dtype == vel.dtype
assert grad.dtype == param.dtype
if param.name is not None:
vel.name = 'vel_' + param.name
scaled_lr = learning_rate * lr_scalers.get(param, 1.)
updates[vel] = self.momentum * vel - scaled_lr * grad
inc = updates[vel]
if self.nesterov_momentum:
inc = self.momentum * inc - scaled_lr * grad
assert inc.dtype == vel.dtype
updates[param] = param + inc
return updates
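# Illustrative sketch (not part of the original module): a Momentum rule is
# normally handed to the SGD training algorithm through its `learning_rule`
# argument, e.g.
#     from pylearn2.training_algorithms.sgd import SGD
#     algorithm = SGD(learning_rate=.01,
#                     learning_rule=Momentum(init_momentum=.5),
#                     ...)  # remaining SGD arguments (cost, monitor, etc.)
# The momentum coefficient can then be grown over epochs with the
# MomentumAdjustor extension defined below.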
class MomentumAdjustor(TrainExtension):
"""
A TrainExtension that implements a linear momentum schedule.
Parameters
----------
final_momentum : float
The momentum coefficient to use at the end of learning.
start : int
The epoch on which to start growing the momentum coefficient.
saturate : int
        The epoch on which the momentum should reach its final value.
"""
def __init__(self, final_momentum, start, saturate):
if saturate < start:
raise TypeError("Momentum can't saturate at its maximum value " +
"before it starts increasing.")
self.__dict__.update(locals())
del self.self
self._initialized = False
self._count = 0
def setup(self, model, dataset, algorithm):
"""
Initializes the momentum schedule based on epochs_seen.
Parameters
----------
model : pylearn2.models.Model
The model to which the training algorithm is applied.
dataset : pylearn2.datasets.Dataset
The dataset to which the model is applied.
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
Describes how gradients should be updated.
"""
monitor = Monitor.get_monitor(model)
self._count = monitor.get_epochs_seen()
self._apply_momentum(algorithm)
def on_monitor(self, model, dataset, algorithm):
"""
Updates the momentum according to the linear schedule.
Parameters
----------
model : pylearn2.models.Model
The model to which the training algorithm is applied.
dataset : pylearn2.datasets.Dataset
The dataset to which the model is applied.
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
Describes how gradients should be updated.
"""
self._count += 1
self._apply_momentum(algorithm)
def _apply_momentum(self, algorithm):
"""Updates the momentum on algorithm based on the epochs elapsed."""
if not hasattr(algorithm, 'learning_rule'):
raise ValueError(
'For MomentumAdjustor to work, you need to use a '
'TrainingAlgorithm that supports learning rules '
'(for instance, SGD), and specify a learning_rule '
'(for instance, Momentum) for that training algorithm.')
momentum = algorithm.learning_rule.momentum
if not self._initialized:
self._init_momentum = momentum.get_value()
self._initialized = True
momentum.set_value(np.cast[config.floatX](self.current_momentum()))
def current_momentum(self):
"""Returns the momentum currently desired by the schedule."""
w = self.saturate - self.start
if w == 0:
# saturate=start, so just jump straight to final momentum
if self._count >= self.start:
return self.final_momentum
return self._init_momentum
alpha = float(self._count - self.start) / float(w)
if alpha < 0.:
alpha = 0.
if alpha > 1.:
alpha = 1.
return self._init_momentum * (1 - alpha) + alpha * self.final_momentum
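# Worked example of the schedule above (illustrative comment, not part of the
# original source): with start=5, saturate=10, init_momentum=0.5 and
# final_momentum=0.9, after 7 epochs alpha = (7 - 5) / (10 - 5) = 0.4, so
# current_momentum() returns 0.5 * (1 - 0.4) + 0.9 * 0.4 = 0.66.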
class AdaDelta(LearningRule):
"""
Implements the AdaDelta learning rule as described in:
"AdaDelta: An Adaptive Learning Rate Method", Matthew D. Zeiler.
Parameters
----------
decay : float, optional
Decay rate :math:`\\rho` in Algorithm 1 of the aforementioned
paper.
"""
def __init__(self, decay=0.95):
assert decay >= 0.
assert decay < 1.
self.decay = decay
def get_updates(self, learning_rate, grads, lr_scalers=None):
"""
Compute the AdaDelta updates
Parameters
----------
learning_rate : float
Learning rate coefficient.
grads : dict
A dictionary mapping from the model's parameters to their
gradients.
lr_scalers : dict
A dictionary mapping from the model's parameters to a learning
rate multiplier.
"""
updates = OrderedDict()
for param in grads.keys():
# mean_squared_grad := E[g^2]_{t-1}
mean_square_grad = sharedX(param.get_value() * 0.)
# mean_square_dx := E[(\Delta x)^2]_{t-1}
mean_square_dx = sharedX(param.get_value() * 0.)
if param.name is not None:
mean_square_grad.name = 'mean_square_grad_' + param.name
mean_square_dx.name = 'mean_square_dx_' + param.name
# Accumulate gradient
new_mean_squared_grad = (
self.decay * mean_square_grad +
(1 - self.decay) * T.sqr(grads[param])
)
# Compute update
epsilon = lr_scalers.get(param, 1.) * learning_rate
rms_dx_tm1 = T.sqrt(mean_square_dx + epsilon)
rms_grad_t = T.sqrt(new_mean_squared_grad + epsilon)
delta_x_t = - rms_dx_tm1 / rms_grad_t * grads[param]
# Accumulate updates
new_mean_square_dx = (
self.decay * mean_square_dx +
(1 - self.decay) * T.sqr(delta_x_t)
)
# Apply update
updates[mean_square_grad] = new_mean_squared_grad
updates[mean_square_dx] = new_mean_square_dx
updates[param] = param + delta_x_t
return updates
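# Recap of the rule computed above in the paper's notation (illustrative
# comment, not part of the original source):
#     E[g^2]_t  = rho * E[g^2]_{t-1} + (1 - rho) * g_t^2
#     dx_t      = - sqrt(E[dx^2]_{t-1} + eps) / sqrt(E[g^2]_t + eps) * g_t
#     E[dx^2]_t = rho * E[dx^2]_{t-1} + (1 - rho) * dx_t^2
#     x_t       = x_{t-1} + dx_t
# where `rho` is self.decay and, in this implementation, `eps` is the scaled
# learning rate (lr_scalers.get(param, 1.) * learning_rate).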
class AdaGrad(LearningRule):
"""
Implements the AdaGrad learning rule as described in:
"Adaptive subgradient methods for online learning and
stochastic optimization", Duchi J, Hazan E, Singer Y.
Parameters
----------
max_scaling: float, optional
Restrict the gradient scaling coefficient to values
        below `max_scaling`. This prevents corner cases (like all-zero weights)
        from generating NaNs (see #1496).
"""
def __init__(self, max_scaling=1e5):
assert max_scaling > 0
self.eps = 1. / max_scaling
def get_updates(self, learning_rate, grads, lr_scalers=None):
"""
Compute the AdaGrad updates
Parameters
----------
learning_rate : float
Learning rate coefficient.
grads : dict
A dictionary mapping from the model's parameters to their
gradients.
lr_scalers : dict
A dictionary mapping from the model's parameters to a learning
rate multiplier.
"""
updates = OrderedDict()
for param in grads.keys():
# sum_square_grad := \sum g^2
sum_square_grad = sharedX(param.get_value() * 0.)
if param.name is not None:
sum_square_grad.name = 'sum_square_grad_' + param.name
# Accumulate gradient
new_sum_squared_grad = (
sum_square_grad + T.sqr(grads[param])
)
# Compute update
epsilon = lr_scalers.get(param, 1.) * learning_rate
scale = T.maximum(self.eps, T.sqrt(new_sum_squared_grad))
delta_x_t = (-epsilon / scale * grads[param])
# Apply update
updates[sum_square_grad] = new_sum_squared_grad
updates[param] = param + delta_x_t
return updates
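# Illustrative numeric note (not part of the original source): the squared
# gradients only accumulate, so per-parameter steps shrink over time.  With
# learning_rate=0.1 and an accumulated sum_square_grad of 4.0, a unit gradient
# yields a step of 0.1 / max(eps, sqrt(4.0)) = 0.05; the eps = 1 / max_scaling
# floor keeps the division finite while the accumulator is still zero.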
class RMSProp(LearningRule):
"""
Implements the RMSProp learning rule.
The RMSProp learning rule is described by Hinton in `lecture 6
<http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf>`
of the Coursera Neural Networks for Machine Learning course.
In short, Hinton suggests "[the] magnitude of the gradient can be very
different for different weights and can change during learning. This
makes it hard to choose a global learning rate." RMSProp solves this
problem by "[dividing] the learning rate for a weight by a running
average of the magnitudes of recent gradients for that weight."
Parameters
----------
decay : float, optional
Decay constant similar to that used in AdaDelta and Momentum methods.
max_scaling: float, optional
Restrict the RMSProp gradient scaling coefficient to values
below `max_scaling`.
Notes
-----
An instance of this LearningRule should only be used with one
TrainingAlgorithm, and its get_updates method should be called
only once. This is required in order to make the monitoring
channels correctly report the moving averages.
"""
def __init__(self, decay=0.9, max_scaling=1e5):
assert 0. <= decay < 1.
assert max_scaling > 0
self.decay = sharedX(decay, 'decay')
self.epsilon = 1. / max_scaling
self.mean_square_grads = OrderedDict()
@wraps(LearningRule.add_channels_to_monitor)
def add_channels_to_monitor(self, monitor, monitoring_dataset):
"""
The channels added are the min, mean, and max of the
mean_square_grad of each parameter.
"""
channel_mapping = {
'_min': T.min,
'_max': T.max,
'_mean': T.mean
}
for mean_square_grad in self.mean_square_grads.values():
for suffix, op in channel_mapping.items():
monitor.add_channel(
name=(mean_square_grad.name + suffix),
ipt=None,
val=op(mean_square_grad),
data_specs=(NullSpace(), ''),
dataset=monitoring_dataset)
return
def get_updates(self, learning_rate, grads, lr_scalers=None):
"""
Provides the symbolic (theano) description of the updates needed to
perform this learning rule. See Notes for side-effects.
Parameters
----------
learning_rate : float
Learning rate coefficient.
grads : dict
A dictionary mapping from the model's parameters to their
gradients.
lr_scalers : dict
A dictionary mapping from the model's parameters to a learning
rate multiplier.
Returns
-------
        updates : OrderedDict
            A dictionary mapping from the old model parameters to their new
            values after a single iteration of the learning rule.
Notes
-----
This method has the side effect of storing the moving average
of the square gradient in `self.mean_square_grads`. This is
necessary in order for the monitoring channels to be able
to track the value of these moving averages.
Therefore, this method should only get called once for each
instance of RMSProp.
"""
updates = OrderedDict()
for param in grads:
# mean_squared_grad := E[g^2]_{t-1}
mean_square_grad = sharedX(param.get_value() * 0.)
if param.name is None:
raise ValueError("Model parameters must be named.")
mean_square_grad.name = 'mean_square_grad_' + param.name
if param.name in self.mean_square_grads:
warnings.warn("Calling get_updates more than once on the "
"gradients of `%s` may make monitored values "
"incorrect." % param.name)
# Store variable in self.mean_square_grads for monitoring.
self.mean_square_grads[param.name] = mean_square_grad
# Accumulate gradient
new_mean_squared_grad = (self.decay * mean_square_grad +
(1 - self.decay) * T.sqr(grads[param]))
# Compute update
scaled_lr = lr_scalers.get(param, 1.) * learning_rate
rms_grad_t = T.sqrt(new_mean_squared_grad)
rms_grad_t = T.maximum(rms_grad_t, self.epsilon)
delta_x_t = - scaled_lr * grads[param] / rms_grad_t
# Apply update
updates[mean_square_grad] = new_mean_squared_grad
updates[param] = param + delta_x_t
return updates
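# Recap of the rule computed above (illustrative comment, not part of the
# original source):
#     MeanSquare(w, t) = decay * MeanSquare(w, t-1) + (1 - decay) * grad_w^2
#     w_t = w_{t-1} - lr * grad_w / max(sqrt(MeanSquare(w, t)), eps)
# where eps = 1 / max_scaling bounds the scaling factor and `lr` already
# includes any per-parameter lr_scalers multiplier.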
| bsd-3-clause |
optima-ict/odoo | openerp/addons/base/module/module.py | 11 | 38925 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from docutils import nodes
from docutils.core import publish_string
from docutils.transforms import Transform, writer_aux
from docutils.writers.html4css1 import Writer
import importlib
import logging
from operator import attrgetter
import os
import re
import shutil
import tempfile
import urllib
import urllib2
import urlparse
import zipfile
import zipimport
import lxml.html
from openerp.exceptions import UserError
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO # NOQA
import openerp
import openerp.exceptions
from openerp import modules, tools
from openerp.modules.db import create_categories
from openerp.modules import get_module_resource
from openerp.tools import ormcache
from openerp.tools.parse_version import parse_version
from openerp.tools.translate import _
from openerp.tools import html_sanitize
from openerp.osv import osv, orm, fields
from openerp import api, fields as fields2
_logger = logging.getLogger(__name__)
ACTION_DICT = {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'base.module.upgrade',
'target': 'new',
'type': 'ir.actions.act_window',
}
def backup(path, raise_exception=True):
path = os.path.normpath(path)
if not os.path.exists(path):
if not raise_exception:
return None
        raise OSError('path does not exist')
cnt = 1
while True:
bck = '%s~%d' % (path, cnt)
if not os.path.exists(bck):
shutil.move(path, bck)
return bck
cnt += 1
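# Illustrative usage (not part of the original source): module installation
# below backs up an addon directory before replacing it, e.g.
#     bck = backup('/path/to/addons/sale')   # moves it to '/path/to/addons/sale~1'
# A later call on a re-created directory yields '.../sale~2', and
# backup('/missing/path', raise_exception=False) simply returns None.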
class module_category(osv.osv):
_name = "ir.module.category"
_description = "Application"
def _module_nbr(self, cr, uid, ids, prop, unknow_none, context):
cr.execute('SELECT category_id, COUNT(*) \
FROM ir_module_module \
WHERE category_id IN %(ids)s \
OR category_id IN (SELECT id \
FROM ir_module_category \
WHERE parent_id IN %(ids)s) \
GROUP BY category_id', {'ids': tuple(ids)}
)
result = dict(cr.fetchall())
for id in ids:
cr.execute('select id from ir_module_category where parent_id=%s', (id,))
result[id] = sum([result.get(c, 0) for (c,) in cr.fetchall()],
result.get(id, 0))
return result
_columns = {
'name': fields.char("Name", required=True, translate=True, select=True),
'parent_id': fields.many2one('ir.module.category', 'Parent Application', select=True),
'child_ids': fields.one2many('ir.module.category', 'parent_id', 'Child Applications'),
'module_nr': fields.function(_module_nbr, string='Number of Apps', type='integer'),
'module_ids': fields.one2many('ir.module.module', 'category_id', 'Modules'),
'description': fields.text("Description", translate=True),
'sequence': fields.integer('Sequence'),
'visible': fields.boolean('Visible'),
'xml_id': fields.function(osv.osv.get_external_id, type='char', string="External ID"),
}
_order = 'name'
_defaults = {
'visible': 1,
}
class MyFilterMessages(Transform):
"""
Custom docutils transform to remove `system message` for a document and
generate warnings.
(The standard filter removes them based on some `report_level` passed in
the `settings_override` dictionary, but if we use it, we can't see them
and generate warnings.)
"""
default_priority = 870
def apply(self):
for node in self.document.traverse(nodes.system_message):
_logger.warning("docutils' system message present: %s", str(node))
node.parent.remove(node)
class MyWriter(Writer):
"""
    Custom docutils html4css1 writer that doesn't add the warnings to the
output document.
"""
def get_transforms(self):
return [MyFilterMessages, writer_aux.Admonitions]
class module(osv.osv):
_name = "ir.module.module"
_rec_name = "shortdesc"
_description = "Module"
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(module, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=False)
result = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'action_server_module_immediate_install')[1]
if view_type == 'form':
if res.get('toolbar',False):
list = [rec for rec in res['toolbar']['action'] if rec.get('id', False) != result]
res['toolbar'] = {'action': list}
return res
@classmethod
def get_module_info(cls, name):
info = {}
try:
info = modules.load_information_from_description_file(name)
except Exception:
            _logger.debug('Error when trying to fetch information for '
'module %s', name, exc_info=True)
return info
def _get_desc(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = dict.fromkeys(ids, '')
for module in self.browse(cr, uid, ids, context=context):
path = get_module_resource(module.name, 'static/description/index.html')
if path:
with tools.file_open(path, 'rb') as desc_file:
doc = desc_file.read()
html = lxml.html.document_fromstring(doc)
for element, attribute, link, pos in html.iterlinks():
if element.get('src') and not '//' in element.get('src') and not 'static/' in element.get('src'):
element.set('src', "/%s/static/description/%s" % (module.name, element.get('src')))
res[module.id] = html_sanitize(lxml.html.tostring(html))
else:
overrides = {
'embed_stylesheet': False,
'doctitle_xform': False,
'output_encoding': 'unicode',
'xml_declaration': False,
}
output = publish_string(source=module.description or '', settings_overrides=overrides, writer=MyWriter())
res[module.id] = html_sanitize(output)
return res
def _get_latest_version(self, cr, uid, ids, field_name=None, arg=None, context=None):
default_version = modules.adapt_version('1.0')
res = dict.fromkeys(ids, default_version)
for m in self.browse(cr, uid, ids):
res[m.id] = self.get_module_info(m.name).get('version', default_version)
return res
def _get_views(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = {}
model_data_obj = self.pool.get('ir.model.data')
dmodels = []
if field_name is None or 'views_by_module' in field_name:
dmodels.append('ir.ui.view')
if field_name is None or 'reports_by_module' in field_name:
dmodels.append('ir.actions.report.xml')
if field_name is None or 'menus_by_module' in field_name:
dmodels.append('ir.ui.menu')
assert dmodels, "no models for %s" % field_name
for module_rec in self.browse(cr, uid, ids, context=context):
res_mod_dic = res[module_rec.id] = {
'menus_by_module': [],
'reports_by_module': [],
'views_by_module': []
}
# Skip uninstalled modules below, no data to find anyway.
if module_rec.state not in ('installed', 'to upgrade', 'to remove'):
continue
# then, search and group ir.model.data records
imd_models = dict([(m, []) for m in dmodels])
imd_ids = model_data_obj.search(cr, uid, [
('module', '=', module_rec.name),
('model', 'in', tuple(dmodels))
])
for imd_res in model_data_obj.read(cr, uid, imd_ids, ['model', 'res_id'], context=context):
imd_models[imd_res['model']].append(imd_res['res_id'])
def browse(model):
M = self.pool[model]
# as this method is called before the module update, some xmlid may be invalid at this stage
            # explicitly filter records before reading them
ids = M.exists(cr, uid, imd_models.get(model, []), context)
return M.browse(cr, uid, ids, context)
def format_view(v):
aa = v.inherit_id and '* INHERIT ' or ''
return '%s%s (%s)' % (aa, v.name, v.type)
res_mod_dic['views_by_module'] = map(format_view, browse('ir.ui.view'))
res_mod_dic['reports_by_module'] = map(attrgetter('name'), browse('ir.actions.report.xml'))
res_mod_dic['menus_by_module'] = map(attrgetter('complete_name'), browse('ir.ui.menu'))
for key in res.iterkeys():
for k, v in res[key].iteritems():
res[key][k] = "\n".join(sorted(v))
return res
def _get_icon_image(self, cr, uid, ids, field_name=None, arg=None, context=None):
res = dict.fromkeys(ids, '')
for module in self.browse(cr, uid, ids, context=context):
path = get_module_resource(module.name, 'static', 'description', 'icon.png')
if path:
image_file = tools.file_open(path, 'rb')
try:
res[module.id] = image_file.read().encode('base64')
finally:
image_file.close()
return res
_columns = {
'name': fields.char("Technical Name", readonly=True, required=True, select=True),
'category_id': fields.many2one('ir.module.category', 'Category', readonly=True, select=True),
'shortdesc': fields.char('Module Name', readonly=True, translate=True),
'summary': fields.char('Summary', readonly=True, translate=True),
'description': fields.text("Description", readonly=True, translate=True),
'description_html': fields.function(_get_desc, string='Description HTML', type='html', method=True, readonly=True),
'author': fields.char("Author", readonly=True),
'maintainer': fields.char('Maintainer', readonly=True),
'contributors': fields.text('Contributors', readonly=True),
'website': fields.char("Website", readonly=True),
        # attention: Incorrect field names !!
        #   installed_version refers to the latest version (the one on disk)
        #   latest_version refers to the installed version (the one in database)
        #   published_version refers to the version available on the repository
'installed_version': fields.function(_get_latest_version, string='Latest Version', type='char'),
'latest_version': fields.char('Installed Version', readonly=True),
'published_version': fields.char('Published Version', readonly=True),
'url': fields.char('URL', readonly=True),
'sequence': fields.integer('Sequence'),
'dependencies_id': fields.one2many('ir.module.module.dependency', 'module_id', 'Dependencies', readonly=True),
'auto_install': fields.boolean('Automatic Installation',
help='An auto-installable module is automatically installed by the '
'system when all its dependencies are satisfied. '
'If the module has no dependency, it is always installed.'),
'state': fields.selection([
('uninstallable', 'Not Installable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed')
], string='Status', readonly=True, select=True),
'demo': fields.boolean('Demo Data', readonly=True),
'license': fields.selection([
('GPL-2', 'GPL Version 2'),
('GPL-2 or any later version', 'GPL-2 or later version'),
('GPL-3', 'GPL Version 3'),
('GPL-3 or any later version', 'GPL-3 or later version'),
('AGPL-3', 'Affero GPL-3'),
('LGPL-3', 'LGPL Version 3'),
('Other OSI approved licence', 'Other OSI Approved Licence'),
('OEEL-1', 'Odoo Enterprise Edition License v1.0'),
('Other proprietary', 'Other Proprietary')
], string='License', readonly=True),
'menus_by_module': fields.function(_get_views, string='Menus', type='text', multi="meta", store=True),
'reports_by_module': fields.function(_get_views, string='Reports', type='text', multi="meta", store=True),
'views_by_module': fields.function(_get_views, string='Views', type='text', multi="meta", store=True),
'application': fields.boolean('Application', readonly=True),
'icon': fields.char('Icon URL'),
'icon_image': fields.function(_get_icon_image, string='Icon', type="binary"),
}
_defaults = {
'state': 'uninstalled',
'sequence': 100,
'demo': False,
'license': 'LGPL-3',
}
_order = 'sequence,name'
def _name_uniq_msg(self, cr, uid, ids, context=None):
return _('The name of the module must be unique !')
_sql_constraints = [
('name_uniq', 'UNIQUE (name)', _name_uniq_msg),
]
def unlink(self, cr, uid, ids, context=None):
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
mod_names = []
for mod in self.read(cr, uid, ids, ['state', 'name'], context):
if mod['state'] in ('installed', 'to upgrade', 'to remove', 'to install'):
                raise UserError(_('You are trying to remove a module that is installed or will be installed'))
mod_names.append(mod['name'])
#Removing the entry from ir_model_data
#ids_meta = self.pool.get('ir.model.data').search(cr, uid, [('name', '=', 'module_meta_information'), ('module', 'in', mod_names)])
#if ids_meta:
# self.pool.get('ir.model.data').unlink(cr, uid, ids_meta, context)
self.clear_caches()
return super(module, self).unlink(cr, uid, ids, context=context)
@staticmethod
def _check_external_dependencies(terp):
depends = terp.get('external_dependencies')
if not depends:
return
for pydep in depends.get('python', []):
try:
importlib.import_module(pydep)
except ImportError:
raise ImportError('No module named %s' % (pydep,))
for binary in depends.get('bin', []):
try:
tools.find_in_path(binary)
except IOError:
raise Exception('Unable to find %r in path' % (binary,))
@classmethod
def check_external_dependencies(cls, module_name, newstate='to install'):
terp = cls.get_module_info(module_name)
try:
cls._check_external_dependencies(terp)
except Exception, e:
if newstate == 'to install':
msg = _('Unable to install module "%s" because an external dependency is not met: %s')
elif newstate == 'to upgrade':
msg = _('Unable to upgrade module "%s" because an external dependency is not met: %s')
else:
msg = _('Unable to process module "%s" because an external dependency is not met: %s')
raise UserError(msg % (module_name, e.args[0]))
@api.multi
def state_update(self, newstate, states_to_update, level=100):
if level < 1:
raise UserError(_('Recursion error in modules dependencies !'))
# whether some modules are installed with demo data
demo = False
for module in self:
# determine dependency modules to update/others
update_mods, ready_mods = self.browse(), self.browse()
for dep in module.dependencies_id:
if dep.state == 'unknown':
                    raise UserError(_("You are trying to install module '%s', which depends on module '%s'.\nBut the latter module is not available in your system.") % (module.name, dep.name,))
if dep.depend_id.state == newstate:
ready_mods += dep.depend_id
else:
update_mods += dep.depend_id
# update dependency modules that require it, and determine demo for module
update_demo = update_mods.state_update(newstate, states_to_update, level=level-1)
module_demo = module.demo or update_demo or any(mod.demo for mod in ready_mods)
demo = demo or module_demo
# check dependencies and update module itself
self.check_external_dependencies(module.name, newstate)
if module.state in states_to_update:
module.write({'state': newstate, 'demo': module_demo})
return demo
@api.multi
def button_install(self):
# domain to select auto-installable (but not yet installed) modules
auto_domain = [('state', '=', 'uninstalled'), ('auto_install', '=', True)]
# determine whether an auto-install module must be installed:
# - all its dependencies are installed or to be installed,
# - at least one dependency is 'to install'
install_states = frozenset(('installed', 'to install', 'to upgrade'))
def must_install(module):
states = set(dep.state for dep in module.dependencies_id)
return states <= install_states and 'to install' in states
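        # e.g. a module whose dependencies are all in {'installed', 'to install'}
        # and at least one of them is 'to install' must be auto-installed, while
        # a module with a dependency still 'uninstalled' must not (illustrative
        # comment, not part of the original source).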
modules = self
while modules:
# Mark the given modules and their dependencies to be installed.
modules.state_update('to install', ['uninstalled'])
# Determine which auto-installable modules must be installed.
modules = self.search(auto_domain).filtered(must_install)
# retrieve the installed (or to be installed) theme modules
theme_category = self.env.ref('base.module_category_theme')
theme_modules = self.search([
('state', 'in', list(install_states)),
('category_id', 'child_of', [theme_category.id]),
])
# determine all theme modules that mods depends on, including mods
def theme_deps(mods):
deps = mods.mapped('dependencies_id.depend_id')
while deps:
mods |= deps
deps = deps.mapped('dependencies_id.depend_id')
return mods & theme_modules
if any(module.state == 'to install' for module in theme_modules):
# check: the installation is valid if all installed theme modules
# correspond to one theme module and all its theme dependencies
if not any(theme_deps(module) == theme_modules for module in theme_modules):
state_labels = dict(self.fields_get(['state'])['state']['selection'])
themes_list = [
"- %s (%s)" % (module.shortdesc, state_labels[module.state])
for module in theme_modules
]
raise UserError(_(
"You are trying to install incompatible themes:\n%s\n\n" \
"Please uninstall your current theme before installing another one.\n"
"Warning: switching themes may significantly alter the look of your current website pages!"
) % ("\n".join(themes_list)))
return dict(ACTION_DICT, name=_('Install'))
def button_immediate_install(self, cr, uid, ids, context=None):
""" Installs the selected module(s) immediately and fully,
returns the next res.config action to execute
:param ids: identifiers of the modules to install
:returns: next res.config item to execute
:rtype: dict[str, object]
"""
return self._button_immediate_function(cr, uid, ids, self.button_install, context=context)
def button_install_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'uninstalled', 'demo': False})
return True
def module_uninstall(self, cr, uid, ids, context=None):
"""Perform the various steps required to uninstall a module completely
including the deletion of all database structures created by the module:
tables, columns, constraints, etc."""
ir_model_data = self.pool.get('ir.model.data')
modules_to_remove = [m.name for m in self.browse(cr, uid, ids, context)]
ir_model_data._module_data_uninstall(cr, uid, modules_to_remove, context)
self.write(cr, uid, ids, {'state': 'uninstalled', 'latest_version': False})
return True
def downstream_dependencies(self, cr, uid, ids, known_dep_ids=None,
exclude_states=['uninstalled', 'uninstallable', 'to remove'],
context=None):
"""Return the ids of all modules that directly or indirectly depend
on the given module `ids`, and that satisfy the `exclude_states`
filter"""
if not ids:
return []
known_dep_ids = set(known_dep_ids or [])
cr.execute('''SELECT DISTINCT m.id
FROM
ir_module_module_dependency d
JOIN
ir_module_module m ON (d.module_id=m.id)
WHERE
d.name IN (SELECT name from ir_module_module where id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s ''',
(tuple(ids), tuple(exclude_states), tuple(known_dep_ids or ids)))
new_dep_ids = set([m[0] for m in cr.fetchall()])
missing_mod_ids = new_dep_ids - known_dep_ids
known_dep_ids |= new_dep_ids
if missing_mod_ids:
known_dep_ids |= set(self.downstream_dependencies(cr, uid, list(missing_mod_ids),
known_dep_ids, exclude_states, context))
return list(known_dep_ids)
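    # Illustrative note (not part of the original source): if `sale` depends on
    # `account` and `account` depends on `base`, downstream_dependencies() on
    # the id of `base` walks ir_module_module_dependency recursively and
    # returns the ids of `account` and `sale`, minus any module whose state is
    # in `exclude_states`.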
def upstream_dependencies(self, cr, uid, ids, known_dep_ids=None,
exclude_states=['installed', 'uninstallable', 'to remove'],
context=None):
""" Return the dependency tree of modules of the given `ids`, and that
satisfy the `exclude_states` filter """
if not ids:
return []
known_dep_ids = set(known_dep_ids or [])
cr.execute('''SELECT DISTINCT m.id
FROM
ir_module_module_dependency d
JOIN
ir_module_module m ON (d.module_id=m.id)
WHERE
m.name IN (SELECT name from ir_module_module_dependency where module_id in %s) AND
m.state NOT IN %s AND
m.id NOT IN %s ''',
(tuple(ids), tuple(exclude_states), tuple(known_dep_ids or ids)))
new_dep_ids = set([m[0] for m in cr.fetchall()])
missing_mod_ids = new_dep_ids - known_dep_ids
known_dep_ids |= new_dep_ids
if missing_mod_ids:
known_dep_ids |= set(self.upstream_dependencies(cr, uid, list(missing_mod_ids),
known_dep_ids, exclude_states, context))
return list(known_dep_ids)
def _button_immediate_function(self, cr, uid, ids, function, context=None):
function(cr, uid, ids, context=context)
cr.commit()
api.Environment.reset()
registry = openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
config = registry['res.config'].next(cr, uid, [], context=context) or {}
if config.get('type') not in ('ir.actions.act_window_close',):
return config
# reload the client; open the first available root menu
menu_obj = registry['ir.ui.menu']
menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
return {
'type': 'ir.actions.client',
'tag': 'reload',
'params': {'menu_id': menu_ids and menu_ids[0] or False}
}
#TODO remove me in master, not called anymore
def button_immediate_uninstall(self, cr, uid, ids, context=None):
"""
Uninstall the selected module(s) immediately and fully,
returns the next res.config action to execute
"""
return self._button_immediate_function(cr, uid, ids, self.button_uninstall, context=context)
def button_uninstall(self, cr, uid, ids, context=None):
if any(m.name == 'base' for m in self.browse(cr, uid, ids, context=context)):
raise UserError(_("The `base` module cannot be uninstalled"))
dep_ids = self.downstream_dependencies(cr, uid, ids, context=context)
self.write(cr, uid, ids + dep_ids, {'state': 'to remove'})
return dict(ACTION_DICT, name=_('Uninstall'))
def button_uninstall_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'installed'})
return True
def button_immediate_upgrade(self, cr, uid, ids, context=None):
"""
Upgrade the selected module(s) immediately and fully,
return the next res.config action to execute
"""
return self._button_immediate_function(cr, uid, ids, self.button_upgrade, context=context)
def button_upgrade(self, cr, uid, ids, context=None):
depobj = self.pool.get('ir.module.module.dependency')
todo = list(self.browse(cr, uid, ids, context=context))
self.update_list(cr, uid)
i = 0
while i < len(todo):
mod = todo[i]
i += 1
if mod.state not in ('installed', 'to upgrade'):
                raise UserError(_("Cannot upgrade module '%s'. It is not installed.") % (mod.name,))
self.check_external_dependencies(mod.name, 'to upgrade')
iids = depobj.search(cr, uid, [('name', '=', mod.name)], context=context)
for dep in depobj.browse(cr, uid, iids, context=context):
if dep.module_id.state == 'installed' and dep.module_id not in todo:
todo.append(dep.module_id)
ids = map(lambda x: x.id, todo)
self.write(cr, uid, ids, {'state': 'to upgrade'}, context=context)
to_install = []
for mod in todo:
for dep in mod.dependencies_id:
if dep.state == 'unknown':
                    raise UserError(_('You are trying to upgrade a module that depends on the module: %s.\nBut this module is not available in your system.') % (dep.name,))
if dep.state == 'uninstalled':
ids2 = self.search(cr, uid, [('name', '=', dep.name)])
to_install.extend(ids2)
self.button_install(cr, uid, to_install, context=context)
return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
def button_upgrade_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'installed'})
return True
@staticmethod
def get_values_from_terp(terp):
return {
'description': terp.get('description', ''),
'shortdesc': terp.get('name', ''),
'author': terp.get('author', 'Unknown'),
'maintainer': terp.get('maintainer', False),
'contributors': ', '.join(terp.get('contributors', [])) or False,
'website': terp.get('website', ''),
'license': terp.get('license', 'LGPL-3'),
'sequence': terp.get('sequence', 100),
'application': terp.get('application', False),
'auto_install': terp.get('auto_install', False),
'icon': terp.get('icon', False),
'summary': terp.get('summary', ''),
}
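    # Illustrative example (not part of the original source): for a manifest
    # such as
    #     {'name': 'Sales', 'author': 'OpenERP SA', 'sequence': 15,
    #      'application': True, 'depends': ['base']}
    # this helper returns shortdesc='Sales', author='OpenERP SA', sequence=15,
    # application=True, and falls back to the defaults (e.g. license='LGPL-3')
    # for the keys that are absent from the manifest.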
def create(self, cr, uid, vals, context=None):
new_id = super(module, self).create(cr, uid, vals, context=context)
module_metadata = {
'name': 'module_%s' % vals['name'],
'model': 'ir.module.module',
'module': 'base',
'res_id': new_id,
'noupdate': True,
}
self.pool['ir.model.data'].create(cr, uid, module_metadata)
return new_id
# update the list of available packages
def update_list(self, cr, uid, context=None):
res = [0, 0] # [update, add]
default_version = modules.adapt_version('1.0')
known_mods = self.browse(cr, uid, self.search(cr, uid, []))
known_mods_names = dict([(m.name, m) for m in known_mods])
# iterate through detected modules and update/create them in db
for mod_name in modules.get_modules():
mod = known_mods_names.get(mod_name)
terp = self.get_module_info(mod_name)
values = self.get_values_from_terp(terp)
if mod:
updated_values = {}
for key in values:
old = getattr(mod, key)
updated = isinstance(values[key], basestring) and tools.ustr(values[key]) or values[key]
if (old or updated) and updated != old:
updated_values[key] = values[key]
if terp.get('installable', True) and mod.state == 'uninstallable':
updated_values['state'] = 'uninstalled'
if parse_version(terp.get('version', default_version)) > parse_version(mod.latest_version or default_version):
res[0] += 1
if updated_values:
self.write(cr, uid, mod.id, updated_values)
else:
mod_path = modules.get_module_path(mod_name)
if not mod_path:
continue
if not terp or not terp.get('installable', True):
continue
id = self.create(cr, uid, dict(name=mod_name, state='uninstalled', **values))
mod = self.browse(cr, uid, id)
res[1] += 1
self._update_dependencies(cr, uid, mod, terp.get('depends', []))
self._update_category(cr, uid, mod, terp.get('category', 'Uncategorized'))
return res
def download(self, cr, uid, ids, download=True, context=None):
return []
def install_from_urls(self, cr, uid, urls, context=None):
if not self.pool['res.users'].has_group(cr, uid, 'base.group_system'):
raise openerp.exceptions.AccessDenied()
apps_server = urlparse.urlparse(self.get_apps_server(cr, uid, context=context))
OPENERP = openerp.release.product_name.lower()
tmp = tempfile.mkdtemp()
_logger.debug('Install from url: %r', urls)
try:
# 1. Download & unzip missing modules
for module_name, url in urls.items():
if not url:
                    continue  # nothing to download, local version is already the latest one
up = urlparse.urlparse(url)
if up.scheme != apps_server.scheme or up.netloc != apps_server.netloc:
raise openerp.exceptions.AccessDenied()
try:
_logger.info('Downloading module `%s` from OpenERP Apps', module_name)
content = urllib2.urlopen(url).read()
except Exception:
_logger.exception('Failed to fetch module %s', module_name)
raise UserError(_('The `%s` module appears to be unavailable at the moment, please try again later.') % module_name)
else:
zipfile.ZipFile(StringIO(content)).extractall(tmp)
assert os.path.isdir(os.path.join(tmp, module_name))
# 2a. Copy/Replace module source in addons path
for module_name, url in urls.items():
if module_name == OPENERP or not url:
                    continue  # OPENERP is a special case, handled below, and no URL means a local module
module_path = modules.get_module_path(module_name, downloaded=True, display_warning=False)
bck = backup(module_path, False)
_logger.info('Copy downloaded module `%s` to `%s`', module_name, module_path)
shutil.move(os.path.join(tmp, module_name), module_path)
if bck:
shutil.rmtree(bck)
# 2b. Copy/Replace server+base module source if downloaded
if urls.get(OPENERP, None):
# special case. it contains the server and the base module.
# extract path is not the same
base_path = os.path.dirname(modules.get_module_path('base'))
# copy all modules in the SERVER/openerp/addons directory to the new "openerp" module (except base itself)
for d in os.listdir(base_path):
if d != 'base' and os.path.isdir(os.path.join(base_path, d)):
destdir = os.path.join(tmp, OPENERP, 'addons', d) # XXX 'openerp' subdirectory ?
shutil.copytree(os.path.join(base_path, d), destdir)
# then replace the server by the new "base" module
server_dir = openerp.tools.config['root_path'] # XXX or dirname()
bck = backup(server_dir)
_logger.info('Copy downloaded module `openerp` to `%s`', server_dir)
shutil.move(os.path.join(tmp, OPENERP), server_dir)
#if bck:
# shutil.rmtree(bck)
self.update_list(cr, uid, context=context)
with_urls = [m for m, u in urls.items() if u]
downloaded_ids = self.search(cr, uid, [('name', 'in', with_urls)], context=context)
already_installed = self.search(cr, uid, [('id', 'in', downloaded_ids), ('state', '=', 'installed')], context=context)
to_install_ids = self.search(cr, uid, [('name', 'in', urls.keys()), ('state', '=', 'uninstalled')], context=context)
post_install_action = self.button_immediate_install(cr, uid, to_install_ids, context=context)
if already_installed:
# in this case, force server restart to reload python code...
cr.commit()
openerp.service.server.restart()
return {
'type': 'ir.actions.client',
'tag': 'home',
'params': {'wait': True},
}
return post_install_action
finally:
shutil.rmtree(tmp)
def get_apps_server(self, cr, uid, context=None):
return tools.config.get('apps_server', 'https://apps.openerp.com/apps')
def _update_dependencies(self, cr, uid, mod_browse, depends=None):
if depends is None:
depends = []
existing = set(x.name for x in mod_browse.dependencies_id)
needed = set(depends)
for dep in (needed - existing):
cr.execute('INSERT INTO ir_module_module_dependency (module_id, name) values (%s, %s)', (mod_browse.id, dep))
for dep in (existing - needed):
cr.execute('DELETE FROM ir_module_module_dependency WHERE module_id = %s and name = %s', (mod_browse.id, dep))
self.invalidate_cache(cr, uid, ['dependencies_id'], [mod_browse.id])
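    # Illustrative note (not part of the original source): the two set
    # differences above keep ir_module_module_dependency in sync with the
    # manifest.  If the stored dependencies are {'base', 'web'} and the
    # manifest now declares ['base', 'sale'], then 'sale' is inserted and
    # 'web' is deleted, while 'base' is left untouched.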
def _update_category(self, cr, uid, mod_browse, category='Uncategorized'):
current_category = mod_browse.category_id
current_category_path = []
while current_category:
current_category_path.insert(0, current_category.name)
current_category = current_category.parent_id
categs = category.split('/')
if categs != current_category_path:
cat_id = create_categories(cr, categs)
mod_browse.write({'category_id': cat_id})
def update_translations(self, cr, uid, ids, filter_lang=None, context=None):
if not filter_lang:
res_lang = self.pool.get('res.lang')
lang_ids = res_lang.search(cr, uid, [('translatable', '=', True)])
filter_lang = [lang.code for lang in res_lang.browse(cr, uid, lang_ids)]
elif not isinstance(filter_lang, (list, tuple)):
filter_lang = [filter_lang]
modules = [m.name for m in self.browse(cr, uid, ids) if m.state in ('installed', 'to install', 'to upgrade')]
self.pool.get('ir.translation').load_module_terms(cr, modules, filter_lang, context=context)
def check(self, cr, uid, ids, context=None):
for mod in self.browse(cr, uid, ids, context=context):
if not mod.description:
_logger.warning('module %s: description is empty !', mod.name)
@api.model
@ormcache()
def _installed(self):
""" Return the set of installed modules as a dictionary {name: id} """
return {
module.name: module.id
for module in self.sudo().search([('state', '=', 'installed')])
}
DEP_STATES = [
('uninstallable', 'Uninstallable'),
('uninstalled', 'Not Installed'),
('installed', 'Installed'),
('to upgrade', 'To be upgraded'),
('to remove', 'To be removed'),
('to install', 'To be installed'),
('unknown', 'Unknown'),
]
class module_dependency(osv.Model):
_name = "ir.module.module.dependency"
_description = "Module dependency"
# the dependency name
name = fields2.Char(index=True)
# the module that depends on it
module_id = fields2.Many2one('ir.module.module', 'Module', ondelete='cascade')
# the module corresponding to the dependency, and its status
depend_id = fields2.Many2one('ir.module.module', 'Dependency', compute='_compute_depend')
state = fields2.Selection(DEP_STATES, string='Status', compute='_compute_state')
@api.multi
@api.depends('name')
def _compute_depend(self):
# retrieve all modules corresponding to the dependency names
names = list(set(dep.name for dep in self))
mods = self.env['ir.module.module'].search([('name', 'in', names)])
# index modules by name, and assign dependencies
name_mod = dict((mod.name, mod) for mod in mods)
for dep in self:
dep.depend_id = name_mod.get(dep.name)
@api.one
@api.depends('depend_id.state')
def _compute_state(self):
self.state = self.depend_id.state or 'unknown'
| agpl-3.0 |
ikoula/cloudstack | test/integration/smoke/test_routers_iptables_default_policy.py | 1 | 24995 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test VPC nics after router is destroyed """
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (stopRouter,
startRouter,
destroyRouter,
Account,
VpcOffering,
VPC,
ServiceOffering,
NATRule,
NetworkACL,
PublicIPAddress,
NetworkOffering,
Network,
VirtualMachine,
LoadBalancerRule)
from marvin.lib.common import (get_domain,
get_zone,
get_template,
list_routers,
list_hosts)
from marvin.lib.utils import (cleanup_resources,
get_process_status)
import socket
import time
import inspect
import logging
class Services:
"""Test VPC network services - Port Forwarding Rules Test Data Class.
"""
def __init__(self):
self.services = {
"configurableData": {
"host": {
"password": "password",
"username": "root",
"port": 22
},
"input": "INPUT",
"forward": "FORWARD"
},
"account": {
"email": "test@test.com",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "password",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100,
"memory": 128,
},
"shared_network_offering_sg": {
"name": "MySharedOffering-sg",
"displaytext": "MySharedOffering-sg",
"guestiptype": "Shared",
"supportedservices": "Dhcp,Dns,UserData,SecurityGroup",
"specifyVlan": "False",
"specifyIpRanges": "False",
"traffictype": "GUEST",
"serviceProviderList": {
"Dhcp": "VirtualRouter",
"Dns": "VirtualRouter",
"UserData": "VirtualRouter",
"SecurityGroup": "SecurityGroupProvider"
}
},
"network_offering": {
"name": 'Test Network offering',
"displaytext": 'Test Network offering',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding',
"traffictype": 'GUEST',
"availability": 'Optional',
"serviceProviderList": {
"Dhcp": 'VirtualRouter',
"Dns": 'VirtualRouter',
"SourceNat": 'VirtualRouter',
"PortForwarding": 'VirtualRouter',
},
},
"vpc_network_offering": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Vpn,Dhcp,Dns,SourceNat,PortForwarding,Lb,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_network_offering_no_lb": {
"name": 'VPC Network offering',
"displaytext": 'VPC Network off',
"guestiptype": 'Isolated',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,UserData,StaticNat,NetworkACL',
"traffictype": 'GUEST',
"availability": 'Optional',
"useVpc": 'on',
"serviceProviderList": {
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
},
"vpc_offering": {
"name": 'VPC off',
"displaytext": 'VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
},
"redundant_vpc_offering": {
"name": 'Redundant VPC off',
"displaytext": 'Redundant VPC off',
"supportedservices": 'Dhcp,Dns,SourceNat,PortForwarding,Vpn,Lb,UserData,StaticNat',
"serviceProviderList": {
"Vpn": 'VpcVirtualRouter',
"Dhcp": 'VpcVirtualRouter',
"Dns": 'VpcVirtualRouter',
"SourceNat": 'VpcVirtualRouter',
"PortForwarding": 'VpcVirtualRouter',
"Lb": 'VpcVirtualRouter',
"UserData": 'VpcVirtualRouter',
"StaticNat": 'VpcVirtualRouter',
"NetworkACL": 'VpcVirtualRouter'
},
"serviceCapabilityList": {
"SourceNat": {
"RedundantRouter": 'true'
}
},
},
"vpc": {
"name": "TestVPC",
"displaytext": "TestVPC",
"cidr": '10.1.1.1/16'
},
"network": {
"name": "Test Network",
"displaytext": "Test Network",
"netmask": '255.255.255.0'
},
"natrule": {
"privateport": 22,
"publicport": 22,
"startport": 22,
"endport": 22,
"protocol": "TCP",
"cidrlist": '0.0.0.0/0',
},
"virtual_machine": {
"displayname": "Test VM",
"username": "root",
"password": "password",
"ssh_port": 22,
"privateport": 22,
"publicport": 22,
"protocol": 'TCP',
},
"ostype": 'CentOS 5.3 (64-bit)',
"timeout": 10,
}
class TestVPCIpTablesPolicies(cloudstackTestCase):
@classmethod
def setUpClass(cls):
# We want to fail quicker if it's failure
socket.setdefaulttimeout(60)
cls.testClient = super(TestVPCIpTablesPolicies, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.services["ostype"])
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offering"])
cls.logger = logging.getLogger('TestVPCIpTablesPolicies')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
cls.entity_manager = EntityManager(cls.apiclient, cls.services, cls.service_offering, cls.account, cls.zone, cls.logger)
cls._cleanup = [cls.service_offering, cls.account]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.logger.debug("Creating a VPC offering.")
self.vpc_off = VpcOffering.create(
self.apiclient,
self.services["vpc_offering"])
self.logger.debug("Enabling the VPC offering created")
self.vpc_off.update(self.apiclient, state='Enabled')
self.logger.debug("Creating a VPC network in the account: %s" % self.account.name)
self.vpc = VPC.create(
self.apiclient,
self.services["vpc"],
vpcofferingid=self.vpc_off.id,
zoneid=self.zone.id,
account=self.account.name,
domainid=self.account.domainid)
self.cleanup = [self.vpc, self.vpc_off]
self.entity_manager.set_cleanup(self.cleanup)
return
def tearDown(self):
try:
self.entity_manager.destroy_routers()
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_01_single_VPC_iptables_policies(self):
""" Test iptables default INPUT/FORWARD policies on VPC router """
self.logger.debug("Starting test_01_single_VPC_iptables_policies")
routers = self.entity_manager.query_routers()
self.assertEqual(
isinstance(routers, list), True,
"Check for list routers response return valid data")
self.entity_manager.create_network(self.services["vpc_network_offering"], self.vpc.id, "10.1.1.1")
self.entity_manager.create_network(self.services["vpc_network_offering_no_lb"], self.vpc.id, "10.1.2.1")
self.entity_manager.add_nat_rules(self.vpc.id)
self.entity_manager.do_vpc_test()
for router in routers:
if not router.isredundantrouter and router.vpcid:
hosts = list_hosts(
self.apiclient,
id=router.hostid)
self.assertEqual(
isinstance(hosts, list),
True,
"Check for list hosts response return valid data")
host = hosts[0]
host.user = self.services["configurableData"]["host"]["username"]
host.passwd = self.services["configurableData"]["host"]["password"]
host.port = self.services["configurableData"]["host"]["port"]
tables = [self.services["configurableData"]["input"], self.services["configurableData"]["forward"]]
for table in tables:
try:
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.passwd,
router.linklocalip,
'iptables -L %s' % table)
except KeyError:
self.skipTest(
"Provide a marvin config file with host\
credentials to run %s" %
self._testMethodName)
self.logger.debug("iptables -L %s: %s" % (table, result))
res = str(result)
self.assertEqual(
res.count("DROP"),
1,
"%s Default Policy should be DROP" % table)
class TestRouterIpTablesPolicies(cloudstackTestCase):
@classmethod
def setUpClass(cls):
# We want to fail quicker if it's failure
socket.setdefaulttimeout(60)
cls.testClient = super(TestRouterIpTablesPolicies, cls).getClsTestClient()
cls.apiclient = cls.testClient.getApiClient()
cls.services = Services().services
# Get Zone, Domain and templates
cls.domain = get_domain(cls.apiclient)
cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
cls.template = get_template(
cls.apiclient,
cls.zone.id,
cls.services["ostype"])
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["virtual_machine"]["template"] = cls.template.id
cls.account = Account.create(
cls.apiclient,
cls.services["account"],
admin=True,
domainid=cls.domain.id)
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.services["service_offering"])
cls.logger = logging.getLogger('TestRouterIpTablesPolicies')
cls.stream_handler = logging.StreamHandler()
cls.logger.setLevel(logging.DEBUG)
cls.logger.addHandler(cls.stream_handler)
cls.entity_manager = EntityManager(cls.apiclient, cls.services, cls.service_offering, cls.account, cls.zone, cls.logger)
cls._cleanup = [cls.service_offering, cls.account]
return
@classmethod
def tearDownClass(cls):
try:
cleanup_resources(cls.apiclient, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.cleanup = []
self.entity_manager.set_cleanup(self.cleanup)
return
def tearDown(self):
try:
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@attr(tags=["advanced", "intervlan"], required_hardware="true")
def test_02_routervm_iptables_policies(self):
""" Test iptables default INPUT/FORWARD policy on RouterVM """
self.logger.debug("Starting test_02_routervm_iptables_policies")
vm1 = self.entity_manager.deployvm()
routers = self.entity_manager.query_routers()
self.assertEqual(
isinstance(routers, list), True,
"Check for list routers response return valid data")
for router in routers:
if not router.isredundantrouter and not router.vpcid:
hosts = list_hosts(
self.apiclient,
id=router.hostid)
self.assertEqual(
isinstance(hosts, list),
True,
"Check for list hosts response return valid data")
host = hosts[0]
host.user = self.services["configurableData"]["host"]["username"]
host.passwd = self.services["configurableData"]["host"]["password"]
host.port = self.services["configurableData"]["host"]["port"]
tables = [self.services["configurableData"]["input"], self.services["configurableData"]["forward"]]
for table in tables:
try:
result = get_process_status(
host.ipaddress,
host.port,
host.user,
host.passwd,
router.linklocalip,
'iptables -L %s' % table)
except KeyError:
self.skipTest(
"Provide a marvin config file with host\
credentials to run %s" %
self._testMethodName)
self.logger.debug("iptables -L %s: %s" % (table, result))
res = str(result)
self.assertEqual(
res.count("DROP"),
1,
"%s Default Policy should be DROP" % table)
class EntityManager(object):
def __init__(self, apiclient, services, service_offering, account, zone, logger):
self.apiclient = apiclient
self.services = services
self.service_offering = service_offering
self.account = account
self.zone = zone
self.logger = logger
self.cleanup = []
self.networks = []
self.routers = []
self.ips = []
def set_cleanup(self, cleanup):
self.cleanup = cleanup
def add_nat_rules(self, vpc_id):
for o in self.networks:
for vm in o.get_vms():
if vm.get_ip() is None:
vm.set_ip(self.acquire_publicip(o.get_net(), vpc_id))
if vm.get_nat() is None:
vm.set_nat(self.create_natrule(vm.get_vm(), vm.get_ip(), o.get_net(), vpc_id))
time.sleep(5)
def do_vpc_test(self):
for o in self.networks:
for vm in o.get_vms():
self.check_ssh_into_vm(vm.get_vm(), vm.get_ip())
def create_natrule(self, vm, public_ip, network, vpc_id):
self.logger.debug("Creating NAT rule in network for vm with public IP")
nat_rule_services = self.services["natrule"]
nat_rule = NATRule.create(
self.apiclient,
vm,
nat_rule_services,
ipaddressid=public_ip.ipaddress.id,
openfirewall=False,
networkid=network.id,
vpcid=vpc_id)
self.logger.debug("Adding NetworkACL rules to make NAT rule accessible")
nwacl_nat = NetworkACL.create(
self.apiclient,
networkid=network.id,
services=nat_rule_services,
traffictype='Ingress'
)
self.logger.debug('nwacl_nat=%s' % nwacl_nat.__dict__)
return nat_rule
def check_ssh_into_vm(self, vm, public_ip):
self.logger.debug("Checking if we can SSH into VM=%s on public_ip=%s" %
(vm.name, public_ip.ipaddress.ipaddress))
vm.ssh_client = None
try:
vm.get_ssh_client(ipaddress=public_ip.ipaddress.ipaddress)
self.logger.debug("SSH into VM=%s on public_ip=%s is successful" %
(vm.name, public_ip.ipaddress.ipaddress))
except:
raise Exception("Failed to SSH into VM - %s" % (public_ip.ipaddress.ipaddress))
def create_network(self, net_offerring, vpc_id, gateway='10.1.1.1'):
try:
self.logger.debug('Create NetworkOffering')
net_offerring["name"] = "NET_OFF-" + str(gateway)
nw_off = NetworkOffering.create(
self.apiclient,
net_offerring,
conservemode=False)
nw_off.update(self.apiclient, state='Enabled')
self.logger.debug('Created and Enabled NetworkOffering')
self.services["network"]["name"] = "NETWORK-" + str(gateway)
self.logger.debug('Adding Network=%s to VPC ID %s' % (self.services["network"], vpc_id))
obj_network = Network.create(
self.apiclient,
self.services["network"],
accountid=self.account.name,
domainid=self.account.domainid,
networkofferingid=nw_off.id,
zoneid=self.zone.id,
gateway=gateway,
vpcid=vpc_id)
self.logger.debug("Created network with ID: %s" % obj_network.id)
except Exception, e:
raise Exception('Unable to create a Network with offering=%s because of %s ' % (net_offerring, e))
o = networkO(obj_network)
vm1 = self.deployvm_in_network(obj_network)
self.cleanup.insert(1, obj_network)
self.cleanup.insert(2, nw_off)
o.add_vm(vm1)
self.networks.append(o)
return o
def deployvm_in_network(self, network):
try:
self.logger.debug('Creating VM in network=%s' % network.name)
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id,
networkids=[str(network.id)])
self.logger.debug('Created VM=%s in network=%s' % (vm.id, network.name))
self.cleanup.insert(0, vm)
return vm
except:
raise Exception('Unable to create VM in a Network=%s' % network.name)
def deployvm(self):
try:
self.logger.debug('Creating VM')
vm = VirtualMachine.create(
self.apiclient,
self.services["virtual_machine"],
accountid=self.account.name,
domainid=self.account.domainid,
serviceofferingid=self.service_offering.id)
self.cleanup.insert(0, vm)
self.logger.debug('Created VM=%s' % vm.id)
return vm
except:
raise Exception('Unable to create VM')
def acquire_publicip(self, network, vpc_id):
self.logger.debug("Associating public IP for network: %s" % network.name)
public_ip = PublicIPAddress.create(
self.apiclient,
accountid=self.account.name,
zoneid=self.zone.id,
domainid=self.account.domainid,
networkid=network.id,
vpcid=vpc_id)
self.logger.debug("Associated %s with network %s" % (
public_ip.ipaddress.ipaddress,
network.id))
self.ips.append(public_ip)
return public_ip
def query_routers(self):
self.routers = list_routers(self.apiclient,
account=self.account.name,
domainid=self.account.domainid)
return self.routers
def stop_router(self, router):
self.logger.debug('Stopping router')
cmd = stopRouter.stopRouterCmd()
cmd.id = router.id
self.apiclient.stopRouter(cmd)
def destroy_routers(self):
self.logger.debug('Destroying routers')
for router in self.routers:
self.stop_router(router)
cmd = destroyRouter.destroyRouterCmd()
cmd.id = router.id
self.apiclient.destroyRouter(cmd)
self.routers = []
def start_routers(self):
self.logger.debug('Starting routers')
for router in self.routers:
cmd = startRouter.startRouterCmd()
cmd.id = router.id
self.apiclient.startRouter(cmd)
class networkO(object):
def __init__(self, net):
self.network = net
self.vms = []
def get_net(self):
return self.network
def add_vm(self, vm):
self.vms.append(vmsO(vm))
def get_vms(self):
return self.vms
class vmsO(object):
def __init__(self, vm):
self.vm = vm
self.ip = None
self.nat = None
def get_vm(self):
return self.vm
def get_ip(self):
return self.ip
def get_nat(self):
return self.nat
def set_ip(self, ip):
self.ip = ip
def set_nat(self, nat):
self.nat = nat
| gpl-2.0 |
aksalj/kernel_rpi | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
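		# Convert a duration in microseconds to pixels at the current zoom
		# (1000 us per pixel when zoom == 1.0).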
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
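		# Draw one scheduling rectangle: an optional event-marking strip in
		# top_color above the main bar drawn in color.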
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
		(r, g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
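		# Map a y pixel coordinate to the index of the drawn rectangle row;
		# -1 means the point falls in the spacing between rows or outside them.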
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
stephen144/odoo | addons/l10n_be_invoice_bba/partner.py | 47 | 1364 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
from openerp.osv import fields, osv
import time
from openerp.tools.translate import _
class res_partner(osv.osv):
""" add field to indicate default 'Communication Type' on customer invoices """
_inherit = 'res.partner'
def _get_comm_type(self, cr, uid, context=None):
res = self.pool.get('account.invoice')._get_reference_type(cr, uid,context=context)
return res
_columns = {
'out_inv_comm_type': fields.selection(_get_comm_type, 'Communication Type', change_default=True,
help='Select Default Communication Type for Outgoing Invoices.' ),
'out_inv_comm_algorithm': fields.selection([
('random','Random'),
('date','Date'),
('partner_ref','Customer Reference'),
], 'Communication Algorithm',
help='Select Algorithm to generate the Structured Communication on Outgoing Invoices.' ),
}
def _commercial_fields(self, cr, uid, context=None):
return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
['out_inv_comm_type', 'out_inv_comm_algorithm']
_default = {
'out_inv_comm_type': 'none',
}
| agpl-3.0 |
shakamunyi/solum | solum/common/safe_utils.py | 37 | 2103 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions that won't produce circular imports."""
import inspect
def getcallargs(function, *args, **kwargs):
"""This is a simplified inspect.getcallargs (2.7+).
It should be replaced when python >= 2.7 is standard.
"""
keyed_args = {}
argnames, varargs, keywords, defaults = inspect.getargspec(function)
keyed_args.update(kwargs)
#NOTE(alaski) the implicit 'self' or 'cls' argument shows up in
# argnames but not in args or kwargs. Uses 'in' rather than '==' because
# some tests use 'self2'.
if 'self' in argnames[0] or 'cls' == argnames[0]:
# The function may not actually be a method or have im_self.
# Typically seen when it's stubbed with mox.
if inspect.ismethod(function) and hasattr(function, 'im_self'):
keyed_args[argnames[0]] = function.im_self
else:
keyed_args[argnames[0]] = None
remaining_argnames = filter(lambda x: x not in keyed_args, argnames)
keyed_args.update(dict(zip(remaining_argnames, args)))
if defaults:
num_defaults = len(defaults)
for argname, value in zip(argnames[-num_defaults:], defaults):
if argname not in keyed_args:
keyed_args[argname] = value
return keyed_args
| apache-2.0 |
xiaotdl/ansible | lib/ansible/plugins/callback/minimal.py | 87 | 3499 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'minimal'
def _command_generic_msg(self, host, result, caption):
''' output the result of a command run '''
buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc',0))
buf += result.get('stdout','')
buf += result.get('stderr','')
buf += result.get('msg','')
return buf + "\n"
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception']
self._display.display(msg, color='red')
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,"FAILED"), color='red')
else:
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='red')
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,"SUCCESS"), color='green')
else:
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=4)), color='green')
self._handle_warnings(result._result)
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color='cyan')
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color='yellow')
def v2_on_file_diff(self, result):
if 'diff' in result._result and result._result['diff']:
self._display.display(self._get_diff(result._result['diff']))
| gpl-3.0 |
sdgathman/cjdns | node_build/dependencies/libuv/build/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
| gpl-3.0 |
GDGLima/contentbox | third_party/django/contrib/messages/storage/fallback.py | 627 | 2171 | from django.contrib.messages.storage.base import BaseStorage
from django.contrib.messages.storage.cookie import CookieStorage
from django.contrib.messages.storage.session import SessionStorage
class FallbackStorage(BaseStorage):
"""
Tries to store all messages in the first backend, storing any unstored
    messages in each subsequent backend.
"""
storage_classes = (CookieStorage, SessionStorage)
def __init__(self, *args, **kwargs):
super(FallbackStorage, self).__init__(*args, **kwargs)
self.storages = [storage_class(*args, **kwargs)
for storage_class in self.storage_classes]
self._used_storages = set()
def _get(self, *args, **kwargs):
"""
Gets a single list of messages from all storage backends.
"""
all_messages = []
for storage in self.storages:
messages, all_retrieved = storage._get()
# If the backend hasn't been used, no more retrieval is necessary.
if messages is None:
break
if messages:
self._used_storages.add(storage)
all_messages.extend(messages)
# If this storage class contained all the messages, no further
# retrieval is necessary
if all_retrieved:
break
return all_messages, all_retrieved
def _store(self, messages, response, *args, **kwargs):
"""
Stores the messages, returning any unstored messages after trying all
backends.
For each storage backend, any messages not stored are passed on to the
next backend.
"""
for storage in self.storages:
if messages:
messages = storage._store(messages, response,
remove_oldest=False)
# Even if there are no more messages, continue iterating to ensure
# storages which contained messages are flushed.
elif storage in self._used_storages:
storage._store([], response)
self._used_storages.remove(storage)
return messages
| apache-2.0 |
thiriel/maps | venv/lib/python2.7/site-packages/django/core/files/temp.py | 536 | 1819 | """
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.TemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that works in Windows and supports
reopening of the temporary file in windows.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except (OSError):
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
| bsd-3-clause |
motlib/mqtt-ts | src/sensors/sysinfo.py | 1 | 1526 | '''System info related sensor implementations.'''
from sensors.sbase import SensorBase
from datetime import datetime
class NetTraffic(SensorBase):
'''Measures the average rx and tx throughput of a network interface.'''
def __init__(self, scfg):
super().__init__(scfg)
self.device = scfg['device']
self.lasttime = None
self.old_tx = None
self.old_rx = None
def get_file_value(self, filename):
with open(filename, 'r') as f:
val = float(f.read())
return val
def sample(self):
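        # Read the interface byte counters and convert the delta since the
        # previous sample into an average throughput in bytes per second.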
patht = '/sys/class/net/{dev}/statistics/{stat}'
rx = self.get_file_value(patht.format(
dev=self.device, stat='rx_bytes'))
tx = self.get_file_value(patht.format(
dev=self.device, stat='tx_bytes'))
t = datetime.now()
evts = []
if self.old_rx is not None:
val = (rx - self.old_rx) / ((t - self.lasttime).total_seconds())
# TODO: I need bytes per second!
evts.append(
self.new_event(val, 'bytes per second', 'rx_throughput'))
if self.old_tx is not None:
val = (tx - self.old_tx) / ((t - self.lasttime).total_seconds())
# TODO: I need bytes per second!
evts.append(
self.new_event(val, 'bytes per second', 'tx_throughput'))
self.old_rx = rx
self.old_tx = tx
self.lasttime = t
return evts
| gpl-3.0 |
nimbusproject/epumgmt | src/python/epumgmt/sbin/most-recent-log.py | 1 | 1325 | #!/usr/bin/env python
import os
import sys
import time
from epumgmt.sbin import sbin_common
def get_logfiledir(p):
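    # Read the [logging] logfiledir setting; apply_vardir_maybe is assumed to
    # resolve a relative path against the configured var directory.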
logfiledir = p.get_conf_or_none("logging", "logfiledir")
if not logfiledir:
sys.stderr.write("There is no logfiledir configuration")
return None
return sbin_common.apply_vardir_maybe(p, logfiledir)
if len(sys.argv) != 2:
sys.stderr.write("This program requires 1 argument, the absolute path to the main.conf file")
sys.exit(1)
p = sbin_common.get_parameters(sys.argv[1])
logfiledir = get_logfiledir(p)
if not logfiledir:
sys.exit(1)
# find the newest file in the directory:
sys.stderr.write("Log file dir: %s\n" % logfiledir)
sortme = []
for root, dirs, files in os.walk(logfiledir):
for name in files:
path = os.path.join(logfiledir, name)
if os.path.isfile(path):
astat = os.stat(path)
modtime = time.localtime(astat[8])
sortme.append((modtime, path))
break # only look in the top directory
if len(sortme) == 0:
sys.stderr.write("Could not find any files in: %s" % logfiledir)
sys.exit(1)
sortme.sort()
newest_file = sortme[-1][1]
sys.stderr.write("Newest log file: %s\n" % newest_file)
sys.stderr.flush()
f = open(newest_file)
for line in f:
print line,
f.close()
| apache-2.0 |
Aperturedimigo/Complete | QT/GUI_Privasee_notYourFace.py | 1 | 1271 | import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtGui import QPainter
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
windowSizeX = 440
windowSizeY = 250
fontMajor = "Arial"
fontMinor = "Dotum"
class Form(QWidget):
    # __init__ : constructor
    # parent : parent object
def __init__(self, parent=None):
super(Form, self).__init__(parent)
nameLabel = QLabel()
nameLabel.setText("You've got wrong face.")
newfont = QtGui.QFont(fontMinor, 16, QtGui.QFont.Bold)
nameLabel.setFont(newfont)
nameLabel.setAlignment(Qt.AlignCenter)
blankLabel = QLabel()
blankLabel.setText(" ")
backButton = QPushButton("Back")
backButton.clicked.connect(self.lockContact)
mainLayout = QGridLayout()
mainLayout.addWidget(nameLabel,0,0)
mainLayout.addWidget(backButton,1,0)
self.setLayout(mainLayout)
self.setWindowTitle("Privasee")
def lockContact(self):
'''call GUI_Privasee_Main.py'''
sys.exit(app.exec_())
if __name__ == '__main__':
app = QApplication(sys.argv)
screen = Form()
screen.resize(windowSizeX,windowSizeY)
screen.show()
sys.exit(app.exec_())
| mit |
jumpstarter-io/neutron | neutron/plugins/embrane/l2base/ml2/ml2_support.py | 2 | 2160 | # Copyright 2014 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Ivar Lazzaro, Embrane, Inc.
# @author: Ignacio Scopetta, Embrane, Inc.
from heleosapi import info as h_info
from neutron.common import constants
from neutron import manager
from neutron.plugins.embrane.l2base import support_base as base
from neutron.plugins.embrane.l2base import support_exceptions as exc
class Ml2Support(base.SupportBase):
"""Modular Layer 2 plugin support.
Obtains the information needed to build the user security zones.
"""
def __init__(self):
super(Ml2Support, self).__init__()
def retrieve_utif_info(self, context, neutron_port):
plugin = manager.NeutronManager.get_plugin()
network = plugin.get_network(
context, neutron_port['network_id'])
is_gw = (neutron_port["device_owner"] ==
constants.DEVICE_OWNER_ROUTER_GW)
network_type = network.get('provider:network_type')
if network_type != 'vlan':
raise exc.UtifInfoError(
err_msg=_("Network type %s not supported. Please be sure "
"that tenant_network_type is vlan") % network_type)
result = h_info.UtifInfo(network.get('provider:segmentation_id'),
network['name'],
network['id'],
is_gw,
network['tenant_id'],
neutron_port['id'],
neutron_port['mac_address'])
return result
| apache-2.0 |
diderson/couchapp | docs/conf.py | 2 | 9433 | # -*- coding: utf-8 -*-
#
# CouchApp documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 5 15:00:02 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CouchApp'
copyright = u'2015, <a href="https://github.com/couchapp/couchapp/graphs/contributors">Various CouchApp Contributors</a>'
author = u'Various CouchApp Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.1'
# The full version, including alpha/beta/rc tags.
release = '1.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CouchAppdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CouchApp.tex', u'CouchApp Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'CouchApp', u'CouchApp Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CouchApp', u'CouchApp Documentation',
author, 'CouchApp', 'Utilities to make standalone CouchDB application development simple',
'Development'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| apache-2.0 |
brandond/ansible | lib/ansible/modules/cloud/opennebula/one_image.py | 52 | 11657 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
(c) 2018, Milan Ilic <milani@nordeus.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: one_image
short_description: Manages OpenNebula images
description:
- Manages OpenNebula images
version_added: "2.6"
requirements:
- python-oca
options:
api_url:
description:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- If not set then the value of the C(ONE_URL) environment variable is used.
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- then the value of the C(ONE_PASSWORD) environment variable is used.
id:
description:
- A C(id) of the image you would like to manage.
name:
description:
- A C(name) of the image you would like to manage.
state:
description:
- C(present) - state that is used to manage the image
- C(absent) - delete the image
- C(cloned) - clone the image
- C(renamed) - rename the image to the C(new_name)
choices: ["present", "absent", "cloned", "renamed"]
default: present
enabled:
description:
- Whether the image should be enabled or disabled.
type: bool
new_name:
description:
- A name that will be assigned to the existing or new image.
- In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
author:
- "Milan Ilic (@ilicmilan)"
'''
EXAMPLES = '''
# Fetch the IMAGE by id
- one_image:
id: 45
register: result
# Print the IMAGE properties
- debug:
msg: result
# Rename existing IMAGE
- one_image:
id: 34
state: renamed
new_name: bar-image
# Disable the IMAGE by id
- one_image:
id: 37
enabled: no
# Enable the IMAGE by name
- one_image:
name: bar-image
enabled: yes
# Clone the IMAGE by name
- one_image:
name: bar-image
state: cloned
new_name: bar-image-clone
register: result
# Delete the IMAGE by id
- one_image:
id: '{{ result.id }}'
state: absent
'''
RETURN = '''
id:
description: image id
type: int
returned: success
sample: 153
name:
description: image name
type: str
returned: success
sample: app1
group_id:
description: image's group id
type: int
returned: success
sample: 1
group_name:
description: image's group name
type: str
returned: success
sample: one-users
owner_id:
description: image's owner id
type: int
returned: success
sample: 143
owner_name:
description: image's owner name
type: str
returned: success
sample: ansible-test
state:
description: state of image instance
type: str
returned: success
sample: READY
used:
description: is image in use
type: bool
returned: success
sample: true
running_vms:
description: count of running vms that use this image
type: int
returned: success
sample: 7
'''
try:
import oca
HAS_OCA = True
except ImportError:
HAS_OCA = False
from ansible.module_utils.basic import AnsibleModule
import os
def get_image(module, client, predicate):
pool = oca.ImagePool(client)
# Filter -2 means fetch all images user can Use
pool.info(filter=-2)
for image in pool:
if predicate(image):
return image
return None
def get_image_by_name(module, client, image_name):
return get_image(module, client, lambda image: (image.name == image_name))
def get_image_by_id(module, client, image_id):
return get_image(module, client, lambda image: (image.id == image_id))
def get_image_instance(module, client, requested_id, requested_name):
if requested_id:
return get_image_by_id(module, client, requested_id)
else:
return get_image_by_name(module, client, requested_name)
IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']
def get_image_info(image):
image.info()
info = {
'id': image.id,
'name': image.name,
'state': IMAGE_STATES[image.state],
'running_vms': image.running_vms,
'used': bool(image.running_vms),
'user_name': image.uname,
'user_id': image.uid,
'group_name': image.gname,
'group_id': image.gid,
}
return info
def wait_for_state(module, image, wait_timeout, state_predicate):
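    # Poll the image state once per second until the predicate matches or the
    # timeout expires, in which case the module fails.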
import time
start_time = time.time()
while (time.time() - start_time) < wait_timeout:
image.info()
state = image.state
if state_predicate(state):
return image
time.sleep(1)
module.fail_json(msg="Wait timeout has expired!")
def wait_for_ready(module, image, wait_timeout=60):
return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('READY')]))
def wait_for_delete(module, image, wait_timeout=60):
return wait_for_state(module, image, wait_timeout, lambda state: (state in [IMAGE_STATES.index('DELETE')]))
def enable_image(module, client, image, enable):
image.info()
changed = False
state = image.state
if state not in [IMAGE_STATES.index('READY'), IMAGE_STATES.index('DISABLED'), IMAGE_STATES.index('ERROR')]:
if enable:
module.fail_json(msg="Cannot enable " + IMAGE_STATES[state] + " image!")
else:
module.fail_json(msg="Cannot disable " + IMAGE_STATES[state] + " image!")
if ((enable and state != IMAGE_STATES.index('READY')) or
(not enable and state != IMAGE_STATES.index('DISABLED'))):
changed = True
if changed and not module.check_mode:
client.call('image.enable', image.id, enable)
result = get_image_info(image)
result['changed'] = changed
return result
def clone_image(module, client, image, new_name):
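    # Default the clone name to "Copy of <origin>"; if an image with that name
    # already exists it is returned unchanged, otherwise clone and wait for READY.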
if new_name is None:
new_name = "Copy of " + image.name
tmp_image = get_image_by_name(module, client, new_name)
if tmp_image:
result = get_image_info(tmp_image)
result['changed'] = False
return result
if image.state == IMAGE_STATES.index('DISABLED'):
module.fail_json(msg="Cannot clone DISABLED image")
if not module.check_mode:
new_id = client.call('image.clone', image.id, new_name)
image = get_image_by_id(module, client, new_id)
wait_for_ready(module, image)
result = get_image_info(image)
result['changed'] = True
return result
def rename_image(module, client, image, new_name):
if new_name is None:
module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
if new_name == image.name:
result = get_image_info(image)
result['changed'] = False
return result
tmp_image = get_image_by_name(module, client, new_name)
if tmp_image:
module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(tmp_image.id))
if not module.check_mode:
client.call('image.rename', image.id, new_name)
result = get_image_info(image)
result['changed'] = True
return result
def delete_image(module, client, image):
if not image:
return {'changed': False}
if image.running_vms > 0:
module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
if not module.check_mode:
client.call('image.delete', image.id)
wait_for_delete(module, image)
return {'changed': True}
def get_connection_info(module):
url = module.params.get('api_url')
username = module.params.get('api_username')
password = module.params.get('api_password')
if not url:
url = os.environ.get('ONE_URL')
if not username:
username = os.environ.get('ONE_USERNAME')
if not password:
password = os.environ.get('ONE_PASSWORD')
if not(url and username and password):
module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
from collections import namedtuple
auth_params = namedtuple('auth', ('url', 'username', 'password'))
return auth_params(url=url, username=username, password=password)
def main():
fields = {
"api_url": {"required": False, "type": "str"},
"api_username": {"required": False, "type": "str"},
"api_password": {"required": False, "type": "str", "no_log": True},
"id": {"required": False, "type": "int"},
"name": {"required": False, "type": "str"},
"state": {
"default": "present",
"choices": ['present', 'absent', 'cloned', 'renamed'],
"type": "str"
},
"enabled": {"required": False, "type": "bool"},
"new_name": {"required": False, "type": "str"},
}
module = AnsibleModule(argument_spec=fields,
mutually_exclusive=[['id', 'name']],
supports_check_mode=True)
if not HAS_OCA:
module.fail_json(msg='This module requires python-oca to work!')
auth = get_connection_info(module)
params = module.params
id = params.get('id')
name = params.get('name')
state = params.get('state')
enabled = params.get('enabled')
new_name = params.get('new_name')
client = oca.Client(auth.username + ':' + auth.password, auth.url)
result = {}
if not id and state == 'renamed':
module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
image = get_image_instance(module, client, id, name)
if not image and state != 'absent':
if id:
module.fail_json(msg="There is no image with id=" + str(id))
else:
module.fail_json(msg="There is no image with name=" + name)
if state == 'absent':
result = delete_image(module, client, image)
else:
result = get_image_info(image)
changed = False
result['changed'] = False
if enabled is not None:
result = enable_image(module, client, image, enabled)
if state == "cloned":
result = clone_image(module, client, image, new_name)
elif state == "renamed":
result = rename_image(module, client, image, new_name)
changed = changed or result['changed']
result['changed'] = changed
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
pferreir/indico-backup | indico/MaKaC/authentication/LocalAuthentication.py | 2 | 2188 | # -*- coding: utf-8 -*-
##
##
## This file is part of Indico.
## Copyright (C) 2002 - 2014 European Organization for Nuclear Research (CERN).
##
## Indico is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 3 of the
## License, or (at your option) any later version.
##
## Indico is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Indico;if not, see <http://www.gnu.org/licenses/>.
import bcrypt
from MaKaC.authentication.baseAuthentication import Authenthicator, PIdentity, SSOHandler
from MaKaC.i18n import _
class LocalAuthenticator(Authenthicator, SSOHandler):
idxName = "localIdentities"
id = "Local"
name = "Indico"
desciption = "Indico Login"
def __init__(self):
Authenthicator.__init__(self)
def createIdentity(self, li, avatar):
return LocalIdentity(li.getLogin(), li.getPassword(), avatar)
def createIdentitySSO(self, login, avatar):
return LocalIdentity(login, None, avatar)
class LocalIdentity(PIdentity):
def __init__(self, login, password, user):
PIdentity.__init__(self, login, user)
self.setPassword(password)
def setPassword(self, newPwd):
self.algorithm = 'bcrypt'
if newPwd is not None:
self.password = bcrypt.hashpw(newPwd, bcrypt.gensalt())
else:
# This happens e.g. when SSO is used with Local identities.
# The user can add the password later if he wants to anyway
self.password = None
def authenticate(self, id):
if self.password is None:
return None
if self.getLogin() == id.getLogin() and self.password == bcrypt.hashpw(id.getPassword(), self.password):
return self.user
return None
def getAuthenticatorTag(self):
return LocalAuthenticator.getId()
| gpl-3.0 |
foursquare/commons-old | src/python/twitter/pants/tasks/markdown_to_html.py | 1 | 9585 | # ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = 'John Sirois'
try:
import markdown
WIKILINKS_PATTERN = r'\[\[([^\]]+)\]\]'
class WikilinksPattern(markdown.inlinepatterns.Pattern):
def __init__(self, build_url, markdown_instance=None):
markdown.inlinepatterns.Pattern.__init__(self, WIKILINKS_PATTERN, markdown_instance)
self.build_url = build_url
def handleMatch(self, m):
alias, url = self.build_url(m.group(2).strip())
el = markdown.util.etree.Element('a')
el.set('href', url)
el.text = markdown.util.AtomicString(alias)
return el
class WikilinksExtension(markdown.Extension):
def __init__(self, build_url, configs=None):
markdown.Extension.__init__(self, configs or {})
self.build_url = build_url
def extendMarkdown(self, md, md_globals):
md.inlinePatterns['wikilinks'] = WikilinksPattern(self.build_url, md)
HAS_MARKDOWN = True
except ImportError:
HAS_MARKDOWN = False
try:
from pygments.formatters.html import HtmlFormatter
from pygments.styles import get_all_styles
def configure_codehighlight_options(option_group, mkflag):
all_styles = list(get_all_styles())
option_group.add_option(mkflag("code-style"), dest="markdown_to_html_code_style",
type="choice", choices=all_styles,
help="Selects the stylesheet to use for code highlights, one of: "
"%s." % ' '.join(all_styles))
def emit_codehighlight_css(path, style):
with safe_open(path, 'w') as css:
css.write((HtmlFormatter(style=style)).get_style_defs('.codehilite'))
return path
except ImportError:
def configure_codehighlight_options(option_group, mkflag): pass
def emit_codehighlight_css(path, style): pass
import os
import re
import textwrap
from twitter.common.dirutil import safe_open
from twitter.pants import get_buildroot
from twitter.pants.base import Address, Target
from twitter.pants.targets import Page
from twitter.pants.tasks import binary_utils, Task, TaskError
class MarkdownToHtml(Task):
AVAILABLE = HAS_MARKDOWN
@classmethod
def setup_parser(cls, option_group, args, mkflag):
configure_codehighlight_options(option_group, mkflag)
option_group.add_option(mkflag("open"), mkflag("open", negate=True),
dest = "markdown_to_html_open",
action="callback", callback=mkflag.set_bool, default=False,
help = "[%default] Open the generated documents in a browser.")
option_group.add_option(mkflag("standalone"), mkflag("standalone", negate=True),
dest = "markdown_to_html_standalone",
action="callback", callback=mkflag.set_bool, default=False,
help = "[%default] Generate a well-formed standalone html document.")
option_group.add_option(mkflag("outdir"), dest="markdown_to_html_outdir",
help="Emit generated html in to this directory.")
option_group.add_option(mkflag("extension"), dest = "markdown_to_html_extensions",
action="append",
help = "Override the default markdown extensions and process pages "
"whose source have these extensions instead.")
def __init__(self, context):
Task.__init__(self, context)
self.open = context.options.markdown_to_html_open
self.outdir = (
context.options.markdown_to_html_outdir
or context.config.get('markdown-to-html', 'workdir')
)
self.extensions = set(
context.options.markdown_to_html_extensions
or context.config.getlist('markdown-to-html', 'extensions', ['.md'])
)
self.standalone = context.options.markdown_to_html_standalone
self.code_style = context.config.get('markdown-to-html', 'code-style')
if hasattr(context.options, 'markdown_to_html_code_style'):
if context.options.markdown_to_html_code_style:
self.code_style = context.options.markdown_to_html_code_style
def execute(self, targets):
if not MarkdownToHtml.AVAILABLE:
raise TaskError('Cannot process markdown - no markdown lib on the sys.path')
# TODO(John Sirois): consider adding change detection
css_relpath = os.path.join('css', 'codehighlight.css')
css = emit_codehighlight_css(os.path.join(self.outdir, css_relpath), self.code_style)
if css:
self.context.log.info('Emitted %s' % css)
def is_page(target):
return isinstance(target, Page)
roots = set()
interior_nodes = set()
if self.open:
dependencies_by_page = self.context.dependants(on_predicate=is_page, from_predicate=is_page)
roots.update(dependencies_by_page.keys())
for dependencies in dependencies_by_page.values():
interior_nodes.update(dependencies)
roots.difference_update(dependencies)
for page in self.context.targets(is_page):
# There are no in or out edges so we need to show show this isolated page.
if not page.dependencies and page not in interior_nodes:
roots.add(page)
genmap = self.context.products.get('markdown_html')
show = []
for page in filter(is_page, targets):
_, ext = os.path.splitext(page.source)
if ext in self.extensions:
def process_page(key, outdir, url_builder, config):
outputs = list()
if css and self.standalone:
outputs.append(css_relpath)
html_path = self.process(
outdir,
page.target_base,
page.source,
self.standalone,
url_builder,
config,
css=css
)
self.context.log.info('Processed %s to %s' % (page.source, html_path))
outputs.append(os.path.relpath(html_path, outdir))
genmap.add(key, outdir, outputs)
return html_path
def url_builder(linked_page, config=None):
path, ext = os.path.splitext(linked_page.source)
return linked_page.name, os.path.relpath(path + '.html', os.path.dirname(page.source))
html = process_page(page, os.path.join(self.outdir, 'html'), url_builder, lambda p: None)
if self.open and page in roots:
show.append(html)
for wiki in page.wikis():
def get_config(page):
return page.wiki_config(wiki)
basedir = os.path.join(self.outdir, wiki.id)
process_page((wiki, page), basedir, wiki.url_builder, get_config)
if show:
binary_utils.open(*show)
PANTS_LINK = re.compile(r'''pants\(['"]([^)]+)['"]\)''')
def process(self, outdir, base, source, standalone, url_builder, get_config, css=None):
def parse_url(spec):
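      # Resolve a pants('<address>') wiki link to its Page target and build a
      # url for it; any other spec is passed through unchanged.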
match = MarkdownToHtml.PANTS_LINK.match(spec)
if match:
page = Target.get(Address.parse(get_buildroot(), match.group(1)))
if not page:
raise TaskError('Invalid link %s' % match.group(1))
alias, url = url_builder(page, config=get_config(page))
return alias, url
else:
return spec, spec
def build_url(label):
components = label.split('|', 1)
if len(components) == 1:
return parse_url(label.strip())
else:
alias, link = components
_, url = parse_url(link.strip())
return alias, url
wikilinks = WikilinksExtension(build_url)
path, ext = os.path.splitext(source)
with safe_open(os.path.join(outdir, path + '.html'), 'w') as output:
with open(os.path.join(get_buildroot(), base, source), 'r') as input:
md_html = markdown.markdown(
input.read(),
extensions=['codehilite(guess_lang=False)', 'extra', 'tables', 'toc', wikilinks],
)
if standalone:
if css:
css_relpath = os.path.relpath(css, outdir)
out_relpath = os.path.dirname(source)
link_relpath = os.path.relpath(css_relpath, out_relpath)
css = '<link rel="stylesheet" type="text/css" href="%s"/>' % link_relpath
html = textwrap.dedent('''
<html>
<head>
%s
</head>
<body>
<!-- generated by pants! -->
%s
</body>
</html>
''').strip() % (css or '', md_html)
output.write(html)
else:
if css:
with safe_open(css) as fd:
output.write(textwrap.dedent('''
<style type="text/css">
%s
</style>
''').strip() % fd.read())
output.write('\n')
output.write(md_html)
return output.name
| apache-2.0 |
kennedyshead/home-assistant | tests/components/zwave/conftest.py | 6 | 2428 | """Fixtures for Z-Wave tests."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from homeassistant.components.zwave import const
from tests.components.light.conftest import mock_light_profiles # noqa: F401
from tests.mock.zwave import MockNetwork, MockNode, MockOption, MockValue
@pytest.fixture
def mock_openzwave():
"""Mock out Open Z-Wave."""
base_mock = MagicMock()
libopenzwave = base_mock.libopenzwave
libopenzwave.__file__ = "test"
base_mock.network.ZWaveNetwork = MockNetwork
base_mock.option.ZWaveOption = MockOption
with patch.dict(
"sys.modules",
{
"libopenzwave": libopenzwave,
"openzwave.option": base_mock.option,
"openzwave.network": base_mock.network,
"openzwave.group": base_mock.group,
},
):
yield base_mock
@pytest.fixture
def mock_discovery():
"""Mock discovery."""
discovery = MagicMock()
discovery.async_load_platform = AsyncMock(return_value=None)
yield discovery
@pytest.fixture
def mock_import_module():
"""Mock import module."""
platform = MagicMock()
mock_device = MagicMock()
mock_device.name = "test_device"
platform.get_device.return_value = mock_device
import_module = MagicMock()
import_module.return_value = platform
yield import_module
@pytest.fixture
def mock_values():
"""Mock values."""
node = MockNode()
mock_schema = {
const.DISC_COMPONENT: "mock_component",
const.DISC_VALUES: {
const.DISC_PRIMARY: {const.DISC_COMMAND_CLASS: ["mock_primary_class"]},
"secondary": {const.DISC_COMMAND_CLASS: ["mock_secondary_class"]},
"optional": {
const.DISC_COMMAND_CLASS: ["mock_optional_class"],
const.DISC_OPTIONAL: True,
},
},
}
value_class = MagicMock()
value_class.primary = MockValue(
command_class="mock_primary_class", node=node, value_id=1000
)
value_class.secondary = MockValue(command_class="mock_secondary_class", node=node)
value_class.duplicate_secondary = MockValue(
command_class="mock_secondary_class", node=node
)
value_class.optional = MockValue(command_class="mock_optional_class", node=node)
value_class.no_match_value = MockValue(command_class="mock_bad_class", node=node)
yield (node, value_class, mock_schema)
| apache-2.0 |
skia-dev/oss-fuzz | projects/urllib3/fuzz_urlparse.py | 3 | 1029 | #!/usr/bin/python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import atheris
import urllib3
def TestOneInput(data):
fdp = atheris.FuzzedDataProvider(data)
original = fdp.ConsumeUnicode(sys.maxsize)
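  # parse_url() is exercised with arbitrary unicode input; LocationParseError
  # is the only failure treated as expected/benign here.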
try:
urllib3.util.parse_url(original)
except urllib3.exceptions.LocationParseError:
    pass
return
def main():
atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
atheris.Fuzz()
if __name__ == "__main__":
main()
| apache-2.0 |
RowenStipe/qTox | tools/update-server/deployUpdateFTP_64.py | 21 | 1100 | #!/usr/bin/python2.7
# This script deploys a qTox update to an FTP server.
# Pass the path to the root of the local update server as argument, no spaces allowed
import sys
import os
target = 'win64'
prefix = '/qtox/'+target+'/'
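# The two command halves below get "<local file> <remote file>" spliced between
# them by upload(), producing (roughly) a non-interactive ftp session fed via a
# bash here-string:
#   bash -c 'ftp -n tux3-dev.tox.chat 0<<<"`echo -ne "user ...\ncd win64\nsend LOCAL REMOTE\n"`"'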
uploadcmd1 = "bash -c '"+'ftp -n tux3-dev.tox.chat 0<<<"`echo -ne "user qtox-win-update-upload INSERT-PASSWORD-HERE\ncd '+target+'\nsend '
uploadcmd2 = '\n"`"'+"'"
def upload(file, rfile):
#print(uploadcmd1+file+' '+rfile+uploadcmd2)
os.system(uploadcmd1+file+' '+rfile+uploadcmd2)
# Check our local folders
if (len(sys.argv) < 2):
print("ERROR: Needs the path to the local update server in argument")
sys.exit(1)
localpath = sys.argv[1];
# Upload files/
filenames = next(os.walk(localpath+prefix+'/files/'))[2]
for filename in filenames:
print("Uploading files/"+filename+'...')
upload(localpath+prefix+'/files/'+filename, 'files/'+filename)
# Upload version and flist
print("Uploading flist...")
upload(localpath+prefix+'flist', 'flist')
print("Uploading version...")
upload(localpath+prefix+'version', 'version')
| gpl-3.0 |
ICGC-TCGA-PanCancer/pancancer-sandbox | pcawg_metadata_parser/generate_all_donors.py | 1 | 24480 | #!/usr/bin/env python
import sys
import os
import re
import glob
import xmltodict
import json
import yaml
import copy
import logging
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from elasticsearch import Elasticsearch
from collections import OrderedDict
import datetime
import dateutil.parser
from itertools import izip
from distutils.version import LooseVersion
es_queries = [
# query 0: donors_sanger_vcf_without_missing_bams
{
"fields": "donor_unique_id",
"size": 10000
}
]
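# Query 0 above simply pulls the "donor_unique_id" field for up to 10000
# donors; get_donors_list() below flattens the hits into a plain Python list.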
def create_reorganized_donor(donor_unique_id, es_json):
reorganized_donor = {
'donor_unique_id': donor_unique_id,
'submitter_donor_id': es_json['submitter_donor_id'],
'dcc_project_code': es_json['dcc_project_code'],
'data_train': 'train2' if es_json.get('flags').get('is_train2_donor') else 'train3',
'train2_pilot': True if es_json.get('flags').get('is_train2_pilot') else False,
'wgs': {
'normal_specimen': {
'bwa_alignment': {
}
},
'tumor_specimens': []
},
'rna_seq': {
'normal_specimen': {},
'tumor_specimens': []
}
}
if es_json.get('normal_alignment_status') and es_json.get('normal_alignment_status').get('aligned_bam'):
reorganized_donor['wgs']['normal_specimen']['bwa_alignment'] = {
'submitter_specimen_id': es_json.get('normal_alignment_status').get('submitter_specimen_id'),
'submitter_sample_id': es_json.get('normal_alignment_status').get('submitter_sample_id'),
'specimen_type': es_json.get('normal_alignment_status').get('dcc_specimen_type'),
'aliquot_id': es_json.get('normal_alignment_status').get('aliquot_id'),
'gnos_repo': filter_liri_jp(es_json.get('dcc_project_code'), es_json.get('normal_alignment_status').get('aligned_bam').get('gnos_repo')),
'gnos_id': es_json.get('normal_alignment_status').get('aligned_bam').get('gnos_id'),
'gnos_last_modified': es_json.get('normal_alignment_status').get('aligned_bam').get('gnos_last_modified')[-1],
'files': [
{
'bam_file_name': es_json.get('normal_alignment_status').get('aligned_bam').get('bam_file_name'),
'bam_file_md5sum': es_json.get('normal_alignment_status').get('aligned_bam').get('bam_file_md5sum'),
'bam_file_size': es_json.get('normal_alignment_status').get('aligned_bam').get('bam_file_size')
}
]
}
add_wgs_tumor_specimens(reorganized_donor, es_json)
add_rna_seq_info(reorganized_donor, es_json)
return reorganized_donor
def add_wgs_tumor_specimens(reorganized_donor, es_json):
wgs_tumor_alignment_info = es_json.get('tumor_alignment_status') \
if es_json.get('tumor_alignment_status') else []
wgs_tumor_sanger_vcf_info = es_json.get('variant_calling_results').get('sanger_variant_calling') \
if es_json.get('variant_calling_results') else {}
sanger_vcf_files = wgs_tumor_sanger_vcf_info.get('files')
tumor_wgs_specimen_count = 0
aliquot_info = {}
for aliquot in wgs_tumor_alignment_info:
tumor_wgs_specimen_count += 1
aliquot_id = aliquot.get('aliquot_id')
aliquot_info = {
'bwa_alignment':{ },
'sanger_variant_calling':{ }
}
if aliquot.get('aligned_bam'):
aliquot_info['bwa_alignment'] = {
'submitter_specimen_id': aliquot.get('submitter_specimen_id'),
'submitter_sample_id': aliquot.get('submitter_sample_id'),
'specimen_type': aliquot.get('dcc_specimen_type'),
'aliquot_id': aliquot.get('aliquot_id'),
'gnos_repo': filter_liri_jp(es_json.get('dcc_project_code'), aliquot.get('aligned_bam').get('gnos_repo')),
'gnos_id': aliquot.get('aligned_bam').get('gnos_id'),
'gnos_last_modified': aliquot.get('aligned_bam').get('gnos_last_modified')[-1],
'files':[
{
'bam_file_name': aliquot.get('aligned_bam').get('bam_file_name'),
'bam_file_md5sum': aliquot.get('aligned_bam').get('bam_file_md5sum'),
'bam_file_size': aliquot.get('aligned_bam').get('bam_file_size')
}
]
}
if sanger_vcf_files:
aliquot_info['sanger_variant_calling'] = {
'submitter_specimen_id': aliquot.get('submitter_specimen_id'),
'submitter_sample_id': aliquot.get('submitter_sample_id'),
'specimen_type': aliquot.get('dcc_specimen_type'),
'aliquot_id': aliquot.get('aliquot_id'),
'gnos_repo': wgs_tumor_sanger_vcf_info.get('gnos_repo'),
'gnos_id': wgs_tumor_sanger_vcf_info.get('gnos_id'),
'gnos_last_modified': wgs_tumor_sanger_vcf_info.get('gnos_last_modified')[-1],
'files':[]
}
for f in sanger_vcf_files:
if aliquot_id in f.get('file_name'):
aliquot_info.get('sanger_variant_calling').get('files').append(f)
reorganized_donor.get('wgs').get('tumor_specimens').append(aliquot_info)
reorganized_donor['tumor_wgs_specimen_count'] = tumor_wgs_specimen_count
def filter_liri_jp(project, gnos_repo):
if not project == 'LIRI-JP':
return gnos_repo
elif "https://gtrepo-riken.annailabs.com/" in gnos_repo:
return ["https://gtrepo-riken.annailabs.com/"]
else:
print "This should never happen: alignment for LIRI-JP is not available at Riken repo"
sys.exit(1)
def add_rna_seq_info(reorganized_donor, es_json):
    # to build the PCAWG Santa Cruz pilot dataset, this is a temporary workaround to exclude the 130 bad
    # RNA-Seq entries from the MALY-DE and CLLE-ES projects
#if reorganized_donor.get('dcc_project_code') in ('MALY-DE', 'CLLE-ES'): return
rna_seq_info = es_json.get('rna_seq').get('alignment')
for specimen_type in rna_seq_info.keys():
if not rna_seq_info.get(specimen_type): # the specimen_type has no alignment result
continue
if 'normal' in specimen_type:
aliquot = rna_seq_info.get(specimen_type)
alignment_info = {}
for workflow_type in aliquot.keys():
alignment_info[workflow_type] = {
'submitter_specimen_id': aliquot.get(workflow_type).get('submitter_specimen_id'),
'submitter_sample_id': aliquot.get(workflow_type).get('submitter_sample_id'),
'specimen_type': aliquot.get(workflow_type).get('dcc_specimen_type'),
'aliquot_id': aliquot.get(workflow_type).get('aliquot_id'),
'gnos_repo': aliquot.get(workflow_type).get('gnos_info').get('gnos_repo'),
'gnos_id': aliquot.get(workflow_type).get('gnos_info').get('gnos_id'),
'gnos_last_modified': aliquot.get(workflow_type).get('gnos_info').get('gnos_last_modified')[-1],
'files': [
{
'bam_file_name': aliquot.get(workflow_type).get('gnos_info').get('bam_file_name'),
'bam_file_md5sum': aliquot.get(workflow_type).get('gnos_info').get('bam_file_md5sum'),
'bam_file_size': aliquot.get(workflow_type).get('gnos_info').get('bam_file_size')
}
]
}
reorganized_donor.get('rna_seq')[specimen_type + ('_specimens' if specimen_type == 'tumor' else '_specimen')] = alignment_info
else:
for aliquot in rna_seq_info.get(specimen_type):
alignment_info = {}
for workflow_type in aliquot.keys():
alignment_info[workflow_type] = {
'submitter_specimen_id': aliquot.get(workflow_type).get('submitter_specimen_id'),
'submitter_sample_id': aliquot.get(workflow_type).get('submitter_sample_id'),
'specimen_type': aliquot.get(workflow_type).get('dcc_specimen_type'),
'aliquot_id': aliquot.get(workflow_type).get('aliquot_id'),
'gnos_repo': aliquot.get(workflow_type).get('gnos_info').get('gnos_repo'),
'gnos_id': aliquot.get(workflow_type).get('gnos_info').get('gnos_id'),
'gnos_last_modified': aliquot.get(workflow_type).get('gnos_info').get('gnos_last_modified')[-1],
'files': [
{
'bam_file_name': aliquot.get(workflow_type).get('gnos_info').get('bam_file_name'),
'bam_file_md5sum': aliquot.get(workflow_type).get('gnos_info').get('bam_file_md5sum'),
'bam_file_size': aliquot.get(workflow_type).get('gnos_info').get('bam_file_size')
}
]
}
reorganized_donor.get('rna_seq')[specimen_type + '_specimens'].append(alignment_info)
def get_donor_json(es, es_index, donor_unique_id):
es_query_donor = {
"query": {
"term": {
"donor_unique_id": donor_unique_id
}
}
}
response = es.search(index=es_index, body=es_query_donor)
es_json = response['hits']['hits'][0]['_source']
return es_json
def get_donors_list(es, es_index, es_queries):
q_index = 0
response = es.search(index=es_index, body=es_queries[q_index])
donors_list = []
for p in response['hits']['hits']:
donors_list.append(p.get('fields').get('donor_unique_id')[0])
return donors_list
def init_es(es_host, es_index):
es = Elasticsearch([ es_host ])
es.indices.create( es_index, ignore=400 )
# create mappings
es_mapping = open('pancan.reorganized.donor.mapping.json')
es.indices.put_mapping(index=es_index, doc_type='donor', body=es_mapping.read())
es_mapping.close()
return es
def set_default(obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, set):
return list(obj)
raise TypeError
def generate_json_for_tsv_file(reorganized_donor):
pilot_tsv_json = OrderedDict()
pilot_tsv_json['dcc_project_code'] = reorganized_donor.get('dcc_project_code')
pilot_tsv_json['submitter_donor_id'] = reorganized_donor.get('submitter_donor_id')
pilot_tsv_json['data_train'] = reorganized_donor.get('data_train')
pilot_tsv_json['train2_pilot'] = reorganized_donor.get('train2_pilot')
# wgs normal specimen
pilot_tsv_json['normal_wgs_submitter_specimen_id'] = reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('submitter_specimen_id')
pilot_tsv_json['normal_wgs_submitter_sample_id'] = reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('submitter_sample_id')
pilot_tsv_json['normal_wgs_aliquot_id'] = reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('aliquot_id')
pilot_tsv_json['normal_wgs_alignment_gnos_repo'] = [reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('gnos_repo')]
pilot_tsv_json['normal_wgs_alignment_gnos_id'] = reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('gnos_id')
pilot_tsv_json['normal_wgs_alignment_bam_file_name'] = reorganized_donor.get('wgs').get('normal_specimen').get('bwa_alignment').get('files')[0].get('bam_file_name')
# wgs tumor specimen
wgs_tumor_speciments = reorganized_donor.get('wgs').get('tumor_specimens')
pilot_tsv_json['tumor_wgs_specimen_count'] = reorganized_donor.get('tumor_wgs_specimen_count')
pilot_tsv_json['tumor_wgs_submitter_specimen_id'] = []
pilot_tsv_json['tumor_wgs_submitter_sample_id'] = []
pilot_tsv_json['tumor_wgs_aliquot_id'] = []
pilot_tsv_json['tumor_wgs_alignment_gnos_repo'] = []
pilot_tsv_json['tumor_wgs_alignment_gnos_id'] = []
pilot_tsv_json['tumor_wgs_alignment_bam_file_name'] = []
# wgs tumor sanger vcf
pilot_tsv_json['sanger_variant_calling_repo'] = []
pilot_tsv_json['sanger_variant_calling_gnos_id'] = wgs_tumor_speciments[0].get('sanger_variant_calling').get('gnos_id')
pilot_tsv_json['sanger_variant_calling_file_name_prefix'] = []
for specimen in wgs_tumor_speciments:
pilot_tsv_json['tumor_wgs_submitter_specimen_id'].append(specimen.get('bwa_alignment').get('submitter_specimen_id'))
pilot_tsv_json['tumor_wgs_submitter_sample_id'].append(specimen.get('bwa_alignment').get('submitter_sample_id'))
pilot_tsv_json['tumor_wgs_aliquot_id'].append(specimen.get('bwa_alignment').get('aliquot_id'))
pilot_tsv_json['tumor_wgs_alignment_gnos_repo'].append(specimen.get('bwa_alignment').get('gnos_repo'))
pilot_tsv_json['tumor_wgs_alignment_gnos_id'].append(specimen.get('bwa_alignment').get('gnos_id'))
pilot_tsv_json['tumor_wgs_alignment_bam_file_name'].append(specimen.get('bwa_alignment').get('files')[0].get('bam_file_name'))
# wgs tumor sanger vcf
pilot_tsv_json['sanger_variant_calling_repo'].append(specimen.get('sanger_variant_calling').get('gnos_repo'))
pilot_tsv_json['sanger_variant_calling_file_name_prefix'].append(specimen.get('sanger_variant_calling').get('aliquot_id'))
# rna_seq normal specimen
pilot_tsv_json['normal_rna_seq_submitter_specimen_id'] = None
pilot_tsv_json['normal_rna_seq_submitter_sample_id'] = None
pilot_tsv_json['normal_rna_seq_aliquot_id'] = None
pilot_tsv_json['normal_rna_seq_STAR_alignment_gnos_repo'] = None
pilot_tsv_json['normal_rna_seq_STAR_alignment_gnos_id'] = None
pilot_tsv_json['normal_rna_seq_STAR_alignment_bam_file_name'] = None
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_gnos_repo'] = None
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_gnos_id'] = None
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_bam_file_name'] = None
rna_seq_normal = reorganized_donor.get('rna_seq').get('normal_specimen')
if rna_seq_normal and rna_seq_normal.get('tophat'):
pilot_tsv_json['normal_rna_seq_submitter_specimen_id'] = rna_seq_normal.get('tophat').get('submitter_specimen_id')
pilot_tsv_json['normal_rna_seq_submitter_sample_id'] = rna_seq_normal.get('tophat').get('submitter_sample_id')
pilot_tsv_json['normal_rna_seq_aliquot_id'] = rna_seq_normal.get('tophat').get('aliquot_id')
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_gnos_repo'] = [rna_seq_normal.get('tophat').get('gnos_repo')]
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_gnos_id'] = rna_seq_normal.get('tophat').get('gnos_id')
pilot_tsv_json['normal_rna_seq_TOPHAT2_alignment_bam_file_name'] = rna_seq_normal.get('tophat').get('files')[0].get('bam_file_name')
if rna_seq_normal and rna_seq_normal.get('star'):
pilot_tsv_json['normal_rna_seq_submitter_specimen_id'] = rna_seq_normal.get('star').get('submitter_specimen_id')
pilot_tsv_json['normal_rna_seq_submitter_sample_id'] = rna_seq_normal.get('star').get('submitter_sample_id')
pilot_tsv_json['normal_rna_seq_aliquot_id'] = rna_seq_normal.get('star').get('aliquot_id')
pilot_tsv_json['normal_rna_seq_STAR_alignment_gnos_repo'] = rna_seq_normal.get('star').get('gnos_repo')
pilot_tsv_json['normal_rna_seq_STAR_alignment_gnos_id'] = rna_seq_normal.get('star').get('gnos_id')
pilot_tsv_json['normal_rna_seq_STAR_alignment_bam_file_name'] = rna_seq_normal.get('star').get('files')[0].get('bam_file_name')
# rna_seq tumor specimens
pilot_tsv_json['tumor_rna_seq_submitter_specimen_id'] = []
pilot_tsv_json['tumor_rna_seq_submitter_sample_id'] = []
pilot_tsv_json['tumor_rna_seq_aliquot_id'] = []
pilot_tsv_json['tumor_rna_seq_STAR_alignment_gnos_repo'] = []
pilot_tsv_json['tumor_rna_seq_STAR_alignment_gnos_id'] = []
pilot_tsv_json['tumor_rna_seq_STAR_alignment_bam_file_name'] = []
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_gnos_repo'] = []
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_gnos_id'] = []
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_bam_file_name'] = []
rna_seq_tumor = reorganized_donor.get('rna_seq').get('tumor_specimens')
rna_seq_tumor_specimen_id = []
rna_seq_tumor_sample_id = []
rna_seq_tumor_aliquot_id = []
if rna_seq_tumor:
for rna_seq_tumor_specimen in rna_seq_tumor:
if rna_seq_tumor_specimen.get('tophat'):
rna_seq_tumor_specimen_id_tmp = rna_seq_tumor_specimen.get('tophat').get('submitter_specimen_id')
rna_seq_tumor_sample_id_tmp = rna_seq_tumor_specimen.get('tophat').get('submitter_sample_id')
rna_seq_tumor_aliquot_id_tmp = rna_seq_tumor_specimen.get('tophat').get('aliquot_id')
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_gnos_repo'].append(rna_seq_tumor_specimen.get('tophat').get('gnos_repo'))
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_gnos_id'].append(rna_seq_tumor_specimen.get('tophat').get('gnos_id'))
pilot_tsv_json['tumor_rna_seq_TOPHAT2_alignment_bam_file_name'].append(rna_seq_tumor_specimen.get('tophat').get('files')[0].get('bam_file_name'))
if rna_seq_tumor_specimen.get('star'):
rna_seq_tumor_specimen_id_tmp = rna_seq_tumor_specimen.get('star').get('submitter_specimen_id')
rna_seq_tumor_sample_id_tmp = rna_seq_tumor_specimen.get('star').get('submitter_sample_id')
rna_seq_tumor_aliquot_id_tmp = rna_seq_tumor_specimen.get('star').get('aliquot_id')
pilot_tsv_json['tumor_rna_seq_STAR_alignment_gnos_repo'].append(rna_seq_tumor_specimen.get('star').get('gnos_repo'))
pilot_tsv_json['tumor_rna_seq_STAR_alignment_gnos_id'].append(rna_seq_tumor_specimen.get('star').get('gnos_id'))
pilot_tsv_json['tumor_rna_seq_STAR_alignment_bam_file_name'].append(rna_seq_tumor_specimen.get('star').get('files')[0].get('bam_file_name'))
rna_seq_tumor_specimen_id.append(rna_seq_tumor_specimen_id_tmp)
rna_seq_tumor_sample_id.append(rna_seq_tumor_sample_id_tmp)
rna_seq_tumor_aliquot_id.append(rna_seq_tumor_aliquot_id_tmp)
pilot_tsv_json['tumor_rna_seq_submitter_specimen_id'] = rna_seq_tumor_specimen_id
pilot_tsv_json['tumor_rna_seq_submitter_sample_id'] = rna_seq_tumor_sample_id
pilot_tsv_json['tumor_rna_seq_aliquot_id'] = rna_seq_tumor_aliquot_id
return pilot_tsv_json
def write_individule_json(report_dir, donor_unique_id, reorganized_donor):
(project_code, donor_id) = donor_unique_id.split('::')
project_dir = report_dir + '/donors/' + project_code
if not os.path.exists(project_dir): os.makedirs(project_dir)
with open(project_dir + '/' + donor_id + '.json', 'w') as w:
w.write(json.dumps(reorganized_donor, indent=4, sort_keys=True))
def main(argv=None):
parser = ArgumentParser(description="PCAWG Reorganized Json Donors Info Generator",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-m", "--metadata_dir", dest="metadata_dir",
help="Directory containing metadata manifest files", required=True)
parser.add_argument("-r", "--gnos_repo", dest="repo",
help="Specify which GNOS repo to process, process all repos if none specified", required=False)
args = parser.parse_args()
metadata_dir = args.metadata_dir # this dir contains gnos manifest files, will also host all reports
repo = args.repo
if not os.path.isdir(metadata_dir): # TODO: should add more directory name check to make sure it's right
sys.exit('Error: specified metadata directory does not exist!')
timestamp = str.split(metadata_dir, '/')[-1]
es_index = 'p_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1)
es_index_reorganize = 'r_' + ('' if not repo else repo+'_') + re.sub(r'\D', '', timestamp).replace('20','',1)
es_type = "donor"
es_host = 'localhost:9200'
es = Elasticsearch([es_host])
#es_reorganized = init_es(es_host, es_index_reorganize)
donor_fh = open(metadata_dir+'/reports/donors_all.jsonl', 'w')
# pilot_tsv_fh = open(metadata_dir + '/reports/donors_with_bwa_alignment.tsv', 'w')
# # read the tsv fields file and write to the pilot donor tsv file
# tsv_fields = ["Project code", "Submitter donor ID", "Data train", "Train2 pilot", "Normal WGS submitter specimen ID", \
# "Normal WGS submitter sample ID", "Normal WGS aliquot ID", "Normal WGS alignment GNOS repo(s)", "Normal WGS alignment GNOS analysis ID", \
# "Normal WGS alignment BAM file name", "Tumour WGS Specimen Count", "Tumour WGS submitter specimen ID(s)", \
# "Tumour WGS submitter sample ID(s)", "Tumour WGS aliquot ID(s)", "Tumour WGS alignment GNOS repo(s)", \
# "Tumour WGS alignment GNOS analysis ID(s)", "Tumour WGS alignment BAM file name(s)", "Sanger variant call GNOS repo(s)", \
# "Sanger variant call GNOS analysis ID(s)", "Sanger variant call GNOS file name prefix", "Normal RNA-Seq submitter specimen ID", \
# "Normal RNA-Seq submitter sample ID", "Normal RNA-Seq aliquot ID", "Normal RNA-Seq STAR alignment GNOS repo(s)", \
# "Normal RNA-Seq STAR alignment GNOS analysis ID", "Normal RNA-Seq STAR alignment BAM file name", \
# "Normal RNA-Seq TopHat2 alignment GNOS repo(s)", "Normal RNA-Seq TopHat2 alignment GNOS analysis ID", \
# "Normal RNA-Seq TopHat2 alignment BAM file name", "Tumour RNA-Seq submitter specimen ID", "Tumour RNA-Seq submitter sample ID", \
# "Tumour RNA-Seq aliquot ID", "Tumour RNA-Seq STAR alignment GNOS repo(s)", "Tumour RNA-Seq STAR alignment GNOS analysis ID", \
# "Tumour RNA-Seq STAR alignment BAM file name", "Tumour RNA-Seq TopHat2 alignment GNOS repo(s)", \
# "Tumour RNA-Seq TopHat2 alignment GNOS analysis ID", "Tumour RNA-Seq TopHat2 alignment BAM file name"
# ]
# pilot_tsv_fh.write('\t'.join(tsv_fields) + '\n')
# get the list of donors whose sanger_vcf without missing bams
donors_list = get_donors_list(es, es_index, es_queries)
# get json doc for each donor and reorganize it
for donor_unique_id in donors_list:
es_json = get_donor_json(es, es_index, donor_unique_id)
reorganized_donor = create_reorganized_donor(donor_unique_id, es_json)
write_individule_json(metadata_dir+'/reports/', donor_unique_id, reorganized_donor)
# DO NOT NEED THIS YET: push to Elasticsearch
#es_reorganized.index(index=es_index_reorganize, doc_type='donor', id=reorganized_donor['donor_unique_id'], \
# body=json.loads(json.dumps(reorganized_donor, default=set_default)), timeout=90 )
donor_fh.write(json.dumps(reorganized_donor, default=set_default) + '\n')
'''
# generate json for tsv file from reorganized donor
pilot_tsv_json = generate_json_for_tsv_file(reorganized_donor)
# write to the tsv file
line = ""
for p in pilot_tsv_json.keys():
if isinstance(pilot_tsv_json.get(p), list):
if pilot_tsv_json.get(p):
count0 = 0
for q in pilot_tsv_json.get(p):
if isinstance(q, list):
if q:
line += '|'.join(q)
else:
line += ''
else:
line += str(q) if q else ''
count0 = count0 + 1
if count0 < len(pilot_tsv_json.get(p)):
line += ','
else:
line += '' # None as empty string
else:
line += str(pilot_tsv_json.get(p)) if pilot_tsv_json.get(p) is not None else ''
line += '\t' # field ends
line = line[:-1] # remove the last unwanted '\t'
pilot_tsv_fh.write(line + '\n')
'''
#pilot_tsv_fh.close()
donor_fh.close()
return 0
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 |
jeffmarcom/checkbox | checkbox/lib/conversion.py | 2 | 5369 | #
# This file is part of Checkbox.
#
# Copyright 2008 Canonical Ltd.
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
#
import re
from datetime import (
datetime,
timedelta,
)
from checkbox.lib.tz import tzutc
DATETIME_RE = re.compile(r"""
^(?P<year>\d\d\d\d)-?(?P<month>\d\d)-?(?P<day>\d\d)
T(?P<hour>\d\d):?(?P<minute>\d\d):?(?P<second>\d\d)
(?:\.(?P<second_fraction>\d{0,6}))?
(?P<tz>
(?:(?P<tz_sign>[-+])(?P<tz_hour>\d\d):(?P<tz_minute>\d\d))
| Z)?$
""", re.VERBOSE)
TYPE_FORMATS = (
(r"(yes|true)", lambda v: True),
(r"(no|false)", lambda v: False),
(r"-?\d+", lambda v: int(v.group(0))),
(r"-?\d+\.\d+", lambda v: float(v.group(0))),
(r"(-?\d+) ?([kmgt]?b?)", lambda v: int(v.group(1))),
(r"(-?\d+\.\d+) ?([kmgt]?b?)", lambda v: float(v.group(1))),
(r"(-?\d+) ?([kmgt]?hz)", lambda v: int(v.group(1))),
(r"(-?\d+\.\d+) ?([kmgt]?hz)", lambda v: float(v.group(1))))
TYPE_FORMATS = tuple(
(re.compile(r"^%s$" % pattern, re.IGNORECASE), format)
for pattern, format in TYPE_FORMATS)
TYPE_MULTIPLIERS = (
(r"b", 1),
(r"kb?", 1024),
(r"mb?", 1024 * 1024),
(r"gb?", 1024 * 1024 * 1024),
(r"tb?", 1024 * 1024 * 1024 * 1024),
(r"hz", 1),
(r"khz?", 1024),
(r"mhz?", 1024 * 1024),
(r"ghz?", 1024 * 1024 * 1024),
(r"thz?", 1024 * 1024 * 1024 * 1024))
TYPE_MULTIPLIERS = tuple(
(re.compile(r"^%s$" % pattern, re.IGNORECASE), multiplier)
for pattern, multiplier in TYPE_MULTIPLIERS)
def datetime_to_string(dt):
"""Return a consistent string representation for a given datetime.
:param dt: The datetime object.
"""
return dt.isoformat()
def string_to_datetime(string):
"""Return a datetime object from a consistent string representation.
:param string: The string representation.
"""
# we cannot use time.strptime: this function accepts neither fractions
# of a second nor a time zone given e.g. as '+02:30'.
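    # Illustrative example: "2008-09-30T12:00:00.123+02:00" parses into a
    # tz-aware datetime; the +02:00 offset is subtracted further below, giving
    # 2008-09-30 10:00:00.123000 UTC.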
match = DATETIME_RE.match(string)
# The Relax NG schema allows a leading minus sign and year numbers
# with more than four digits, which are not "covered" by _time_regex.
if not match:
raise ValueError("Datetime with unreasonable value: %s" % string)
time_parts = match.groupdict()
year = int(time_parts['year'])
month = int(time_parts['month'])
day = int(time_parts['day'])
hour = int(time_parts['hour'])
minute = int(time_parts['minute'])
second = int(time_parts['second'])
second_fraction = time_parts['second_fraction']
if second_fraction is not None:
milliseconds = second_fraction + '0' * (6 - len(second_fraction))
milliseconds = int(milliseconds)
else:
milliseconds = 0
# The Relax NG validator accepts leap seconds, but the datetime
# constructor rejects them. The time values submitted by the HWDB
# client are not necessarily very precise, hence we can round down
# to 59.999999 seconds without losing any real precision.
if second > 59:
second = 59
milliseconds = 999999
dt = datetime(
year, month, day, hour, minute, second, milliseconds, tzinfo=tzutc)
tz_sign = time_parts['tz_sign']
tz_hour = time_parts['tz_hour']
tz_minute = time_parts['tz_minute']
if tz_sign in ('-', '+'):
delta = timedelta(hours=int(tz_hour), minutes=int(tz_minute))
if tz_sign == '-':
dt = dt + delta
else:
dt = dt - delta
return dt
def sizeof_bytes(bytes):
for x in ["bytes", "KB", "MB", "GB", "TB"]:
string = "%3.1f%s" % (bytes, x)
if bytes < 1024.0:
break
bytes /= 1024.0
return string
def sizeof_hertz(hertz):
for x in ["Hz", "KHz", "MHz", "GHz"]:
string = "%3.1f%s" % (hertz, x)
if hertz < 1000.0:
break
hertz /= 1000.0
return string
def string_to_type(string):
"""Return a typed representation for the given string.
The result might be a bool, int or float. The string might also be
supplemented by a multiplier like KB which would return an int or
float multiplied by 1024 for example.
:param string: The string representation.
"""
for regex, formatter in TYPE_FORMATS:
match = regex.match(string)
if match:
string = formatter(match)
if len(match.groups()) > 1:
unit = match.group(2)
for regex, multiplier in TYPE_MULTIPLIERS:
match = regex.match(unit)
if match:
string *= multiplier
break
else:
raise ValueError("Unknown multiplier: %s" % unit)
break
return string
| gpl-3.0 |
RaoUmer/django | django/core/files/storage.py | 97 | 10905 | import os
import errno
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
import itertools
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_text, filepath_to_uri
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.text import get_valid_filename
from django.utils._os import safe_join, abspathu
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content):
"""
Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_text(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, next(count), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
        Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name.
"""
raise NotImplementedError()
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None):
if location is None:
location = settings.MEDIA_ROOT
self.base_location = location
self.location = abspathu(self.base_location)
if base_url is None:
base_url = settings.MEDIA_URL
self.base_url = base_url
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
content.close()
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
try:
locks.lock(fd, locks.LOCK_EX)
_file = None
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
                    # Oops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
def delete(self, name):
name = self.path(name)
# If the file exists, delete it from the filesystem.
# Note that there is a race between os.path.exists and os.remove:
# if os.remove fails with ENOENT, the file was removed
# concurrently, and we can continue normally.
if os.path.exists(name):
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
try:
path = safe_join(self.location, name)
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
return os.path.normpath(path)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urljoin(self.base_url, filepath_to_uri(name))
def accessed_time(self, name):
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
if import_path is None:
import_path = settings.DEFAULT_FILE_STORAGE
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a storage module." % import_path)
module, classname = import_path[:dot], import_path[dot+1:]
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname))
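# Example (illustrative): get_storage_class('x.y.SomeStorage') imports module
# x.y and returns its SomeStorage attribute; with no argument it falls back to
# settings.DEFAULT_FILE_STORAGE.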
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
| bsd-3-clause |
igor-rangel7l/igorrangel.repository | plugin.video.SportsDevil/lib/utils/xbmcUtils.py | 27 | 2568 | # -*- coding: utf-8 -*-
import xbmcgui, xbmc, xbmcplugin
enable_debug = False
#######################################
# Xbmc Helpers
#######################################
def select(title, menuItems):
select = xbmcgui.Dialog().select(title, menuItems)
if select == -1:
return None
else:
return menuItems[select]
def getKeyboard(default = '', heading = '', hidden = False):
kboard = xbmc.Keyboard(default, heading, hidden)
kboard.doModal()
if kboard.isConfirmed():
return kboard.getText()
return ''
def getImage(title):
dialog = xbmcgui.Dialog()
image = dialog.browse(1, title, 'pictures', '.jpg|.png', True)
return image
def showMessage(msg):
xbmc.executebuiltin('Notification(SportsDevil,' + str(msg.encode('utf-8', 'ignore')) + ')')
def showBusyAnimation():
xbmc.executebuiltin( 'ActivateWindow(busydialog)' )
def hideBusyAnimation():
xbmc.executebuiltin( 'Dialog.Close(busydialog,true)' )
def closeAllDialogs():
xbmc.executebuiltin('Dialog.Close(all, true)')
def log(msg):
if enable_debug:
try:
xbmc.log(msg)
except:
xbmc.log(msg.encode('utf-8'))
def setSortMethodsForCurrentXBMCList(handle, sortKeys):
def addSortMethod(method):
xbmcplugin.addSortMethod(handle = handle, sortMethod = method)
if not sortKeys or sortKeys==[]:
addSortMethod(xbmcplugin.SORT_METHOD_UNSORTED)
else:
if 'name' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_LABEL)
if 'size' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_SIZE)
if 'duration' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_DURATION)
if 'genre' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_GENRE)
if 'rating' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_VIDEO_RATING)
if 'date' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_DATE)
if 'file' in sortKeys:
addSortMethod(xbmcplugin.SORT_METHOD_FILE)
def getContainerFolderPath():
return xbmc.getInfoLabel('Container.FolderPath')
def getListItemPath():
return xbmc.getInfoLabel('ListItem.Path')
def getCurrentWindow():
return xbmc.getInfoLabel('System.CurrentWindow')
def getCurrentControl():
return xbmc.getInfoLabel('System.CurrentControl')
def getCurrentWindowXmlFile():
return xbmc.getInfoLabel('Window.Property(xmlfile)') | gpl-2.0 |
bplancher/odoo | addons/sale_stock/tests/test_sale_stock.py | 18 | 12679 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.addons.sale.tests.test_sale_common import TestSale
from openerp.exceptions import UserError
class TestSaleStock(TestSale):
def test_00_sale_stock_invoice(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on delivery" products
"""
inv_obj = self.env['account.invoice']
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice on order
self.so.action_invoice_create()
# deliver partially, check the so's invoice_status and delivered quantities
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice" after invoicing')
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 1})
wiz_act = pick.do_new_transfer()
wiz = self.env[wiz_act['res_model']].browse(wiz_act['res_id'])
wiz.process()
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after partial delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [1.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: only stockable products
inv_id = self.so.action_invoice_create()
inv_1 = inv_obj.browse(inv_id)
self.assertTrue(all([il.product_id.invoice_policy == 'delivery' for il in inv_1.invoice_line_ids]),
'Sale Stock: invoice should only contain "invoice on delivery" products')
# complete the delivery and check invoice_status again
self.assertEqual(self.so.invoice_status, 'no',
'Sale Stock: so invoice_status should be "nothing to invoice" after partial delivery and invoicing')
self.assertEqual(len(self.so.picking_ids), 2, 'Sale Stock: number of pickings should be 2')
pick_2 = self.so.picking_ids[0]
pick_2.force_assign()
pick_2.pack_operation_product_ids.write({'qty_done': 1})
self.assertIsNone(pick_2.do_new_transfer(), 'Sale Stock: second picking should be final without need for a backorder')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" after complete delivery')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after complete delivery')
# invoice on delivery
inv_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced',
'Sale Stock: so invoice_status should be "fully invoiced" after complete delivery and invoicing')
def test_01_sale_stock_order(self):
"""
Test SO's changes when playing around with stock moves, quants, pack operations, pickings
and whatever other model there is in stock with "invoice on order" products
"""
# let's cheat and put all our products to "invoice on order"
self.so = self.env['sale.order'].create({
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {'name': p.name, 'product_id': p.id, 'product_uom_qty': 2, 'product_uom': p.uom_id.id, 'price_unit': p.list_price}) for (_, p) in self.products.iteritems()],
'pricelist_id': self.env.ref('product.list0').id,
'picking_policy': 'direct',
})
for sol in self.so.order_line:
sol.product_id.invoice_policy = 'order'
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on order" stockable products')
# let's do an invoice for a deposit of 5%
adv_wiz = self.env['sale.advance.payment.inv'].with_context(active_ids=[self.so.id]).create({
'advance_payment_method': 'percentage',
'amount': 5.0,
'product_id': self.env.ref('sale.advance_product_0').id,
})
act = adv_wiz.with_context(open_invoices=True).create_invoices()
inv = self.env['account.invoice'].browse(act['res_id'])
self.assertEqual(inv.amount_untaxed, self.so.amount_untaxed * 5.0 / 100.0, 'Sale Stock: deposit invoice is wrong')
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so should be to invoice after invoicing deposit')
# invoice on order: everything should be invoiced
self.so.action_invoice_create(final=True)
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so should be fully invoiced after second invoice')
# deliver, check the delivered quantities
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 2})
self.assertIsNone(pick.do_new_transfer(), 'Sale Stock: complete delivery should not need a backorder')
del_qties = [sol.qty_delivered for sol in self.so.order_line]
del_qties_truth = [2.0 if sol.product_id.type in ['product', 'consu'] else 0.0 for sol in self.so.order_line]
self.assertEqual(del_qties, del_qties_truth, 'Sale Stock: delivered quantities are wrong after partial delivery')
# invoice on delivery: nothing to invoice
with self.assertRaises(UserError):
self.so.action_invoice_create()
def test_02_sale_stock_return(self):
"""
Test a SO with a product invoiced on delivery. Deliver and invoice the SO, then do a return
of the picking. Check that a refund invoice is well generated.
"""
# intial so
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_product_47')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice in on delivery, nothing should be invoiced
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice"')
# deliver completely
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 5})
pick.do_new_transfer()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 5.0, 'Sale Stock: delivered quantity should be 5.0 after complete delivery')
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" after invoicing')
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice should be created')
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
self.assertEqual(self.inv_1.amount_untaxed, self.inv_1.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
# Create return picking
StockReturnPicking = self.env['stock.return.picking']
default_data = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).default_get(['move_dest_exists', 'original_location_id', 'product_return_moves', 'parent_location_id', 'location_id'])
return_wiz = StockReturnPicking.with_context(active_ids=pick.ids, active_id=pick.ids[0]).create(default_data)
res = return_wiz.create_returns()
return_pick = self.env['stock.picking'].browse(res['res_id'])
# Validate picking
return_pick.force_assign()
return_pick.pack_operation_product_ids.write({'qty_done': 5})
return_pick.do_new_transfer()
# Check invoice
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" after picking return')
def test_03_sale_stock_delivery_partial(self):
"""
Test a SO with a product invoiced on delivery. Deliver partially and invoice the SO, when
the SO is set on 'done', the SO should be fully invoiced.
"""
# intial so
self.partner = self.env.ref('base.res_partner_1')
self.product = self.env.ref('product.product_product_47')
so_vals = {
'partner_id': self.partner.id,
'partner_invoice_id': self.partner.id,
'partner_shipping_id': self.partner.id,
'order_line': [(0, 0, {
'name': self.product.name,
'product_id': self.product.id,
'product_uom_qty': 5.0,
'product_uom': self.product.uom_id.id,
'price_unit': self.product.list_price})],
'pricelist_id': self.env.ref('product.list0').id,
}
self.so = self.env['sale.order'].create(so_vals)
# confirm our standard so, check the picking
self.so.action_confirm()
self.assertTrue(self.so.picking_ids, 'Sale Stock: no picking created for "invoice on delivery" stockable products')
# invoice in on delivery, nothing should be invoiced
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "nothing to invoice"')
# deliver partially
pick = self.so.picking_ids
pick.force_assign()
pick.pack_operation_product_ids.write({'qty_done': 4})
backorder_wiz_id = pick.do_new_transfer()['res_id']
backorder_wiz = self.env['stock.backorder.confirmation'].browse([backorder_wiz_id])
backorder_wiz.process_cancel_backorder()
# Check quantity delivered
del_qty = sum(sol.qty_delivered for sol in self.so.order_line)
self.assertEqual(del_qty, 4.0, 'Sale Stock: delivered quantity should be 4.0 after partial delivery')
# Check invoice
self.assertEqual(self.so.invoice_status, 'to invoice', 'Sale Stock: so invoice_status should be "to invoice" before invoicing')
inv_1_id = self.so.action_invoice_create()
self.assertEqual(self.so.invoice_status, 'no', 'Sale Stock: so invoice_status should be "no" after invoicing')
self.assertEqual(len(inv_1_id), 1, 'Sale Stock: only one invoice should be created')
self.inv_1 = self.env['account.invoice'].browse(inv_1_id)
self.assertEqual(self.inv_1.amount_untaxed, self.inv_1.amount_untaxed, 'Sale Stock: amount in SO and invoice should be the same')
self.so.action_done()
self.assertEqual(self.so.invoice_status, 'invoiced', 'Sale Stock: so invoice_status should be "invoiced" when set to done')
| agpl-3.0 |
tbabej/astropy | astropy/nddata/mixins/tests/test_ndslicing.py | 2 | 5027 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from numpy.testing import assert_array_equal
from ... import NDData, NDSlicingMixin
from ...nduncertainty import NDUncertainty, StdDevUncertainty
from ....tests.helper import pytest
from .... import units as u
# Just add the Mixin to NDData
# TODO: Make this use NDDataRef instead!
class NDDataSliceable(NDSlicingMixin, NDData):
pass
# Just some uncertainty (following the StdDevUncertainty implementation of
# storing the uncertainty in a property 'array') with slicing.
class SomeUncertainty(NDUncertainty):
@property
def uncertainty_type(self):
return 'fake'
def _propagate_add(self, data, final_data):
pass
def _propagate_subtract(self, data, final_data):
pass
def _propagate_multiply(self, data, final_data):
pass
def _propagate_divide(self, data, final_data):
pass
def test_slicing_only_data():
data = np.arange(10)
nd = NDDataSliceable(data)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
def test_slicing_data_scalar_fail():
data = np.array(10)
nd = NDDataSliceable(data)
with pytest.raises(TypeError): # as exc
nd[:]
# assert exc.value.args[0] == 'Scalars cannot be sliced.'
def test_slicing_1ddata_ndslice():
data = np.array([10, 20])
nd = NDDataSliceable(data)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
@pytest.mark.parametrize('prop_name', ['mask', 'wcs', 'uncertainty'])
def test_slicing_1dmask_ndslice(prop_name):
# Data is 2d but mask only 1d so this should let the IndexError when
# slicing the mask rise to the user.
data = np.ones((3, 3))
kwarg = {prop_name: np.ones(3)}
nd = NDDataSliceable(data, **kwarg)
# Standard numpy warning here:
with pytest.raises(IndexError):
nd[:, :]
def test_slicing_all_npndarray_1d():
data = np.arange(10)
mask = data > 3
uncertainty = np.linspace(10, 20, 10)
wcs = np.linspace(1, 1000, 10)
# Just to have them too
unit = u.s
meta = {'observer': 'Brian'}
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs,
unit=unit, meta=meta)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
assert unit is nd2.unit
assert meta == nd.meta
def test_slicing_all_npndarray_nd():
# See what happens for multidimensional properties
data = np.arange(1000).reshape(10, 10, 10)
mask = data > 3
uncertainty = np.linspace(10, 20, 1000).reshape(10, 10, 10)
wcs = np.linspace(1, 1000, 1000).reshape(10, 10, 10)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
# Slice only 1D
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
# Slice 3D
nd2 = nd[2:5, :, 4:7]
assert_array_equal(data[2:5, :, 4:7], nd2.data)
assert_array_equal(mask[2:5, :, 4:7], nd2.mask)
assert_array_equal(uncertainty[2:5, :, 4:7], nd2.uncertainty.array)
assert_array_equal(wcs[2:5, :, 4:7], nd2.wcs)
def test_slicing_all_npndarray_shape_diff():
data = np.arange(10)
mask = (data > 3)[0:9]
uncertainty = np.linspace(10, 20, 15)
wcs = np.linspace(1, 1000, 12)
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
assert_array_equal(data[2:5], nd2.data)
# All are sliced even if the shapes differ (no Info)
assert_array_equal(mask[2:5], nd2.mask)
assert_array_equal(uncertainty[2:5], nd2.uncertainty.array)
assert_array_equal(wcs[2:5], nd2.wcs)
def test_slicing_all_something_wrong():
data = np.arange(10)
mask = [False]*10
uncertainty = {'rdnoise': 2.9, 'gain': 1.4}
wcs = 145 * u.degree
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[2:5]
# Sliced properties:
assert_array_equal(data[2:5], nd2.data)
assert_array_equal(mask[2:5], nd2.mask)
    # Not sliced attributes (they will emit an info message nevertheless)
uncertainty is nd2.uncertainty
assert_array_equal(wcs, nd2.wcs)
def test_boolean_slicing():
data = np.arange(10)
mask = data.copy()
uncertainty = StdDevUncertainty(data.copy())
wcs = data.copy()
nd = NDDataSliceable(data, mask=mask, uncertainty=uncertainty, wcs=wcs)
nd2 = nd[(nd.data >= 3) & (nd.data < 8)]
assert_array_equal(data[3:8], nd2.data)
assert_array_equal(mask[3:8], nd2.mask)
assert_array_equal(wcs[3:8], nd2.wcs)
assert_array_equal(uncertainty.array[3:8], nd2.uncertainty.array)
| bsd-3-clause |
ajs124/esp-idf | tools/idf_monitor.py | 3 | 20709 | #!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.0"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._read_line = b""
self._gdb_buffer = b""
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
def handle_serial_input(self, data):
# this may need to be made more efficient, as it pushes out a byte
# at a time to the console
for b in data:
self.console.write_bytes(b)
if b == b'\n': # end of line
self.handle_serial_input_line(self._read_line.strip())
self._read_line = b""
else:
self._read_line += b
self.check_gdbstub_trigger(b)
def handle_serial_input_line(self, line):
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
else:
            red_print('--- unknown menu character {} ---'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfia", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, c):
self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen
m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break
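        # e.g. a buffer ending in b"$T05#b9": the payload "T05" sums to 84+48+53 = 185 = 0xb9,
        # which equals the transmitted checksum, so gdb is launched (illustrative packet, not a real capture)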
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
subprocess.call(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
self.prompt_next_action("gdb exited")
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
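    # e.g. a MAKEFLAGS value of " --jobserver-fds=3,4 -j" would become " -j" after the
    # substitution below (illustrative value)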
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
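    # e.g. ANSI "31" (red, index 1) maps to Windows attribute 4 (FOREGROUND_RED),
    # and ANSI "34" (blue, index 4) maps to 1 (FOREGROUND_BLUE)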
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self.output.write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self.output.write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
| apache-2.0 |
zicklag/godot | doc/tools/makerst.py | 4 | 16059 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import sys
import os
import xml.etree.ElementTree as ET
input_list = []
for arg in sys.argv[1:]:
if arg.endswith(os.sep):
arg = arg[:-1]
input_list.append(arg)
if len(input_list) < 1:
print('usage: makerst.py <path to folders> and/or <path to .xml files> (order of arguments irrelevant)')
print('example: makerst.py "../../modules/" "../classes" path_to/some_class.xml')
sys.exit(0)
def validate_tag(elem, tag):
if elem.tag != tag:
print("Tag mismatch, expected '" + tag + "', got " + elem.tag)
sys.exit(255)
class_names = []
classes = {}
def ul_string(str, ul):
str += "\n"
for i in range(len(str) - 1):
str += ul
str += "\n"
return str
def make_class_list(class_list, columns):
f = codecs.open('class_list.rst', 'wb', 'utf-8')
prev = 0
col_max = len(class_list) / columns + 1
print(('col max is ', col_max))
col_count = 0
row_count = 0
last_initial = ''
fit_columns = []
for n in range(0, columns):
fit_columns += [[]]
indexers = []
last_initial = ''
idx = 0
for n in class_list:
col = idx / col_max
if col >= columns:
col = columns - 1
fit_columns[col] += [n]
idx += 1
if n[:1] != last_initial:
indexers += [n]
last_initial = n[:1]
row_max = 0
f.write("\n")
for n in range(0, columns):
if len(fit_columns[n]) > row_max:
row_max = len(fit_columns[n])
f.write("| ")
for n in range(0, columns):
f.write(" | |")
f.write("\n")
f.write("+")
for n in range(0, columns):
f.write("--+-------+")
f.write("\n")
for r in range(0, row_max):
s = '+ '
for c in range(0, columns):
if r >= len(fit_columns[c]):
continue
classname = fit_columns[c][r]
initial = classname[0]
if classname in indexers:
s += '**' + initial + '** | '
else:
s += ' | '
s += '[' + classname + '](class_' + classname.lower() + ') | '
s += '\n'
f.write(s)
for n in range(0, columns):
f.write("--+-------+")
f.write("\n")
def rstize_text(text, cclass):
# Linebreak + tabs in the XML should become two line breaks unless in a "codeblock"
pos = 0
while True:
pos = text.find('\n', pos)
if pos == -1:
break
pre_text = text[:pos]
while text[pos + 1] == '\t':
pos += 1
post_text = text[pos + 1:]
# Handle codeblocks
if post_text.startswith("[codeblock]"):
end_pos = post_text.find("[/codeblock]")
if end_pos == -1:
sys.exit("ERROR! [codeblock] without a closing tag!")
code_text = post_text[len("[codeblock]"):end_pos]
post_text = post_text[end_pos:]
# Remove extraneous tabs
code_pos = 0
while True:
code_pos = code_text.find('\n', code_pos)
if code_pos == -1:
break
to_skip = 0
while code_pos + to_skip + 1 < len(code_text) and code_text[code_pos + to_skip + 1] == '\t':
to_skip += 1
if len(code_text[code_pos + to_skip + 1:]) == 0:
code_text = code_text[:code_pos] + "\n"
code_pos += 1
else:
code_text = code_text[:code_pos] + "\n " + code_text[code_pos + to_skip + 1:]
code_pos += 5 - to_skip
text = pre_text + "\n[codeblock]" + code_text + post_text
pos += len("\n[codeblock]" + code_text)
# Handle normal text
else:
text = pre_text + "\n\n" + post_text
pos += 2
# Escape * character to avoid interpreting it as emphasis
pos = 0
while True:
pos = text.find('*', pos)
if pos == -1:
break
text = text[:pos] + "\*" + text[pos + 1:]
pos += 2
# Escape _ character at the end of a word to avoid interpreting it as an inline hyperlink
pos = 0
while True:
pos = text.find('_', pos)
if pos == -1:
break
if not text[pos + 1].isalnum(): # don't escape within a snake_case word
text = text[:pos] + "\_" + text[pos + 1:]
pos += 2
else:
pos += 1
# Handle [tags]
inside_code = False
pos = 0
while True:
pos = text.find('[', pos)
if pos == -1:
break
endq_pos = text.find(']', pos + 1)
if endq_pos == -1:
break
pre_text = text[:pos]
post_text = text[endq_pos + 1:]
tag_text = text[pos + 1:endq_pos]
escape_post = False
if tag_text in class_names:
tag_text = make_type(tag_text)
escape_post = True
else: # command
cmd = tag_text
space_pos = tag_text.find(' ')
if cmd == '/codeblock':
tag_text = ''
inside_code = False
# Strip newline if the tag was alone on one
if pre_text[-1] == '\n':
pre_text = pre_text[:-1]
elif cmd == '/code':
tag_text = '``'
inside_code = False
elif inside_code:
tag_text = '[' + tag_text + ']'
elif cmd.find('html') == 0:
cmd = tag_text[:space_pos]
param = tag_text[space_pos + 1:]
tag_text = param
elif cmd.find('method') == 0 or cmd.find('member') == 0 or cmd.find('signal') == 0:
cmd = tag_text[:space_pos]
param = tag_text[space_pos + 1:]
if param.find('.') != -1:
(class_param, method_param) = param.split('.')
tag_text = ':ref:`' + class_param + '.' + method_param + '<class_' + class_param + '_' + method_param + '>`'
else:
tag_text = ':ref:`' + param + '<class_' + cclass + "_" + param + '>`'
escape_post = True
elif cmd.find('image=') == 0:
tag_text = "" # '![](' + cmd[6:] + ')'
elif cmd.find('url=') == 0:
tag_text = ':ref:`' + cmd[4:] + '<' + cmd[4:] + ">`"
elif cmd == '/url':
tag_text = ''
escape_post = True
elif cmd == 'center':
tag_text = ''
elif cmd == '/center':
tag_text = ''
elif cmd == 'codeblock':
tag_text = '\n::\n'
inside_code = True
elif cmd == 'br':
# Make a new paragraph instead of a linebreak, rst is not so linebreak friendly
tag_text = '\n\n'
# Strip potential leading spaces
while post_text[0] == ' ':
post_text = post_text[1:]
elif cmd == 'i' or cmd == '/i':
tag_text = '*'
elif cmd == 'b' or cmd == '/b':
tag_text = '**'
elif cmd == 'u' or cmd == '/u':
tag_text = ''
elif cmd == 'code':
tag_text = '``'
inside_code = True
else:
tag_text = make_type(tag_text)
escape_post = True
# Properly escape things like `[Node]s`
if escape_post and post_text and post_text[0].isalnum(): # not punctuation, escape
post_text = '\ ' + post_text
text = pre_text + tag_text + post_text
pos = len(pre_text) + len(tag_text)
return text
def make_type(t):
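    # e.g. make_type('Node') -> ':ref:`Node<class_node>`' when 'Node' is a known class,
    # otherwise the name is returned unchanged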
global class_names
if t in class_names:
return ':ref:`' + t + '<class_' + t.lower() + '>`'
return t
def make_method(
f,
name,
m,
declare,
cname,
event=False,
pp=None
):
if (declare or pp == None):
t = '- '
else:
t = ""
ret_type = 'void'
args = list(m)
mdata = {}
mdata['argidx'] = []
for a in args:
if a.tag == 'return':
idx = -1
elif a.tag == 'argument':
idx = int(a.attrib['index'])
else:
continue
mdata['argidx'].append(idx)
mdata[idx] = a
if not event:
if -1 in mdata['argidx']:
t += make_type(mdata[-1].attrib['type'])
else:
t += 'void'
t += ' '
if declare or pp == None:
s = '**' + m.attrib['name'] + '** '
else:
s = ':ref:`' + m.attrib['name'] + '<class_' + cname + "_" + m.attrib['name'] + '>` '
s += '**(**'
argfound = False
for a in mdata['argidx']:
arg = mdata[a]
if a < 0:
continue
if a > 0:
s += ', '
else:
s += ' '
s += make_type(arg.attrib['type'])
if 'name' in arg.attrib:
s += ' ' + arg.attrib['name']
else:
s += ' arg' + str(a)
if 'default' in arg.attrib:
s += '=' + arg.attrib['default']
s += ' **)**'
if 'qualifiers' in m.attrib:
s += ' ' + m.attrib['qualifiers']
if (not declare):
if (pp != None):
pp.append((t, s))
else:
f.write("- " + t + " " + s + "\n")
else:
f.write(t + s + "\n")
def make_heading(title, underline):
return title + '\n' + underline * len(title) + "\n\n"
def make_rst_class(node):
name = node.attrib['name']
f = codecs.open("class_" + name.lower() + '.rst', 'wb', 'utf-8')
# Warn contributors not to edit this file directly
f.write(".. Generated automatically by doc/tools/makerst.py in Godot's source tree.\n")
f.write(".. DO NOT EDIT THIS FILE, but the " + name + ".xml source instead.\n")
f.write(".. The source is found in doc/classes or modules/<name>/doc_classes.\n\n")
f.write(".. _class_" + name + ":\n\n")
f.write(make_heading(name, '='))
if 'inherits' in node.attrib:
inh = node.attrib['inherits'].strip()
f.write('**Inherits:** ')
first = True
while (inh in classes):
if (not first):
f.write(" **<** ")
else:
first = False
f.write(make_type(inh))
inode = classes[inh]
if ('inherits' in inode.attrib):
inh = inode.attrib['inherits'].strip()
else:
inh = None
f.write("\n\n")
inherited = []
for cn in classes:
c = classes[cn]
if 'inherits' in c.attrib:
if (c.attrib['inherits'].strip() == name):
inherited.append(c.attrib['name'])
if (len(inherited)):
f.write('**Inherited By:** ')
for i in range(len(inherited)):
if (i > 0):
f.write(", ")
f.write(make_type(inherited[i]))
f.write("\n\n")
if 'category' in node.attrib:
f.write('**Category:** ' + node.attrib['category'].strip() + "\n\n")
f.write(make_heading('Brief Description', '-'))
briefd = node.find('brief_description')
if briefd != None:
f.write(rstize_text(briefd.text.strip(), name) + "\n\n")
methods = node.find('methods')
if methods != None and len(list(methods)) > 0:
f.write(make_heading('Member Functions', '-'))
ml = []
for m in list(methods):
make_method(f, node.attrib['name'], m, False, name, False, ml)
longest_t = 0
longest_s = 0
for s in ml:
sl = len(s[0])
if (sl > longest_s):
longest_s = sl
tl = len(s[1])
if (tl > longest_t):
longest_t = tl
sep = "+"
for i in range(longest_s + 2):
sep += "-"
sep += "+"
for i in range(longest_t + 2):
sep += "-"
sep += "+\n"
f.write(sep)
for s in ml:
rt = s[0]
while (len(rt) < longest_s):
rt += " "
st = s[1]
while (len(st) < longest_t):
st += " "
f.write("| " + rt + " | " + st + " |\n")
f.write(sep)
f.write('\n')
events = node.find('signals')
if events != None and len(list(events)) > 0:
f.write(make_heading('Signals', '-'))
for m in list(events):
f.write(".. _class_" + name + "_" + m.attrib['name'] + ":\n\n")
make_method(f, node.attrib['name'], m, True, name, True)
f.write('\n')
d = m.find('description')
if d == None or d.text.strip() == '':
continue
f.write(rstize_text(d.text.strip(), name))
f.write("\n\n")
f.write('\n')
members = node.find('members')
if members != None and len(list(members)) > 0:
f.write(make_heading('Member Variables', '-'))
for c in list(members):
# Leading two spaces necessary to prevent breaking the <ul>
f.write(" .. _class_" + name + "_" + c.attrib['name'] + ":\n\n")
s = '- '
s += make_type(c.attrib['type']) + ' '
s += '**' + c.attrib['name'] + '**'
if c.text.strip() != '':
s += ' - ' + rstize_text(c.text.strip(), name)
f.write(s + '\n\n')
f.write('\n')
constants = node.find('constants')
if constants != None and len(list(constants)) > 0:
f.write(make_heading('Numeric Constants', '-'))
for c in list(constants):
s = '- '
s += '**' + c.attrib['name'] + '**'
if 'value' in c.attrib:
s += ' = **' + c.attrib['value'] + '**'
if c.text.strip() != '':
s += ' --- ' + rstize_text(c.text.strip(), name)
f.write(s + '\n')
f.write('\n')
descr = node.find('description')
if descr != None and descr.text.strip() != '':
f.write(make_heading('Description', '-'))
f.write(rstize_text(descr.text.strip(), name) + "\n\n")
methods = node.find('methods')
if methods != None and len(list(methods)) > 0:
f.write(make_heading('Member Function Description', '-'))
for m in list(methods):
f.write(".. _class_" + name + "_" + m.attrib['name'] + ":\n\n")
make_method(f, node.attrib['name'], m, True, name)
f.write('\n')
d = m.find('description')
if d == None or d.text.strip() == '':
continue
f.write(rstize_text(d.text.strip(), name))
f.write("\n\n")
f.write('\n')
file_list = []
for path in input_list:
if os.path.basename(path) == 'modules':
for subdir, dirs, _ in os.walk(path):
if 'doc_classes' in dirs:
doc_dir = os.path.join(subdir, 'doc_classes')
class_file_names = [f for f in os.listdir(doc_dir) if f.endswith('.xml')]
file_list += [os.path.join(doc_dir, f) for f in class_file_names]
elif not os.path.isfile(path):
file_list += [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.xml')]
elif os.path.isfile(path) and path.endswith('.xml'):
file_list.append(path)
for file in file_list:
tree = ET.parse(file)
doc = tree.getroot()
if 'version' not in doc.attrib:
print("Version missing from 'doc'")
sys.exit(255)
version = doc.attrib['version']
if doc.attrib['name'] in class_names:
continue
class_names.append(doc.attrib['name'])
classes[doc.attrib['name']] = doc
class_names.sort()
# Don't make class list for Sphinx, :toctree: handles it
# make_class_list(class_names, 2)
for cn in class_names:
c = classes[cn]
make_rst_class(c)
| mit |
tkaitchuck/nupic | build_system/contrib/xcode-setup-install.py | 1 | 1372 | #!/usr/bin/env python
import os
import sys
import string
doClean = ('clean' in sys.argv) or ('uninstall' in sys.argv)
rootDir = os.getcwd()
buildSystemDir = os.path.join(rootDir, 'build_system')
# Generate the configure input files.
setupCmd = 'python ' + os.path.join(buildSystemDir, 'setup.py') + ' --autogen' \
+ " --win32BuildDir '$(NTAX_BUILD_DIR)'"
print 'Running command:', setupCmd
sys.stdout.flush()
retCode = os.system(setupCmd)
if retCode != 0:
print >>sys.stderr, 'setup.py failed: Error', retCode
sys.exit(1)
buildDir = os.environ['BUILT_PRODUCTS_DIR']
buildStyle = os.environ['BUILD_STYLE']
# Build the configure command.
configureCmd = os.path.join(buildSystemDir, 'contrib', 'configure.py')
configureCmd += ' --mode=%s' % buildStyle
configureCmd += ' --builddir=%s' % buildDir
print 'Running command:', configureCmd
sys.stdout.flush()
retCode = os.system(configureCmd)
if retCode != 0:
print >>sys.stderr, 'configure failed: Error', retCode
sys.exit(1)
# Build
success = True
pushd = os.getcwd()
os.chdir(buildDir)
buildCmd = os.path.join(buildSystemDir, 'contrib', 'make.py')
if doClean: buildCmd += ' clean'
print 'Running command:', buildCmd
retCode = os.system(buildCmd)
if retCode != 0:
print >>sys.stderr, 'Build failed: Error', retCode
success = False
os.chdir(pushd)
if not success:
sys.exit(1)
| gpl-3.0 |
boegel/easybuild-easyblocks | easybuild/easyblocks/d/dolfin.py | 1 | 15555 | ##
# Copyright 2009-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for DOLFIN, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Jens Timmerman (Ghent University)
"""
import glob
import os
import re
import tempfile
from distutils.version import LooseVersion
import easybuild.tools.environment as env
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.cmakepythonpackage import CMakePythonPackage
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import change_dir, remove
from easybuild.tools.modules import get_software_root, get_software_version
from easybuild.tools.run import run_cmd
from easybuild.tools.systemtools import get_shared_lib_ext
class EB_DOLFIN(CMakePythonPackage):
"""Support for building and installing DOLFIN."""
@staticmethod
def extra_options():
extra_vars = CMakePythonPackage.extra_options()
extra_vars['separate_build_dir'][0] = True
return extra_vars
def __init__(self, *args, **kwargs):
"""Initialize class variables."""
super(EB_DOLFIN, self).__init__(*args, **kwargs)
self.boost_dir = None
self.saved_configopts = None
def configure_step(self):
"""Set DOLFIN-specific configure options and configure with CMake."""
shlib_ext = get_shared_lib_ext()
# compiler flags
cflags = os.getenv('CFLAGS')
cxxflags = os.getenv('CXXFLAGS')
fflags = os.getenv('FFLAGS')
# fix for "SEEK_SET is #defined but must not be for the C++ binding of MPI. Include mpi.h before stdio.h"
if self.toolchain.mpi_family() in [toolchain.INTELMPI, toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2]:
cflags += " -DMPICH_IGNORE_CXX_SEEK"
cxxflags += " -DMPICH_IGNORE_CXX_SEEK"
fflags += " -DMPICH_IGNORE_CXX_SEEK"
self.cfg.update('configopts', '-DCMAKE_C_FLAGS="%s"' % cflags)
self.cfg.update('configopts', '-DCMAKE_CXX_FLAGS="%s"' % cxxflags)
self.cfg.update('configopts', '-DCMAKE_Fortran_FLAGS="%s"' % fflags)
# set correct compilers to be used at runtime
self.cfg.update('configopts', '-DMPI_C_COMPILER="$MPICC"')
self.cfg.update('configopts', '-DMPI_CXX_COMPILER="$MPICXX"')
# specify MPI library
self.cfg.update('configopts', '-DMPI_COMPILER="%s"' % os.getenv('MPICC'))
if os.getenv('MPI_LIB_SHARED') and os.getenv('MPI_INC_DIR'):
self.cfg.update('configopts', '-DMPI_LIBRARY="%s"' % os.getenv('MPI_LIB_SHARED'))
self.cfg.update('configopts', '-DMPI_INCLUDE_PATH="%s"' % os.getenv('MPI_INC_DIR'))
else:
raise EasyBuildError("MPI_LIB_SHARED or MPI_INC_DIR not set, could not determine MPI-related paths.")
# save config options to reuse them later (e.g. for sanity check commands)
self.saved_configopts = self.cfg['configopts']
# make sure that required dependencies are loaded
deps = ['Boost', 'CGAL', 'ParMETIS', 'PETSc', 'Python',
'SCOTCH', 'SLEPc', 'SuiteSparse', 'Trilinos', 'zlib']
# Armadillo was replaced by Eigen in v1.3
if LooseVersion(self.version) < LooseVersion('1.3'):
deps.append('Armadillo')
else:
deps.append('Eigen')
# UFC has been integrated into FFC in v1.4, cfr. https://bitbucket.org/fenics-project/ufc-deprecated
if LooseVersion(self.version) < LooseVersion('1.4'):
deps.append('UFC')
# PLY, petsc4py, slepc4py are required since v1.5
if LooseVersion(self.version) >= LooseVersion('1.5'):
deps.extend(['petsc4py', 'PLY', 'slepc4py'])
# pybind11 is required to build Python bindings since v2018.1
if LooseVersion(self.version) >= LooseVersion('2018.1'):
deps.append('pybind11')
depsdict = {}
for dep in deps:
deproot = get_software_root(dep)
if not deproot:
raise EasyBuildError("Dependency %s not available.", dep)
else:
depsdict.update({dep: deproot})
# zlib
self.cfg.update('configopts', '-DZLIB_INCLUDE_DIR=%s' % os.path.join(depsdict['zlib'], "include"))
self.cfg.update('configopts', '-DZLIB_LIBRARY=%s' % os.path.join(depsdict['zlib'], "lib", "libz.a"))
# set correct openmp options
openmp = self.toolchain.get_flag('openmp')
self.cfg.update('configopts', '-DOpenMP_CXX_FLAGS="%s"' % openmp)
self.cfg.update('configopts', '-DOpenMP_C_FLAGS="%s"' % openmp)
# Boost config parameters
self.cfg.update('configopts', "-DBOOST_INCLUDEDIR=%s/include" % depsdict['Boost'])
self.cfg.update('configopts', "-DBoost_DEBUG=ON -DBOOST_ROOT=%s" % depsdict['Boost'])
self.boost_dir = depsdict['Boost']
# UFC and Armadillo config params
if 'UFC' in depsdict:
self.cfg.update('configopts', "-DUFC_DIR=%s" % depsdict['UFC'])
if 'Armadillo' in depsdict:
self.cfg.update('configopts', "-DARMADILLO_DIR:PATH=%s " % depsdict['Armadillo'])
# Eigen config params
if 'Eigen' in depsdict:
self.cfg.update('configopts', "-DEIGEN3_INCLUDE_DIR=%s " % os.path.join(depsdict['Eigen'], 'include'))
# specify Python paths
if LooseVersion(self.version) < LooseVersion('2018.1'):
python = depsdict['Python']
pyver = '.'.join(get_software_version('Python').split('.')[:2])
self.cfg.update('configopts', "-DPYTHON_INCLUDE_PATH=%s/include/python%s" % (python, pyver))
self.cfg.update('configopts', "-DPYTHON_LIBRARY=%s/lib/libpython%s.%s" % (python, pyver, shlib_ext))
# SuiteSparse config params
suitesparse = depsdict['SuiteSparse']
umfpack_params = [
'-DUMFPACK_DIR="%(sp)s/UMFPACK"',
'-DUMFPACK_INCLUDE_DIRS="%(sp)s/UMFPACK/include;%(sp)s/UFconfig"',
'-DAMD_DIR="%(sp)s/UMFPACK"',
'-DCHOLMOD_DIR="%(sp)s/CHOLMOD"',
'-DCHOLMOD_INCLUDE_DIRS="%(sp)s/CHOLMOD/include;%(sp)s/UFconfig"',
'-DUFCONFIG_DIR="%(sp)s/UFconfig"',
'-DCAMD_LIBRARY:PATH="%(sp)s/CAMD/lib/libcamd.a"',
'-DCCOLAMD_LIBRARY:PATH="%(sp)s/CCOLAMD/lib/libccolamd.a"',
'-DCOLAMD_LIBRARY:PATH="%(sp)s/COLAMD/lib/libcolamd.a"'
]
self.cfg.update('configopts', ' '.join(umfpack_params) % {'sp': suitesparse})
# ParMETIS and SCOTCH
self.cfg.update('configopts', '-DPARMETIS_DIR="%s"' % depsdict['ParMETIS'])
self.cfg.update('configopts', '-DSCOTCH_DIR="%s" -DSCOTCH_DEBUG:BOOL=ON' % depsdict['SCOTCH'])
# BLACS and LAPACK
self.cfg.update('configopts', '-DBLAS_LIBRARIES:PATH="%s"' % os.getenv('LIBBLAS'))
self.cfg.update('configopts', '-DLAPACK_LIBRARIES:PATH="%s"' % os.getenv('LIBLAPACK'))
# CGAL
self.cfg.update('configopts', '-DCGAL_DIR:PATH="%s"' % depsdict['CGAL'])
# PETSc
        # need to specify PETSC_ARCH explicitly (env var alone is not sufficient)
for env_var in ["PETSC_DIR", "PETSC_ARCH"]:
val = os.getenv(env_var)
if val:
self.cfg.update('configopts', '-D%s=%s' % (env_var, val))
# MTL4
if 'MTL4' in depsdict:
self.cfg.update('configopts', '-DMTL4_DIR:PATH="%s"' % depsdict['MTL4'])
# SUNDIALS
if 'SUNDIALS' in depsdict:
self.cfg.update('configopts', '-DSUNDIALS_DIR:PATH="%s"' % depsdict['SUNDIALS'])
# configure
out = super(EB_DOLFIN, self).configure_step()
# make sure that all optional packages are found
not_found_re = re.compile("The following optional packages could not be found")
if not_found_re.search(out):
raise EasyBuildError("Optional packages could not be found, this should not happen...")
# enable verbose build, so we have enough information if something goes wrong
self.cfg.update('buildopts', "VERBOSE=1")
def test_step(self):
"""Run DOLFIN demos by means of test."""
if self.cfg['runtest']:
# set cache/error dirs for Instant
tmpdir = tempfile.mkdtemp()
instant_cache_dir = os.path.join(tmpdir, '.instant', 'cache')
instant_error_dir = os.path.join(tmpdir, '.instant', 'error')
try:
os.makedirs(instant_cache_dir)
os.makedirs(instant_error_dir)
except OSError as err:
raise EasyBuildError("Failed to create Instant cache/error dirs: %s", err)
env_vars = [
('INSTANT_CACHE_DIR', instant_cache_dir),
('INSTANT_ERROR_DIR', instant_error_dir),
]
env_var_cmds = ' && '.join(['export %s="%s"' % (var, val) for (var, val) in env_vars])
cpp_cmds = [
env_var_cmds,
"cd %(dir)s",
]
if LooseVersion(self.version) < LooseVersion('1.1'):
cpp_cmds.append("cmake . %s" % self.saved_configopts)
cpp_cmds.extend([
"make VERBOSE=1",
"./demo_%(name)s",
"cd -",
])
cmd_template_cpp = " && ".join(cpp_cmds)
# list based on demos available for DOLFIN v1.0.0
pde_demos = ['biharmonic', 'cahn-hilliard', 'hyperelasticity', 'mixed-poisson',
'navier-stokes', 'poisson', 'stokes-iterative']
if LooseVersion(self.version) < LooseVersion('1.1'):
demos = [os.path.join('demo', 'la', 'eigenvalue')] + [os.path.join('demo', 'pde', x) for x in pde_demos]
else:
# verified with v1.6.0
demos = [os.path.join('demo', 'documented', x) for x in pde_demos]
# construct commands
cmds = [tmpl % {'dir': os.path.join(d, subdir), 'name': os.path.basename(d)}
for d in demos for (tmpl, subdir) in [(cmd_template_cpp, 'cpp')]]
# exclude Python tests for now, because they 'hang' sometimes (unclear why)
# they can be reinstated once run_cmd (or its equivalent) has support for timeouts
# see https://github.com/easybuilders/easybuild-framework/issues/581
# test command templates
# cmd_template_python = " && ".join([
# env_var_cmds,
# "cd %(dir)s",
# "python demo_%(name)s.py",
# "cd -",
# ])
# for (tmpl, subdir) in [(cmd_template_python, 'python'), (cmd_template_cpp, 'cpp')]
# subdomains-poisson has no C++ get_version, only Python
# Python tests excluded, see above
# name = 'subdomains-poisson'
# path = os.path.join('demo', 'pde', name, 'python')
# cmds += [cmd_template_python % {'dir': path, 'name': name}]
# supply empty argument to each command
for cmd in cmds:
run_cmd(cmd, log_all=True)
# clean up temporary dir
remove(tmpdir)
def install_step(self):
"""Custom install procedure for DOLFIN: also install Python bindings."""
super(EB_DOLFIN, self).install_step()
# avoid that pip (ab)uses $HOME/.cache/pip
# cfr. https://pip.pypa.io/en/stable/reference/pip_install/#caching
env.setvar('XDG_CACHE_HOME', tempfile.gettempdir())
self.log.info("Using %s as pip cache directory", os.environ['XDG_CACHE_HOME'])
if LooseVersion(self.version) >= LooseVersion('2018.1'):
# see https://bitbucket.org/fenics-project/dolfin/issues/897/switch-from-swig-to-pybind11-for-python
# and https://github.com/FEniCS/dolfin/blob/master/python/README.rst
cwd = change_dir(os.path.join(self.start_dir, 'python'))
env.setvar('CMAKE_PREFIX_PATH', self.installdir)
env.setvar('PYBIND11_DIR', get_software_root('pybind11'))
run_cmd("pip install --prefix %s ." % self.installdir)
change_dir(cwd)
def post_install_step(self):
"""Post install actions: extend RPATH paths in .so libraries part of the DOLFIN Python package."""
if LooseVersion(self.version) >= LooseVersion('1.1'):
# cfr. https://github.com/hashdist/hashstack/blob/master/pkgs/dolfin/dolfin.yaml (look for patchelf)
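            # the loop below appends the dolfin lib dir to each extension's existing RPATH,
            # e.g. '$ORIGIN/..' becoming '$ORIGIN/..:<installdir>/lib' so the _*.so modules can
            # locate libdolfin.so (example RPATH value is illustrative)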
# determine location of libdolfin.so
dolfin_lib = 'libdolfin.so'
dolfin_libdir = None
for libdir in ['lib', 'lib64']:
if os.path.exists(os.path.join(self.installdir, libdir, dolfin_lib)):
dolfin_libdir = os.path.join(self.installdir, libdir)
break
if dolfin_libdir is None:
raise EasyBuildError("Failed to locate %s", dolfin_lib)
for pylibdir in self.all_pylibdirs:
libs = glob.glob(os.path.join(self.installdir, pylibdir, 'dolfin', 'cpp', '_*.so'))
for lib in libs:
out, _ = run_cmd("patchelf --print-rpath %s" % lib, simple=False, log_all=True)
curr_rpath = out.strip()
cmd = "patchelf --set-rpath '%s:%s' %s" % (curr_rpath, dolfin_libdir, lib)
run_cmd(cmd, log_all=True)
def make_module_extra(self):
"""Set extra environment variables for DOLFIN."""
txt = super(EB_DOLFIN, self).make_module_extra()
# Dolfin needs to find Boost
# check whether boost_dir is defined for compatibility with --module-only
if self.boost_dir:
txt += self.module_generator.set_environment('BOOST_DIR', self.boost_dir)
envvars = ['I_MPI_CXX', 'I_MPI_CC']
for envvar in envvars:
envar_val = os.getenv(envvar)
# if environment variable is set, also set it in module
if envar_val:
txt += self.module_generator.set_environment(envvar, envar_val)
return txt
def sanity_check_step(self):
"""Custom sanity check for DOLFIN."""
# custom sanity check paths
custom_paths = {
'files': ['bin/dolfin-%s' % x for x in ['version', 'convert', 'order', 'plot']] + ['include/dolfin.h'],
'dirs': ['%s/dolfin' % self.pylibdir],
}
super(EB_DOLFIN, self).sanity_check_step(custom_paths=custom_paths)
| gpl-2.0 |
cpollard1001/FreeCAD_sf_master | src/Mod/TemplatePyMod/MengerSponge.py | 27 | 3238 | # Script to create a Menger sponge
# (c) 2012 Werner Mayer LGPL
# The script is based on the work of daxmick at
# http://forum.freecadweb.org/viewtopic.php?f=3&t=2307
import threading
import Mesh, MeshGui
from FreeCAD import Base
# Create a global mesh and make copies of them
# This makes the algorithm faster by ~60%.
box = Mesh.createBox(1,1,1)
# Create a Box and Place it a coords (x,y,z)
def PlaceBox(x,y,z):
global box
mbox=box.copy()
mbox.translate(x,y,z)
return mbox
def Sierpinski(level,x0,y0,z0):
#print threading.current_thread().name
boxnums = pow(3,level)
thirds = boxnums / 3
twothirds = thirds * 2
if(level == 0):
rangerx = [x0]
rangery = [y0]
rangerz = [z0]
else:
rangerx = [ x0, x0 + thirds, x0 + twothirds ]
rangery = [ y0, y0 + thirds, y0 + twothirds ]
rangerz = [ z0, z0 + thirds, z0 + twothirds ]
block = 1
skip=[5,11,13,14,15,17,23]
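    # blocks are numbered 1..27 with k varying fastest; the skipped ones are the six
    # face centres (5, 11, 13, 15, 17, 23) and the body centre (14) of the 3x3x3 grid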
mesh=Mesh.Mesh()
for i in rangerx:
for j in rangery:
for k in rangerz:
if block not in skip:
if(level > 0):
mesh.addMesh(Sierpinski(level-1,i,j,k))
else:
mesh.addMesh(PlaceBox(i,j,k))
block+=1
return mesh
### Multi-threaded ###
class MengerThread(threading.Thread):
def __init__(self,args):
self.args=args
self.mesh=Mesh.Mesh()
threading.Thread.__init__(self)
def run(self):
for i in self.args:
self.mesh.addMesh(Sierpinski(*i))
def makeMengerSponge_mt(level=3,x0=0,y0=0,z0=0):
"""
Is much slower than makeMengerSponge!!! :(
"""
if level == 0:
mesh=Sierpinski(level,x0,y0,z0)
Mesh.show(mesh)
return
boxnums = pow(3,level)
thirds = boxnums / 3
twothirds = thirds * 2
rangerx = [ x0, x0 + thirds, x0 + twothirds ]
rangery = [ y0, y0 + thirds, y0 + twothirds ]
rangerz = [ z0, z0 + thirds, z0 + twothirds ]
block = 1
skip=[5,11,13,14,15,17,23]
# collect the arguments for the algorithm in a list
args=[]
for i in rangerx:
for j in rangery:
for k in rangerz:
if block not in skip:
args.append((level-1,i,j,k))
block+=1
numJobs = 4
threads=[]
while numJobs > 0:
size = len(args)
count = size / numJobs
numJobs-=1
thr=MengerThread(args[:count])
threads.append(thr)
args=args[count:]
print "Number of threads: %i" % (len(threads))
for thr in threads:
thr.start()
for thr in threads:
thr.join()
mesh=Mesh.Mesh()
for thr in threads:
mesh.addMesh(thr.mesh)
del thr.mesh
print mesh
mesh.removeDuplicatedPoints()
mesh.removeFacets(mesh.getInternalFacets())
mesh.rebuildNeighbourHood()
print "Mesh is solid: %s" % (mesh.isSolid())
Mesh.show(mesh)
### Single-threaded ###
def makeMengerSponge(level=3,x0=0,y0=0,z0=0):
mesh=Sierpinski(level,x0,y0,z0)
mesh.removeDuplicatedPoints()
mesh.removeFacets(mesh.getInternalFacets())
mesh.rebuildNeighbourHood()
print "Mesh is solid: %s" % (mesh.isSolid())
Mesh.show(mesh)
| lgpl-2.1 |