repo_name
stringlengths 5
100
| path
stringlengths 4
299
| copies
stringclasses 990
values | size
stringlengths 4
7
| content
stringlengths 666
1.03M
| license
stringclasses 15
values | hash
int64 -9,223,351,895,964,839,000
9,223,297,778B
| line_mean
float64 3.17
100
| line_max
int64 7
1k
| alpha_frac
float64 0.25
0.98
| autogenerated
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|
dmort27/panphon | panphon/bin/align_wordlists.py | 1 | 2458 | #!/usr/bin/env python
from __future__ import print_function
import unicodecsv as csv
import argparse
import panphon
import Levenshtein
import munkres
import panphon.distance
from functools import partial
def levenshtein_dist(_, a, b):
    """Plain Levenshtein (string edit) distance between two strings.

    The first argument (a panphon Distance instance) is ignored; it exists
    only so that all metric functions share one (dist, a, b) signature.
    """
    distance = Levenshtein.distance(a, b)
    return distance
def dogol_leven_dist(dist, a, b):
    """Levenshtein distance after mapping both strings to Dogolpolsky
    prime sound classes.

    Fix: this previously ignored its first argument (named ``_``) and
    relied on the module-level ``dist`` global, unlike the sibling metric
    functions.  It now uses the Distance object bound by ``partial`` in
    ``__main__``, consistent with ``feature_hamming_dist`` and
    ``feature_weighted_dist``.
    """
    return Levenshtein.distance(dist.map_to_dogol_prime(a),
                                dist.map_to_dogol_prime(b))
def feature_hamming_dist(dist, a, b):
    """Unweighted articulatory-feature edit distance, delegated to the
    panphon Distance object passed as the first argument."""
    metric = dist.feature_edit_distance
    return metric(a, b)
def feature_weighted_dist(dist, a, b):
    """Weighted articulatory-feature edit distance, delegated to the
    panphon Distance object passed as the first argument."""
    metric = dist.weighted_feature_edit_distance
    return metric(a, b)
def construct_cost_matrix(words_a, words_b, dist):
    """Build the |A| x |B| cost matrix for the Hungarian algorithm.

    Both word lists are sequences of (word, gloss) pairs; only the word
    part enters the distance computation.  Entry [i][j] is
    dist(words_a[i][0], words_b[j][0]).
    """
    return [[dist(word_a, word_b) for (word_b, _) in words_b]
            for (word_a, _) in words_a]
def score(indices):
    """Return (pairs, errors) for an assignment.

    ``indices`` is an iterable of (row, column) pairs as produced by
    munkres; an off-diagonal pair (row != column) counts as one error
    because the input lists are aligned by position.
    """
    pairings = list(indices)
    mismatches = sum(1 for row, column in pairings if row != column)
    return len(pairings), mismatches
def main(wordlist1, wordlist2, dist_funcs):
    """Align two word lists with the Hungarian algorithm and print the
    (pairs, errors) score.

    Each CSV row is expected to be (gloss, word); rows are paired by
    position.  NOTE(review): this is Python 2 style I/O -- binary 'rb'
    mode combined with unicodecsv -- and would need
    ``open(..., encoding=...)`` changes to run on Python 3.
    """
    with open(wordlist1, 'rb') as file_a, open(wordlist2, 'rb') as file_b:
        reader_a = csv.reader(file_a, encoding='utf-8')
        reader_b = csv.reader(file_b, encoding='utf-8')
        print('Reading word lists...')
        # Swap each row to (word, gloss); zip silently truncates to the
        # shorter of the two lists.
        words = zip([(w, g) for (g, w) in reader_a],
                    [(w, g) for (g, w) in reader_b])
        # Keep only positions where both sides produced a non-empty row.
        words_a, words_b = zip(*[(a, b) for (a, b) in words if a and b])
        print('Constructing cost matrix...')
        matrix = construct_cost_matrix(words_a, words_b, dist_funcs)
        m = munkres.Munkres()
        print('Computing matrix using Hungarian Algorithm...')
        indices = m.compute(matrix)
        print(score(indices))
        print('Done.')
if __name__ == '__main__':
    # Fix: this sentence is a description, not a usage string.  Passing it
    # as usage= replaced argparse's auto-generated usage line with prose;
    # description= keeps the standard usage line and shows the text in help.
    parser = argparse.ArgumentParser(
        description='Align two lists of "cognates" using a specified distance metric.')
    parser.add_argument('wordlists', nargs=2, help='Filenames of two wordlists in corresponding order.')
    parser.add_argument('-d', '--dist', default='hamming', help='Distance metric (e.g. Hamming).')
    args = parser.parse_args()
    # Map metric names to scoring functions; each takes (dist, a, b).
    dists = {'levenshtein': levenshtein_dist,
             'dogol-leven': dogol_leven_dist,
             'hamming': feature_hamming_dist,
             'weighted': feature_weighted_dist}
    dist = panphon.distance.Distance()
    # Bind the Distance instance as the first argument of the chosen metric.
    dist_funcs = partial(dists[args.dist], dist)
    main(args.wordlists[0], args.wordlists[1], dist_funcs)
| mit | -8,969,342,033,401,456,000 | 32.671233 | 110 | 0.626932 | false |
jounex/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/humanize/templatetags/humanize.py | 98 | 9276 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.translation import pgettext, ungettext, ugettext as _
from django.utils.timezone import is_aware, utc
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
    """
    Convert an integer to its ordinal as a string: 1 -> '1st', 2 -> '2nd',
    3 -> '3rd', etc.  Works for any integer; values that cannot be
    converted to int are returned unchanged.
    """
    try:
        number = int(value)
    except (TypeError, ValueError):
        return value
    suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'),
                _('th'), _('th'), _('th'), _('th'), _('th'))
    # 11th, 12th and 13th are irregular: they take 'th' despite ending
    # in 1, 2 and 3.
    if number % 100 in (11, 12, 13):
        suffix = suffixes[0]
    else:
        suffix = suffixes[number % 10]
    return "%d%s" % (number, suffix)
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
    """
    Converts an integer to a string containing commas every three digits.
    For example, 3000 becomes '3,000' and 45000 becomes '45,000'.

    When localization is enabled, formatting is delegated to
    ``number_format``; otherwise the commas are inserted recursively with
    a regular expression, three digits at a time.
    """
    if settings.USE_L10N and use_l10n:
        try:
            if not isinstance(value, (float, Decimal)):
                value = int(value)
        except (TypeError, ValueError):
            # Not numeric at all: retry without localization so the
            # regex path gets a chance to format it as a string.
            return intcomma(value, False)
        else:
            return number_format(value, force_grouping=True)
    orig = force_text(value)
    # Fix: use raw strings for the regex.  '\d' and '\g' are invalid
    # string escapes and raise DeprecationWarning (eventually SyntaxError)
    # on modern Pythons; the matched behaviour is unchanged.
    new = re.sub(r"^(-?\d+)(\d{3})", r'\g<1>,\g<2>', orig)
    if orig == new:
        return new
    else:
        # A substitution happened; recurse to insert the remaining commas.
        return intcomma(new, use_l10n)
# Map a power of ten (the exponent) to a pair of lazy translations for
# numbers of that magnitude: the first entry formats a float value
# ('%(value).1f million'), the second a pre-formatted string
# ('%(value)s million').  Entries must stay sorted by ascending exponent --
# intword() walks them in order and uses the first magnitude that fits.
intword_converters = (
    (6, lambda number: (
        ungettext('%(value).1f million', '%(value).1f million', number),
        ungettext('%(value)s million', '%(value)s million', number),
    )),
    (9, lambda number: (
        ungettext('%(value).1f billion', '%(value).1f billion', number),
        ungettext('%(value)s billion', '%(value)s billion', number),
    )),
    (12, lambda number: (
        ungettext('%(value).1f trillion', '%(value).1f trillion', number),
        ungettext('%(value)s trillion', '%(value)s trillion', number),
    )),
    (15, lambda number: (
        ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
        ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
    )),
    (18, lambda number: (
        ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
        ungettext('%(value)s quintillion', '%(value)s quintillion', number),
    )),
    (21, lambda number: (
        ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
        ungettext('%(value)s sextillion', '%(value)s sextillion', number),
    )),
    (24, lambda number: (
        ungettext('%(value).1f septillion', '%(value).1f septillion', number),
        ungettext('%(value)s septillion', '%(value)s septillion', number),
    )),
    (27, lambda number: (
        ungettext('%(value).1f octillion', '%(value).1f octillion', number),
        ungettext('%(value)s octillion', '%(value)s octillion', number),
    )),
    (30, lambda number: (
        ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
        ungettext('%(value)s nonillion', '%(value)s nonillion', number),
    )),
    (33, lambda number: (
        ungettext('%(value).1f decillion', '%(value).1f decillion', number),
        ungettext('%(value)s decillion', '%(value)s decillion', number),
    )),
    (100, lambda number: (
        ungettext('%(value).1f googol', '%(value).1f googol', number),
        ungettext('%(value)s googol', '%(value)s googol', number),
    )),
)
@register.filter(is_safe=False)
def intword(value):
    """
    Converts a large integer to a friendly text representation. Works best
    for numbers over 1 million. For example, 1000000 becomes '1.0 million',
    1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
    """
    try:
        number = int(value)
    except (TypeError, ValueError):
        return value
    if number < 1000000:
        return number

    def _render(scaled, float_formatted, string_formatted):
        """Pick the i18n-enabled floatformat template when L10N is on."""
        if settings.USE_L10N:
            return string_formatted % {
                'value': defaultfilters.floatformat(scaled, 1)}
        return float_formatted % {'value': scaled}

    # Walk the magnitudes in ascending order; the first one whose next
    # step would exceed the number is the right unit.
    for exponent, converter in intword_converters:
        threshold = 10 ** exponent
        if number < threshold * 1000:
            scaled = number / float(threshold)
            float_fmt, string_fmt = converter(scaled)
            return _render(scaled, float_fmt, string_fmt)
    return number
@register.filter(is_safe=True)
def apnumber(value):
    """
    For numbers 1-9, return the number spelled out; otherwise return the
    number itself.  This follows Associated Press style.
    """
    try:
        number = int(value)
    except (TypeError, ValueError):
        return value
    if number < 1 or number > 9:
        return number
    words = (_('one'), _('two'), _('three'), _('four'), _('five'),
             _('six'), _('seven'), _('eight'), _('nine'))
    return words[number - 1]
# Perform the comparison in the default time zone when USE_TZ = True
# (unless a specific time zone has been applied with the |timezone filter).
@register.filter(expects_localtime=True)
def naturalday(value, arg=None):
    """
    Return 'today', 'tomorrow' or 'yesterday' for dates within one day of
    the present; otherwise format the date with *arg* (falls back to
    settings.DATE_FORMAT).
    """
    try:
        tzinfo = getattr(value, 'tzinfo', None)
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't a date object
        return value
    except ValueError:
        # Date arguments out of range
        return value
    offset = (value - datetime.now(tzinfo).date()).days
    if offset == 0:
        return _('today')
    if offset == 1:
        return _('tomorrow')
    if offset == -1:
        return _('yesterday')
    return defaultfilters.date(value, arg)
# This filter doesn't require expects_localtime=True because it deals properly
# with both naive and aware datetimes. Therefore avoid the cost of conversion.
@register.filter
def naturaltime(value):
    """
    For date and time values shows how many seconds, minutes or hours ago
    compared to current timestamp returns representing string.
    """
    # Non-date inputs pass through untouched.
    if not isinstance(value, date):  # datetime is a subclass of date
        return value
    # Use an aware "now" only when the input itself is aware, so naive and
    # aware datetimes are both compared without conversion.
    now = datetime.now(utc if is_aware(value) else None)
    if value < now:
        # Past: "... ago" strings.  NOTE: this branch and the future branch
        # below are deliberately duplicated -- gettext extraction
        # (makemessages) can only pick up literal strings, so the
        # ungettext() calls must not be factored into shared data.
        delta = now - value
        if delta.days != 0:
            return pgettext(
                'naturaltime', '%(delta)s ago'
            ) % {'delta': defaultfilters.timesince(value, now)}
        elif delta.seconds == 0:
            return _('now')
        elif delta.seconds < 60:
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'a second ago', '%(count)s seconds ago', delta.seconds
            ) % {'count': delta.seconds}
        elif delta.seconds // 60 < 60:
            count = delta.seconds // 60
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'a minute ago', '%(count)s minutes ago', count
            ) % {'count': count}
        else:
            count = delta.seconds // 60 // 60
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'an hour ago', '%(count)s hours ago', count
            ) % {'count': count}
    else:
        # Future: mirror image of the branch above with "... from now".
        delta = value - now
        if delta.days != 0:
            return pgettext(
                'naturaltime', '%(delta)s from now'
            ) % {'delta': defaultfilters.timeuntil(value, now)}
        elif delta.seconds == 0:
            return _('now')
        elif delta.seconds < 60:
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'a second from now', '%(count)s seconds from now', delta.seconds
            ) % {'count': delta.seconds}
        elif delta.seconds // 60 < 60:
            count = delta.seconds // 60
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'a minute from now', '%(count)s minutes from now', count
            ) % {'count': count}
        else:
            count = delta.seconds // 60 // 60
            return ungettext(
                # Translators: please keep a non-breaking space (U+00A0)
                # between count and time unit.
                'an hour from now', '%(count)s hours from now', count
            ) % {'count': count}
| apache-2.0 | 93,445,721,547,630,080 | 36.991803 | 119 | 0.587271 | false |
nvoron23/hue | apps/oozie/src/oozie/migrations/0009_auto__add_decision.py | 39 | 20608 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: introduces the 'Decision' workflow node.

    The ``models`` attribute below is South's frozen ORM snapshot of every
    model at the time this migration was generated.  It is auto-generated
    (note the embedded datetime defaults frozen at generation time) and
    must not be edited by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the oozie_decision table."""
        # Adding model 'Decision'
        db.create_table('oozie_decision', (
            ('node_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['oozie.Node'], unique=True, primary_key=True)),
        ))
        db.send_create_signal('oozie', ['Decision'])

    def backwards(self, orm):
        """Revert the migration: drop the oozie_decision table."""
        # Deleting model 'Decision'
        db.delete_table('oozie_decision')

    # Frozen ORM definitions (auto-generated by South; do not edit).
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'oozie.coordinator': {
            'Meta': {'object_name': 'Coordinator', '_ormbases': ['oozie.Job']},
            'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 11, 3, 12, 54, 7, 295114)'}),
            'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 31, 12, 54, 7, 295060)'}),
            'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']", 'null': 'True'})
        },
        'oozie.datainput': {
            'Meta': {'object_name': 'DataInput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataoutput': {
            'Meta': {'object_name': 'DataOutput'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Dataset']", 'unique': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'oozie.dataset': {
            'Meta': {'object_name': 'Dataset'},
            'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Coordinator']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
            'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 10, 31, 12, 54, 7, 295858)'}),
            'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
            'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
        },
        'oozie.decision': {
            'Meta': {'object_name': 'Decision'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.distcp': {
            'Meta': {'object_name': 'DistCp'},
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.end': {
            'Meta': {'object_name': 'End'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.fork': {
            'Meta': {'object_name': 'Fork'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.history': {
            'Meta': {'object_name': 'History'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Job']"}),
            'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'properties': ('django.db.models.fields.TextField', [], {}),
            'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'oozie.hive': {
            'Meta': {'object_name': 'Hive'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.hive.defaults","value":"hive-default.xml"}]\''}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.java': {
            'Meta': {'object_name': 'Java'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'args': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'blank': 'True'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.job': {
            'Meta': {'object_name': 'Job'},
            'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
            'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'oozie.join': {
            'Meta': {'object_name': 'Join'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.kill': {
            'Meta': {'object_name': 'Kill'},
            'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
        },
        'oozie.link': {
            'Meta': {'object_name': 'Link'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': "orm['oozie.Node']"}),
            'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': "orm['oozie.Node']"})
        },
        'oozie.mapreduce': {
            'Meta': {'object_name': 'Mapreduce'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.node': {
            'Meta': {'object_name': 'Node'},
            'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': "orm['oozie.Link']", 'to': "orm['oozie.Node']"}),
            'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['oozie.Workflow']"})
        },
        'oozie.pig': {
            'Meta': {'object_name': 'Pig'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'oozie.shell': {
            'Meta': {'object_name': 'Shell'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
        },
        'oozie.sqoop': {
            'Meta': {'object_name': 'Sqoop'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
        },
        'oozie.ssh': {
            'Meta': {'object_name': 'Ssh'},
            'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'oozie.start': {
            'Meta': {'object_name': 'Start'},
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True'})
        },
        'oozie.streaming': {
            'Meta': {'object_name': 'Streaming'},
            'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
            'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'oozie.workflow': {
            'Meta': {'object_name': 'Workflow', '_ormbases': ['oozie.Job']},
            'end': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'end_workflow'", 'blank': 'True', 'null': 'True', 'to': "orm['oozie.End']"}),
            'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
            'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
            'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
            'start': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'start_workflow'", 'blank': 'True', 'null': 'True', 'to': "orm['oozie.Start']"})
        }
    }

    complete_apps = ['oozie']
| apache-2.0 | 4,895,982,263,551,305,000 | 75.325926 | 194 | 0.530085 | false |
yausern/stlab | TimeDomain_v2/AWG_station.py | 2 | 13357 | # author: Wolfgang Pfaff
# modified by: Sarwan Peiter
"""
So far I have written the driver for the AWG.
The next step is to write an interface that communicates with the driver.
Another useful interface is a library for generating pulses.
"""
import time
import logging
import numpy as np
import struct
import os,sys
from datetime import datetime
import subprocess, threading
import itertools
import re, fnmatch
# some pulses use rounding when determining the correct sample at which to
# insert a particular value. this might require correct rounding -- the pulses
# are typically specified on short time scales, but the time unit we use is
# seconds. therefore we need a suitably chosen digit on which to round. 9 would
# round a pulse to 1 ns precision. 11 is 10 ps, and therefore probably beyond
# the lifetime of this code (no 10ps AWG available yet :))
SIGNIFICANT_DIGITS = 11
# Make a station which represents your instruments and define all the channels
# of the instrument and connections between them
# TODO: function which sets up AWG configuration
# modified by Sarwan Peiter
class AWG_Station():
    """
    This object communicates with the AWG520 series.

    It maps logical channel names (registered via ``define_channels``) onto
    the instrument's physical outputs and forwards waveform/sequence
    programming to the low-level driver held in ``self.AWG``.
    """
    # AWG = None
    # Driver/firmware flavour; only 'regular' appears in the visible code.
    AWG_type = 'regular'
    # Physical output ids: two analog channels, each with two marker outputs.
    channels_ids = ['ch1', 'ch1_marker1', 'ch1_marker2',
                    'ch2', 'ch2_marker1', 'ch2_marker2']
    # Sequence configuration cache -- empty here; presumably filled by
    # sequence-programming code (TODO confirm against the full class).
    AWG_sequence_cfg = {}
def __init__(self, AWG=None):
self.channels = {}
self.AWG = AWG
self.filename = None
print('init halfway')
if self.AWG is not None:
self.clock = float(self.AWG.get_clock())
print(type(self.clock))
print('init over')
# define channels for connection to the environment
def define_channels(self, id, name, type, delay, offset, high, low, active):
_doubles = []
# Check whether or not channels are already in use!
for c in self.channels:
if self.channels[c]['id'] == id:
logging.warning(
"Channel '%s' already in use, will overwrite." % id)
_doubles.append(c)
for c in _doubles:
del self.channels[c]
self.channels[name] = {'id': id,
'type': type,
'delay': delay,
'offset': offset,
'high': high,
'low': low,
'active': active}
# Get the channel names by id
def get_channel_names_by_id(self, id):
chans = {id: None, id+'_marker1': None, id+'_marker2': None}
for c in self.channels:
if self.channels[c]['id'] in chans:
chans[self.channels[c]['id']] = c
return chans
def get_channel_name_by_id(self, id):
for c in self.channels:
if self.channels[c]['id'] == id:
return c
def get_used_channel_ids(self):
chans = []
for c in self.channels:
if self.channels[c]['active'] and \
self.channels[c]['id'][:3] not in chans:
chans.append(self.channels[c]['id'][:3])
return chans
# Make function which programs AWG
def get_awg_channel_cfg(self):
channel_cfg = {}
self.AWG.get_all()
    def delete_all_waveforms(self):
        # Thin wrapper: clears every waveform stored on the AWG device.
        self.AWG.clear_waveforms()
def program_awg(self, sequence,*elements,**kw):
"""
Upload a single file to the AWG (.awg) which contains all waveforms
AND sequence information (i.e. nr of repetitions, event jumps etc)
Advantage is that it's much faster, since sequence information is sent
to the AWG in a single file.
"""
self.AWG.stop()
self.AWG.set_status('off',1)
self.AWG.set_status('off',2)
# self.init_dir()
self.last_sequence = sequence
self.last_elements = elements
# making directory to store waveforms and sequences
# old_timeout = self.AWG.timeout() # whats this function
# self.AWG.timeout(max(180, old_timeout))
verbose = kw.pop('verbose', False)
debug = kw.pop('debug', False)
channels = kw.pop('channels', 'all')
loop = kw.pop('loop', False)
allow_non_zero_first_point_on_trigger_wait = kw.pop('allow_first_zero', False)
elt_cnt = len(elements)
chan_ids = self.get_used_channel_ids()
packed_waveforms = {}
# Store offset settings to restore them after upload the seq
# Note that this is the AWG setting offset, as distinct from the
# channel parameter offset.
elements_with_non_zero_first_points = []
# order the waveforms according to physical AWG channels and
# make empty sequences where necessary
for i, element in enumerate(elements):
if verbose:
print ("%d / %d: %s (%d samples)... " %\
(i+1, elt_cnt, element.name, element.samples()), end = ' ')
_t0 = time.time()
tvals, wfs = element.normalized_waveforms()
for id in chan_ids:
wfname = element.name + '_%s.wfm' % id
chan_wfs = {id: None, id+'_marker1': None, id+'_marker2': None}
grp = self.get_channel_names_by_id(id)
for sid in grp:
if grp[sid] != None and grp[sid] in wfs:
chan_wfs[sid] = wfs[grp[sid]]
if chan_wfs[sid][0] != 0.:
elements_with_non_zero_first_points.append(element.name)
else:
chan_wfs[sid] = np.zeros(element.samples())
# create wform files and send them to AWG
self.AWG.gen_waveform_files(chan_wfs[id],
chan_wfs[id+'_marker1'],
chan_wfs[id+'_marker2'], wfname,
int(element.clock))
# packed_waveforms[wfname] = self.test_send(chan_wfs[id],
# chan_wfs[id+'_marker1'],
# chan_wfs[id+'_marker2'], wfname,
# int(element.clock))
_t = time.time() - _t0
if verbose:
print ("finished in %.2f seconds." % _t)
# sequence programming
_t0 = time.time()
if (sequence.element_count() > 8000):
logging.warning("Error: trying to program '{:s}' ({:d}'".format(
sequence.name, sequence.element_count()) +
" element(s))...\n Sequence contains more than " +
"8000 elements, Aborting", end=' ')
return
print("Programming '%s' (%d element(s)) \t"
% (sequence.name, sequence.element_count()), end=' ')
# determine which channels are involved in the sequence
if channels == 'all':
chan_ids = self.get_used_channel_ids()
else:
chan_ids = []
for c in channels:
if self.channels[c]['id'][:3] not in chan_ids:
chan_ids.append(self.channels[c]['id'][:3])
# Create lists with sequence information:
# wfname_l = list of waveform names [[wf1_ch1,wf2_ch1..],[wf1_ch2,wf2_ch2..],...]
# nrep_l = list specifying the number of reps for each seq element
# wait_l = idem for wait_trigger_state
# goto_l = idem for goto_state (goto is the element where it hops to in case the element is finished)
wfname_l = []
nrep_l = []
wait_l = []
goto_l = []
logic_jump_l = []
for id in chan_ids:
#set all the waveforms
el_wfnames = []
# add all wf names of channel
for elt in sequence.elements:
el_wfnames.append(elt['wfname'] + '_%s.wfm' % id)
# should the name include id nr?
wfname_l.append(el_wfnames)
for elt in sequence.elements:
nrep_l.append(elt['repetitions'])
if (elt['repetitions'] < 1) or (elt['repetitions'] > 65536):
raise Exception('pulsar: The number of repetitions of ' +
'AWG element "%s" are out of range. Valid '
% elt['wfname'] +
'range = 1 to 65536 ("%s" recieved)'
% elt['repetitions'])
if elt['goto_l'] != None:
goto_l.append(sequence.element_index(elt['goto_l']))
else:
goto_l.append(0)
if elt['jump_target'] != None:
logic_jump_l.append(sequence.element_index(elt['jump_target']))
else:
logic_jump_l.append(0)
if elt['trigger_wait']:
wait_l.append(1)
else:
wait_l.append(0)
if loop:
goto_l[-1] = 1
# setting jump modes and loading the djump table
if sequence.djump_table != None and self.AWG_type not in ['opt09']:
raise Exception('AWG Station: The AWG configured does not support dynamic jumping')
if self.AWG_type in ['opt09']:
# TODO as self.AWG_sequence_cfg no longer exists but is generated
# from the sequence_cfg file, make these set the values on the AWG
# itself.
if sequence.djump_table != None:
# self.AWG_sequence_cfg['EVENT_JUMP_MODE'] = 2 # DYNAMIC JUMP
print('AWG set to dynamical jump')
awg_djump_table = np.zeros(16, dtype='l')
for i in list(sequence.djump_table.keys()):
el_idx = sequence.element_index(sequence.djump_table[i])
awg_djump_table[i] = el_idx
# self.AWG_sequence_cfg['TABLE_JUMP_DEFINITION'] = awg_djump_table
else:
# self.AWG_sequence_cfg['EVENT_JUMP_MODE'] = 1 # EVENT JUMP
pass
if debug:
self.check_sequence_consistency(packed_waveforms,
wfname_l,
nrep_l, wait_l, goto_l,
logic_jump_l)
self.filename = sequence.name+'_FILE.seq'
# # Loading the sequence onto the AWG memory
self.AWG.gen_sequence_file(wfname_l[0],wfname_l[1],nrep_l,wait_l,goto_l,logic_jump_l,self.filename)
# self.test_send_sequence2(wfname_l[0],wfname_l[1],nrep_l,wait_l,goto_l,logic_jump_l,self.filename)
time.sleep(.1)
# # # Waits for AWG to be ready
self.AWG.sync_awg()
self.finished= False
self.upload()
self.finished = True
_t = time.time() - _t0
self.AWG.set_sequence(self.filename)
print(" finished in %.2f seconds." % _t)
return 0
    def AWGrun(self):
        """Arm and start the AWG: wait for pending commands, select enhanced
        (triggered) run mode, enable both outputs and start the instrument."""
        # default mode is triggered
        self.AWG.write('*WAI')
        self.AWG.set_run_mode('ENH')
        self.AWG.set_status('on',1)
        self.AWG.set_status('on',2)
        self.AWG.start()
    def upload(self,folder_path = None, timestamp = None):
        """Transfer the generated .wfm/.seq files to the AWG via a Windows
        command-line FTP script.

        NOTE(review): *folder_path* and *timestamp* are accepted but ignored
        (the original selection logic is commented out below); the FTP host
        192.168.1.51 is hard-coded; the result of the final os.path.normpath()
        call is discarded -- it looks like an os.chdir() back to the parent
        directory was intended; the animate() thread is never joined.
        """
        # if folder_path is None:
        #     folder_path = os.getcwd()
        # use_latest = True
        # if timestamp is not None:
        #     use_latest = False
        # dirname = fnmatch.filter(os.listdir(folder_path),"AwgFiles*")
        # dirpath = None
        # if use_latest:
        #     dirpath = os.path.join(os.getcwd(),dirname[-1])
        # else:
        #     pattern = re.findall(r'\d+',timestamp)
        #     for dir in dirname:
        #         if pattern == re.findall(r'\d+',dir):
        #             dirpath = os.path.join(os.getcwd(),dir)
        #     if dirpath == None:
        #         raise IOError("Cannot find directory with timestamp {}".format(timestamp))
        folder_path = os.getcwd()
        dirpath = self.AWG.awg_file_dir
        os.chdir(dirpath)
        # Write a script for the Windows `ftp -s:` batch mode.
        f = open('ftp.txt','w')
        f.write('open 192.168.1.51\n')
        f.write('\n')
        f.write('\n')
        f.write('binary\n')
        f.write('mput "*.wfm"\n')
        f.write('mput "*.seq"\n')
        f.write('disconnect\n')
        f.write('bye')
        f.close()
        # Show a progress spinner while the transfer runs.
        t = threading.Thread(target=self.animate)
        t.start()
        # ftp.txt is only removed when the transfer succeeded.
        if subprocess.call('ftp -v -i -s:ftp.txt') == 0:
            os.remove('ftp.txt')
        os.path.normpath(os.getcwd() + os.sep + os.pardir)
def animate(self):
sys.stdout.write('uploading waveforms ' + '...')
for c in itertools.cycle(['|', '/', '-', '\\']):
if self.finished:
break
sys.stdout.write('' + c)
sys.stdout.flush()
time.sleep(0.1)
sys.stdout.write('\rDone! ')
def check_sequence_consistency(self, packed_waveforms,
wfname_l,
nrep_l, wait_l, goto_l, logic_jump_l):
'''
Specific for 2 channel tektronix 520 where all channels are used.
'''
if not (len(wfname_l[0]) == len(wfname_l[1]) ==
len(nrep_l) == len(wait_l) == len(goto_l) ==
len(logic_jump_l)):
raise Exception('pulsar: sequence list of elements/properties has unequal length')
# def test_send(self,w,m1,m2,filename,clock):
# """
# Sends a complete waveform. All parameters need to be specified.
# choose a file extension 'wfm' (must end with .pat)
# See also: resend_waveform()
# Input:
# w (float[numpoints]) : waveform
# m1 (int[numpoints]) : marker1
# m2 (int[numpoints]) : marker2
# filename (string) : filename
# clock (int) : frequency (Hz)
# Output:
# None
# """
# logging.debug(__name__ + ' : Generating wfm files %s for instrument' % filename)
# # Check for errors
# dim = len(w)
# if (not((len(w)==len(m1)) and ((len(m1)==len(m2))))):
# return 'error'
# m = m1 + np.multiply(m2,2)
# ws = b''
# for i in range(0,len(w)):
# ws = ws + struct.pack('<fB', w[i], int(m[i]))
# s1 = 'MAGIC 1000\r\n'
# s3 = ws
# s4 = 'CLOCK %.10e\r\n' % clock
# s2 = '#' + str(len(str(len(s3)))) + str(len(s3))
# mes = s1.encode('ASCII') + s2.encode('ASCII') + s3 + s4.encode('ASCII')
# with open(os.path.join(self.dir, filename), 'wb') as d:
# d.write(mes)
# d.close()
# def test_send_sequence2(self,wfs1,wfs2,rep,wait,goto,logic_jump,filename):
# '''
# Sends a sequence file
# Inputs (mandatory):
# wfs1: list of filenames for ch1 (all must end with .pat)
# wfs2: list of filenames for ch2 (all must end with .pat)
# rep: list
# wait: list
# goto: list
# logic_jump: list
# filename: name of output file (must end with .seq)
# Output:
# None
# '''
# logging.debug(__name__ + ' : Generating sequence %s for instrument' % filename)
# N = str(len(rep))
# s1 = 'MAGIC 3002\r\n'
# s3 = 'LINES %s\n'%N
# s4 = ''
# for k in range(len(rep)):
# s4 = s4+ '"%s","%s",%s,%s,%s,%s\r\n'%(wfs1[k],wfs2[k],rep[k],wait[k],goto[k],logic_jump[k])
# mes = s1.encode("ASCII") + s3.encode("ASCII")+ s4.encode("ASCII")
# with open(os.path.join(self.dir, filename), 'wb') as d:
# d.write(mes)
# d.close()
# def init_dir(self):
# print ( 'Initializing directory for AWG file transfering......' )
# self.dir = os.path.join(os.getcwd(),
# 'AwgFiles'+datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
# try:
# os.makedirs(self.dir)
# except OSError as e:
# if e.errno != errno.EEXIST:
# raise # This was not a "directory exist" error..
| gpl-3.0 | 151,102,286,414,462,370 | 24.785714 | 103 | 0.62791 | false |
stewartpark/django | django/contrib/gis/geos/base.py | 437 | 1280 | from ctypes import c_void_p
from django.contrib.gis.geos.error import GEOSException
class GEOSBase(object):
    """
    Base class for GEOS objects.  It mediates access to the underlying C
    pointer so that a NULL pointer can never be handed to a GEOS routine.
    """
    # NULL until a geometry has been created.
    _ptr = None
    # Pointer values must be instances of this ctypes type.
    ptr_type = c_void_p

    def _get_ptr(self):
        # Refuse to hand out a NULL pointer -- passing one to the C library
        # would be very bad.
        if not self._ptr:
            raise GEOSException('NULL GEOS %s pointer encountered.' % self.__class__.__name__)
        return self._ptr

    def _set_ptr(self, ptr):
        # Accept None (NULL) or a pointer of the configured ctypes type only.
        if ptr is not None and not isinstance(ptr, self.ptr_type):
            raise TypeError('Incompatible pointer type')
        self._ptr = ptr

    # Accessing `ptr` raises on NULL instead of letting the C library touch
    # an invalid memory location.
    ptr = property(_get_ptr, _set_ptr)
| bsd-3-clause | -6,763,576,273,197,999,000 | 32.684211 | 94 | 0.642188 | false |
rossgoodwin/musapaedia | musapaedia/muse/lib/python2.7/site-packages/setuptools/tests/test_dist_info.py | 148 | 2261 | """Test .dist-info style distributions.
"""
import os
import shutil
import tempfile
import pytest
import pkg_resources
from .textwrap import DALS
class TestDistInfo:
    """Tests for discovery of .dist-info style distributions."""

    def test_distinfo(self):
        # find_distributions() should see both fixture dists created in
        # setup_method, keyed by project name.
        dists = dict(
            (d.project_name, d)
            for d in pkg_resources.find_distributions(self.tmpdir)
        )
        assert len(dists) == 2, dists
        unversioned = dists['UnversionedDistribution']
        versioned = dists['VersionedDistribution']
        assert versioned.version == '2.718' # from filename
        assert unversioned.version == '0.3' # from METADATA

    # NOTE(review): pytest's importorskip is normally the function
    # pytest.importorskip(), not a mark -- confirm this mark usage works on
    # the pytest version pinned by this project.
    @pytest.mark.importorskip('ast')
    def test_conditional_dependencies(self):
        specs = 'splort==4', 'quux>=1.1'
        requires = list(map(pkg_resources.Requirement.parse, specs))

        for d in pkg_resources.find_distributions(self.tmpdir):
            # unconditional requirement only...
            assert d.requires() == requires[:1]
            # ...until the 'baz' extra is requested.
            assert d.requires(extras=('baz',)) == requires
            assert d.extras == ['baz']

    # METADATA template shared by both fixture distributions; {version} is
    # left empty for the unversioned one (the blank line is collapsed below).
    metadata_template = DALS("""
        Metadata-Version: 1.2
        Name: {name}
        {version}
        Requires-Dist: splort (==4)
        Provides-Extra: baz
        Requires-Dist: quux (>=1.1); extra == 'baz'
        """)

    def setup_method(self, method):
        # Build two .dist-info fixtures in a fresh temp dir: one that takes
        # its version from the directory name, one from METADATA.
        self.tmpdir = tempfile.mkdtemp()
        dist_info_name = 'VersionedDistribution-2.718.dist-info'
        versioned = os.path.join(self.tmpdir, dist_info_name)
        os.mkdir(versioned)
        with open(os.path.join(versioned, 'METADATA'), 'w+') as metadata_file:
            metadata = self.metadata_template.format(
                name='VersionedDistribution',
                version='',
            ).replace('\n\n', '\n')
            metadata_file.write(metadata)
        dist_info_name = 'UnversionedDistribution.dist-info'
        unversioned = os.path.join(self.tmpdir, dist_info_name)
        os.mkdir(unversioned)
        with open(os.path.join(unversioned, 'METADATA'), 'w+') as metadata_file:
            metadata = self.metadata_template.format(
                name='UnversionedDistribution',
                version='Version: 0.3',
            )
            metadata_file.write(metadata)

    def teardown_method(self, method):
        shutil.rmtree(self.tmpdir)
| mit | -6,936,935,225,838,370,000 | 31.3 | 80 | 0.598408 | false |
victorzhao/miniblink49 | third_party/skia/tools/reformat-json.py | 208 | 1741 | #!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
'''
Rewrites a JSON file to use Python's standard JSON pretty-print format,
so that subsequent runs of rebaseline.py will generate useful diffs
(only the actual checksum differences will show up as diffs, not obscured
by format differences).
Should not modify the JSON contents in any meaningful way.
'''
# System-level imports
import argparse
import os
import sys
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
#
# This assumes that the 'gm' directory has been checked out as a sibling of
# the 'tools' directory containing this script, which will be the case if
# 'trunk' was checked out as a single unit.
# Locate the sibling 'gm' checkout and append it to sys.path (at the end,
# so existing PYTHONPATH entries win), then import its JSON helpers.
GM_DIRECTORY = os.path.realpath(
    os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
    sys.path.append(GM_DIRECTORY)
import gm_json
def Reformat(filename):
    """Rewrite *filename* in place using gm_json's canonical pretty-print
    format (content is preserved; only formatting changes).  Python 2 only."""
    print 'Reformatting file %s...' % filename
    gm_json.WriteToFile(gm_json.LoadFromFile(filename), filename)
def _Main():
    """Parse the command line and reformat every listed JSON file in place."""
    parser = argparse.ArgumentParser(description='Reformat JSON files in-place.')
    parser.add_argument('filenames', metavar='FILENAME', nargs='+',
                        help='file to reformat')
    args = parser.parse_args()
    for filename in args.filenames:
        Reformat(filename)
    # Explicit success exit code.
    sys.exit(0)

if __name__ == '__main__':
    _Main()
| gpl-3.0 | -3,019,449,073,377,126,000 | 30.654545 | 79 | 0.723148 | false |
cherez/youtube-dl | youtube_dl/extractor/screencast.py | 147 | 4200 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
)
class ScreencastIE(InfoExtractor):
    """Extractor for screencast.com /t/<id> pages."""
    _VALID_URL = r'https?://www\.screencast\.com/t/(?P<id>[a-zA-Z0-9]+)'
    _TESTS = [{
        'url': 'http://www.screencast.com/t/3ZEjQXlT',
        'md5': '917df1c13798a3e96211dd1561fded83',
        'info_dict': {
            'id': '3ZEjQXlT',
            'ext': 'm4v',
            'title': 'Color Measurement with Ocean Optics Spectrometers',
            'description': 'md5:240369cde69d8bed61349a199c5fb153',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/V2uXehPJa1ZI',
        'md5': 'e8e4b375a7660a9e7e35c33973410d34',
        'info_dict': {
            'id': 'V2uXehPJa1ZI',
            'ext': 'mov',
            'title': 'The Amadeus Spectrometer',
            'description': 're:^In this video, our friends at.*To learn more about Amadeus, visit',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/aAB3iowa',
        'md5': 'dedb2734ed00c9755761ccaee88527cd',
        'info_dict': {
            'id': 'aAB3iowa',
            'ext': 'mp4',
            'title': 'Google Earth Export',
            'description': 'Provides a demo of a CommunityViz export to Google Earth, one of the 3D viewing options.',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    }, {
        'url': 'http://www.screencast.com/t/X3ddTrYh',
        'md5': '669ee55ff9c51988b4ebc0877cc8b159',
        'info_dict': {
            'id': 'X3ddTrYh',
            'ext': 'wmv',
            'title': 'Toolkit 6 User Group Webinar (2014-03-04) - Default Judgment and First Impression',
            'description': 'md5:7b9f393bc92af02326a5c5889639eab0',
            'thumbnail': 're:^https?://.*\.(?:gif|jpg)$',
        }
    },
    ]

    def _real_extract(self, url):
        """Try three page layouts in turn: QuickTime <embed>, Flash vars
        (flashVars/initParams <param> tags), then og:video metadata."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # 1st attempt: direct QuickTime embed.
        video_url = self._html_search_regex(
            r'<embed name="Video".*?src="([^"]+)"', webpage,
            'QuickTime embed', default=None)

        # 2nd attempt: Flash parameters (initParams uses ',' separators).
        if video_url is None:
            flash_vars_s = self._html_search_regex(
                r'<param name="flashVars" value="([^"]+)"', webpage, 'flash vars',
                default=None)
            if not flash_vars_s:
                flash_vars_s = self._html_search_regex(
                    r'<param name="initParams" value="([^"]+)"', webpage, 'flash vars',
                    default=None)
                if flash_vars_s:
                    flash_vars_s = flash_vars_s.replace(',', '&')
            if flash_vars_s:
                flash_vars = compat_parse_qs(flash_vars_s)
                video_url_raw = compat_urllib_request.quote(
                    flash_vars['content'][0])
                # undo the quoting of the scheme separator
                video_url = video_url_raw.replace('http%3A', 'http:')

        # 3rd attempt: og:video meta tag.
        if video_url is None:
            video_meta = self._html_search_meta(
                'og:video', webpage, default=None)
            if video_meta:
                video_url = self._search_regex(
                    r'src=(.*?)(?:$|&)', video_meta,
                    'meta tag video URL', default=None)

        if video_url is None:
            raise ExtractorError('Cannot find video')

        title = self._og_search_title(webpage, default=None)
        if title is None:
            title = self._html_search_regex(
                [r'<b>Title:</b> ([^<]*)</div>',
                 r'class="tabSeperator">></span><span class="tabText">(.*?)<'],
                webpage, 'title')
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._og_search_description(webpage, default=None)
        if description is None:
            description = self._html_search_meta('description', webpage)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
| unlicense | 2,076,698,057,036,148,000 | 36.837838 | 118 | 0.515714 | false |
jesramirez/odoo | addons/hr_timesheet_sheet/report/__init__.py | 342 | 1074 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 164,198,059,339,103,230 | 41.96 | 78 | 0.616387 | false |
AmandaMoen/AmandaMoen | notes/resources/UW_IntroClass/class8.5/code/basic_app_4.py | 1 | 7168 | #!/usr/bin/env python
"""
Example of the very basic, minimal framework for a wxPython application
This version adds a single button
"""
import wx
import os
#--------------------------------------------------------------
# This is how you pre-establish a file filter so that the dialog
# only shows the extension(s) you want it to.
# Format: alternating "description|pattern" entries joined with '|'.
wildcard = "Python source (*.py)|*.py|" \
           "Compiled Python (*.pyc)|*.pyc|" \
           "SPAM files (*.spam)|*.spam|" \
           "Egg file (*.egg)|*.egg|" \
           "All files (*.*)|*.*"
#--------------------------------------------------------------
class AppLogic(object):
    """
    A class to hold the application Application Logic.

    You generally don't want the real logic of the app mixed
    in with the GUI

    In a real app, this would be a substantial collection of
    modules, classes, etc...
    """
    def file_open(self, filename="default_name"):
        """This method opens a file"""
        # Placeholder: a real implementation would open *filename* here.
        print "Open a file: "
        print "I'd be opening file: %s now"%filename

    def file_close(self):
        """This method closes a file"""
        # Placeholder: a real implementation would close the current file.
        print "Close a file: "
        print "I'd be closing a file now"
class TestFrame(wx.Frame):
    """Main application frame: a File/Help menu bar plus one demo button.
    GUI events are delegated to the AppLogic object passed in."""

    def __init__(self, app_logic, *args, **kwargs):
        kwargs.setdefault('title', "Simple test App")
        wx.Frame.__init__(self, *args, **kwargs)
        self.app_logic = app_logic

        # Build up the menu bar:
        menuBar = wx.MenuBar()

        fileMenu = wx.Menu()
        saveasMenuItem = fileMenu.Append(wx.ID_ANY, "&Save As", "Create a new file")
        self.Bind(wx.EVT_MENU, self.onSaveAs, saveasMenuItem )
        openMenuItem = fileMenu.Append(wx.ID_ANY, "&Open", "Open an existing file" )
        self.Bind(wx.EVT_MENU, self.onOpen, openMenuItem)
        closeMenuItem = fileMenu.Append(wx.ID_ANY, "&Close", "Close a file" )
        self.Bind(wx.EVT_MENU, self.onClose, closeMenuItem)
        exitMenuItem = fileMenu.Append(wx.ID_EXIT, "Exit", "Exit the application")
        self.Bind(wx.EVT_MENU, self.onExit, exitMenuItem)
        menuBar.Append(fileMenu, "&File")

        helpMenu = wx.Menu()
        helpMenuItem = helpMenu.Append(wx.ID_HELP, "Help", "Get help")
        menuBar.Append(helpMenu, "&Help")

        self.SetMenuBar(menuBar)

        ## add just a single button:
        self.theButton = wx.Button(self, label="Push Me")
        self.theButton.Bind(wx.EVT_BUTTON, self.onButton)
        self.theButton.Bind(wx.EVT_RIGHT_DOWN, self.onRight)

    def onButton(self, evt=None):
        """Left-click handler for the demo button."""
        print "You pushed the button!"
        evt.Skip()

    def onRight(self, evt=None):
        """Right-click handler for the demo button."""
        print "right click!"
        evt.Skip()

    def onClose(self, evt=None):
        """File->Close menu handler."""
        print "close menu selected"
        self.file_close()

    def onExit(self, evt=None):
        """File->Exit menu handler; closes the frame (ending the app)."""
        print "Exit the program here"
        print "The event passed to onExit is type ", type(evt),
        self.Close()

    def onSaveAs ( self, evt=None ):
        """This method saves the file with a new name"""
        # Create the dialog. In this case the current directory is forced as the starting
        # directory for the dialog, and no default file name is forced. This can easilly
        # be changed in your program. This is an 'save' dialog.
        #
        # Unlike the 'open dialog' example found elsewhere, this example does NOT
        # force the current working directory to change if the user chooses a different
        # directory than the one initially set.
        dlg = wx.FileDialog(self,
                            message="Save file as ...",
                            defaultDir=os.getcwd(),
                            defaultFile="",
                            wildcard=wildcard,
                            style=wx.SAVE )

        # This sets the default filter that the user will initially see. Otherwise,
        # the first filter in the list will be used by default.
        dlg.SetFilterIndex(2)

        # Show the dialog and retrieve the user response. If it is the OK response,
        # process the data.
        if dlg.ShowModal() == wx.ID_OK:
            path = dlg.GetPath()
            print "In onSaveAs, the path is %s" % path
            # Normally, at this point you would save your data using the file and path
            # data that the user provided to you, but since we didn't actually start
            # with any data to work with, that would be difficult.
            #
            # The code to do so would be similar to this, assuming 'data' contains
            # the data you want to save:
            #
            # fp = file(path, 'w') # Create file anew
            # fp.write(data)
            # fp.close()
            #
            # You might want to add some error checking :-)
        else :
            print "The file dialog was canceled before anything was selected"
            # Note that the current working dir didn't change. This is good since
            # that's the way we set it up.

        # Destroy the dialog. Don't do this until you are done with it!
        # BAD things can happen otherwise!
        dlg.Destroy()

    def onOpen(self, evt=None):
        """This method opens an existing file"""
        print "Open a file: "
        # Create the dialog. In this case the current directory is forced as the starting
        # directory for the dialog, and no default file name is forced. This can easilly
        # be changed in your program. This is an 'open' dialog, and allows multitple
        # file selections as well.
        #
        # Finally, if the directory is changed in the process of getting files, this
        # dialog is set up to change the current working directory to the path chosen.
        dlg = wx.FileDialog( self,
                             message="Choose a file",
                             defaultDir=os.getcwd(),
                             defaultFile="",
                             wildcard=wildcard,
                             style=wx.OPEN | wx.CHANGE_DIR
                             )

        # Show the dialog and retrieve the user response. If it is the OK response,
        # process the data.
        if dlg.ShowModal() == wx.ID_OK:
            # This returns a Python list of files that were selected.
            path = dlg.GetPath()
            print "I'd be opening file in onOpen ", path
            self.app_logic.file_open( path )
        else :
            print "The file dialog was canceled before anything was selected"

        # Destroy the dialog. Don't do this until you are done with it!
        # BAD things can happen otherwise!
        dlg.Destroy()

    def file_close(self):
        """This method closes a file"""
        # NOTE(review): duplicates AppLogic.file_close but only prints here;
        # confirm whether this should delegate to self.app_logic instead.
        print "Close a file: "
        print "I'd be closing a file now"
class TestApp(wx.App):
    def OnInit(self):
        """
        App initialization goes here -- not much to do, in this case:
        create the application-logic object and the main frame.
        """
        app_logic = AppLogic()
        f = TestFrame(app_logic, parent=None)
        f.Show()
        # True tells wx the app started successfully.
        return True
if __name__ == "__main__":
    # False -> do not redirect stdout/stderr to a wx window.
    app = TestApp(False)
    app.MainLoop()
| gpl-2.0 | -6,373,040,933,122,016,000 | 34.661692 | 89 | 0.563616 | false |
jaggu303619/asylum-v2.0 | openerp/addons/resource/faces/pcalendar.py | 433 | 28436 | #@+leo-ver=4
#@+node:@file pcalendar.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains all classes and functions for the project plan calendar
"""
#@<< Imports >>
#@+node:<< Imports >>
from string import *
import datetime
import time
import re
import locale
import bisect
import sys
TIME_RANGE_PATTERN = re.compile("(\\d+):(\\d+)\\s*-\\s*(\\d+):(\\d+)")
TIME_DELTA_PATTERN = re.compile("([-+]?\\d+(\\.\\d+)?)([dwmyMH])")

# Fallback calendar parameters, used when a calendar does not override them.
DEFAULT_MINIMUM_TIME_UNIT = 15
DEFAULT_WORKING_DAYS_PER_WEEK = 5
DEFAULT_WORKING_DAYS_PER_MONTH = 20
DEFAULT_WORKING_DAYS_PER_YEAR = 200
DEFAULT_WORKING_HOURS_PER_DAY = 8

# Working periods as (start, stop) pairs in minutes since midnight;
# Saturday (5) and Sunday (6) are free by default.
DEFAULT_WORKING_TIMES = ( (8 * 60, 12 * 60 ),
                          (13 * 60, 17 * 60 ) )
DEFAULT_WORKING_DAYS = { 0 : DEFAULT_WORKING_TIMES,
                         1 : DEFAULT_WORKING_TIMES,
                         2 : DEFAULT_WORKING_TIMES,
                         3 : DEFAULT_WORKING_TIMES,
                         4 : DEFAULT_WORKING_TIMES,
                         5 : (),
                         6 : () }
#@-node:<< Imports >>
#@nl
#@+others
#@+node:to_time_range
def to_time_range(src):
    """
    Convert a string such as "8:00-12:30" into a pair (start, stop) of
    minutes since midnight.  A false/empty *src* yields ().
    """
    if not src:
        return ()
    match = TIME_RANGE_PATTERN.match(src)
    if match is None:
        raise ValueError("%s is no time range" % src)
    h1, m1, h2, m2 = map(int, match.groups())
    return h1 * 60 + m1, h2 * 60 + m2
#@-node:to_time_range
#@+node:to_datetime
def to_datetime(src):
    """
    a tolerant conversion function to convert different strings
    to a datetime.datetime
    """
    #to get the original value for wrappers
    # Unwrap chained ``_value`` proxies until a plain value remains.
    new = getattr(src, "_value", src)
    while new is not src:
        src = new
        new = getattr(src, "_value", src)
    if isinstance(src, _WorkingDateBase):
        src = src.to_datetime()
    if isinstance(src, datetime.datetime):
        return src
    src = str(src)
    # Candidate formats, tried in order; first successful parse wins.
    formats = [ "%x %H:%M",
                "%x",
                "%Y-%m-%d %H:%M",
                "%y-%m-%d %H:%M",
                "%d.%m.%Y %H:%M",
                "%d.%m.%y %H:%M",
                "%Y%m%d %H:%M",
                "%d/%m/%y %H:%M",
                "%d/%m/%Y %H:%M",
                "%d/%m/%Y",
                "%d/%m/%y",
                "%Y-%m-%d",
                "%y-%m-%d",
                "%d.%m.%Y",
                "%d.%m.%y",
                "%Y%m%d" ]
    for f in formats:
        try:
            conv = time.strptime(src, f)
            # conv[0:-3] drops weekday/yearday/dst fields.
            return datetime.datetime(*conv[0:-3])
        except Exception, e:
            # NOTE: Python 2 except syntax; parse failures simply fall
            # through to the next candidate format.
            pass
    raise TypeError("'%s' (%s) is not a datetime" % (src, str(type(src))))
#@-node:
#@+node:_to_days
def _to_days(src):
"""
converts a string of the day abreviations mon, tue, wed,
thu, fri, sat, sun to a dir with correct weekday indices.
For Example
convert_to_days('mon, tue, thu') results in
{ 0:1, 1:1, 3:1 }
"""
tokens = src.split(",")
result = { }
for t in tokens:
try:
index = { "mon" : 0,
"tue" : 1,
"wed" : 2,
"thu" : 3,
"fri" : 4,
"sat" : 5,
"sun" : 6 } [ lower(t.strip()) ]
result[index] = 1
except:
raise ValueError("%s is not a day" % (t))
return result
#@-node:_to_days
#@+node:_add_to_time_spans
def _add_to_time_spans(src, to_add, is_free):
    """Merge *to_add* (a date, a (start, end) pair, or a sequence of either)
    into the existing (start, end, is_free) spans of *src*, using a sweep
    over start/end events.  A bare date is expanded to the whole day.
    Returns a tuple of merged (start, end, is_free) spans."""
    if not isinstance(to_add, (tuple, list)):
        to_add = (to_add,)
    # Turn the existing spans into (date, is_start, is_free) events.
    tmp = []
    for start, end, f in src:
        tmp.append((start, True, f))
        tmp.append((end, False, f))
    for v in to_add:
        if isinstance(v, (tuple, list)):
            start = to_datetime(v[0])
            end = to_datetime(v[1])
        else:
            # A single date means: the full day starting at midnight.
            start = to_datetime(v)
            end = start.replace(hour=0, minute=0) + datetime.timedelta(1)
        # start > end is tolerated: the flags below swap the event roles.
        tmp.append((start, start <= end, is_free))
        tmp.append((end, start > end, is_free))
    tmp.sort()
    # 0: date
    # 1: is_start
    # 2: is_free
    # Sweep the sorted events, tracking how many free/work spans are open;
    # work spans take precedence over overlapping free spans.
    sequence = []
    free_count = 0
    work_count = 0
    last = None
    for date, is_start, is_free in tmp:
        if is_start:
            if is_free:
                if not free_count and not work_count:
                    last = date
                free_count += 1
            else:
                if not work_count:
                    # close a pending free span before the work span starts
                    if free_count: sequence.append((last, date, True))
                    last = date
                work_count += 1
        else:
            if is_free:
                assert(free_count > 0)
                free_count -= 1
                if not free_count and not work_count:
                    sequence.append((last, date, True))
            else:
                assert(work_count > 0)
                work_count -= 1
                if not work_count: sequence.append((last, date, False))
                # a still-open free span resumes where the work span ended
                if free_count: last = date
    return tuple(sequence)
#@-node:_add_to_time_spans
#@+node:to_timedelta
def to_timedelta(src, cal=None, is_duration=False):
    """
    converts a string to a datetime.timedelta. If cal is specified
    it will be used for getting the working times. if is_duration=True
    working times will not be considered. Valid units are
    d for Days
    w for Weeks
    m for Months
    y for Years
    H for Hours
    M for Minutes
    """
    cal = cal or _default_calendar
    if isinstance(src, datetime.timedelta):
        # NOTE(review): the stdlib datetime.timedelta constructor (imported
        # at the top of this module) has no ``calendar`` keyword, so this
        # branch would raise TypeError as written -- presumably a wrapped
        # datetime module is intended; confirm.
        return datetime.timedelta(src.days, seconds=src.seconds, calendar=cal)
    if isinstance(src, (long, int, float)):
        # bare numbers are interpreted as minutes (Python 2 ``long``)
        src = "%sM" % str(src)
    if not isinstance(src, basestring):
        raise ValueError("%s is not a duration" % (repr(src)))
    src = src.strip()
    # Durations use calendar-time conversion factors; working time uses the
    # calendar's configured working days/hours.
    if is_duration:
        d_p_w = 7
        d_p_m = 30
        d_p_y = 360
        d_w_h = 24
    else:
        d_p_w = cal.working_days_per_week
        d_p_m = cal.working_days_per_month
        d_p_y = cal.working_days_per_year
        d_w_h = cal.working_hours_per_day
    def convert_minutes(minutes):
        # Split a minute count into timedelta args (Python 2 integer ``/``).
        minutes = int(minutes)
        hours = minutes / 60
        minutes = minutes % 60
        days = hours / d_w_h
        hours = hours % d_w_h
        return [ days, 0, 0, 0, minutes, hours ]
    def convert_days(value):
        # Split a (possibly fractional) day count into timedelta args.
        days = int(value)
        value -= days
        value *= d_w_h
        hours = int(value)
        value -= hours
        value *= 60
        minutes = round(value)
        return [ days, 0, 0, 0, minutes, hours ]
    # Accumulated (days, s, us, ms, minutes, hours) timedelta arguments.
    sum_args = [ 0, 0, 0, 0, 0, 0 ]
    split = src.split(" ")
    for s in split:
        mo = TIME_DELTA_PATTERN.match(s)
        if not mo:
            raise ValueError(src +
                             " is not a valid duration: valid"
                             " units are: d w m y M H")
        unit = mo.group(3)
        val = float(mo.group(1))
        if unit == 'd':
            args = convert_days(val)
        elif unit == 'w':
            args = convert_days(val * d_p_w)
        elif unit == 'm':
            args = convert_days(val * d_p_m)
        elif unit == 'y':
            args = convert_days(val * d_p_y)
        elif unit == 'M':
            args = convert_minutes(val)
        elif unit == 'H':
            args = convert_minutes(val * 60)
        sum_args = [ a + b for a, b in zip(sum_args, args) ]
    sum_args = tuple(sum_args)
    return datetime.timedelta(*sum_args)
#@-node:to_timedelta
#@+node:timedelta_to_str
def timedelta_to_str(delta, format, cal=None, is_duration=False):
    """Format *delta* using directives %y %m %w %d %H %M; a ``{...}`` group
    around a directive is dropped entirely when its value is zero.  When the
    smaller unit is absent from *format*, the remainder is rendered as a
    two-decimal fraction of the larger unit.  Calendar-time factors are used
    when is_duration is true, otherwise the calendar's working-time factors."""
    cal = cal or _default_calendar
    if is_duration:
        d_p_w = 7
        d_p_m = 30
        d_p_y = 365
        d_w_h = 24
    else:
        d_p_w = cal.working_days_per_week
        d_p_m = cal.working_days_per_month
        d_p_y = cal.working_days_per_year
        d_w_h = cal.working_hours_per_day
    # Which directives appear in the format string.
    has_years = format.find("%y") > -1
    has_minutes = format.find("%M") > -1
    has_hours = format.find("%H") > -1 or has_minutes
    has_days = format.find("%d") > -1
    has_weeks = format.find("%w") > -1
    has_months = format.find("%m") > -1
    result = format
    days = delta.days
    d_r = (days, format)
    # Python 2 integer division: whole minutes.
    minutes = delta.seconds / 60
    def rebase(d_r, cond1, cond2, letter, divisor):
        #rebase the days
        # Substitute %<letter> and carry the remainder of ``days`` down to
        # the next smaller unit (cond2) or render a fractional value.
        if not cond1: return d_r
        days, result = d_r
        if cond2:
            val = days / divisor
            if not val:
                # drop the whole {...} group around an all-zero directive
                result = re.sub("{[^{]*?%" + letter + "[^}]*?}", "", result)
            result = result.replace("%" + letter, str(val))
            days %= divisor
        else:
            result = result.replace("%" + letter,
                                    locale.format("%.2f",
                                                  (float(days) / divisor)))
        return (days, result)
    d_r = rebase(d_r, has_years, has_months or has_weeks or has_days, "y", d_p_y)
    d_r = rebase(d_r, has_months, has_weeks or has_days, "m", d_p_m)
    d_r = rebase(d_r, has_weeks, has_days, "w", d_p_w)
    days, result = d_r
    if not has_days:
        # fold leftover days into the minute total
        minutes += days * d_w_h * 60
        days = 0
    if has_hours:
        if not days:
            result = re.sub("{[^{]*?%d[^}]*?}", "", result)
        result = result.replace("%d", str(days))
    else:
        result = result.replace("%d",
                                locale.format("%.2f",
                                              (float(days) / divisor)))
    return result
#@-node:timedelta_to_str
#@+node:strftime
def strftime(dt, format):
"""
an extended version of strftime, that introduces some new
directives:
%IW iso week number
%IY iso year
%IB full month name appropriate to iso week
%ib abbreviated month name appropriate to iso week
%im month as decimal number appropriate to iso week
"""
iso = dt.isocalendar()
if iso[0] != dt.year:
iso_date = dt.replace(day=1, month=1)
format = format \
.replace("%IB", iso_date.strftime("%B"))\
.replace("%ib", iso_date.strftime("%b"))\
.replace("%im", iso_date.strftime("%m"))
else:
format = format \
.replace("%IB", "%B")\
.replace("%ib", "%b")\
.replace("%im", "%m")
format = format \
.replace("%IW", str(iso[1]))\
.replace("%IY", str(iso[0]))\
return dt.strftime(format)
#@-node:strftime
#@+node:union
def union(*calendars):
"""
returns a calendar that unifies all working times
"""
#@ << check arguments >>
#@+node:<< check arguments >>
if len(calendars) == 1:
calendars = calendars[0]
#@nonl
#@-node:<< check arguments >>
#@nl
#@ << intersect vacations >>
#@+node:<< intersect vacations >>
free_time = []
for c in calendars:
for start, end, is_free in c.time_spans:
if is_free:
free_time.append((start, False))
free_time.append((end, True))
count = len(calendars)
open = 0
time_spans = []
free_time.sort()
for date, is_end in free_time:
if is_end:
if open == count:
time_spans.append((start, date, True))
open -= 1
else:
open += 1
start = date
#@-node:<< intersect vacations >>
#@nl
#@ << unify extra worktime >>
#@+node:<< unify extra worktime >>
for c in calendars:
for start, end, is_free in c.time_spans:
if not is_free:
time_spans = _add_to_time_spans(time_spans, start, end)
#@nonl
#@-node:<< unify extra worktime >>
#@nl
#@ << unify working times >>
#@+node:<< unify working times >>
working_times = {}
for d in range(0, 7):
times = []
for c in calendars:
for start, end in c.working_times.get(d, []):
times.append((start, False))
times.append((end, True))
times.sort()
open = 0
ti = []
start = None
for time, is_end in times:
if not is_end:
if not start: start = time
open += 1
else:
open -= 1
if not open:
ti.append((start, time))
start = None
if ti:
working_times[d] = ti
#@-node:<< unify working times >>
#@nl
#@ << create result calendar >>
#@+node:<< create result calendar >>
result = Calendar()
result.working_times = working_times
result.time_spans = time_spans
result._recalc_working_time()
result._build_mapping()
#@nonl
#@-node:<< create result calendar >>
#@nl
return result
#@nonl
#@-node:union
#@+node:class _CalendarItem
class _CalendarItem(int):
#@ << class _CalendarItem declarations >>
#@+node:<< class _CalendarItem declarations >>
__slots__ = ()
calender = None
#@-node:<< class _CalendarItem declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, val):
try:
return int.__new__(cls, val)
except OverflowError:
return int.__new__(cls, sys.maxint)
#@-node:__new__
#@+node:round
def round(self, round_up=True):
m_t_u = self.calendar.minimum_time_unit
minutes = int(self)
base = (minutes / m_t_u) * m_t_u
minutes %= m_t_u
round_up = round_up and minutes > 0 or minutes > m_t_u / 2
if round_up: base += m_t_u
return self.__class__(base)
#@-node:round
#@-others
#@-node:class _CalendarItem
#@+node:class _Minutes
class _Minutes(_CalendarItem):
#@ << class _Minutes declarations >>
#@+node:<< class _Minutes declarations >>
__slots__ = ()
STR_FORMAT = "{%dd}{ %HH}{ %MM}"
#@-node:<< class _Minutes declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, src=0, is_duration=False):
"""
converts a timedelta in working minutes.
"""
if isinstance(src, cls) or type(src) is int:
return _CalendarItem.__new__(cls, src)
cal = cls.calendar
if not isinstance(src, datetime.timedelta):
src = to_timedelta(src, cal, is_duration)
d_w_h = is_duration and 24 or cal.working_hours_per_day
src = src.days * d_w_h * 60 + src.seconds / 60
return _CalendarItem.__new__(cls, src)
#@-node:__new__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(int(self), int(self.__class__(other)))
#@-node:__cmp__
#@+node:__add__
def __add__(self, other):
try:
return self.__class__(int(self) + int(self.__class__(other)))
except:
return NotImplemented
#@-node:__add__
#@+node:__sub__
def __sub__(self, other):
try:
return self.__class__(int(self) - int(self.__class__(other)))
except:
return NotImplemented
#@-node:__sub__
#@+node:to_timedelta
def to_timedelta(self, is_duration=False):
d_w_h = is_duration and 24 or self.calendar.working_hours_per_day
minutes = int(self)
hours = minutes / 60
minutes = minutes % 60
days = hours / d_w_h
hours = hours % d_w_h
return datetime.timedelta(days, hours=hours, minutes=minutes)
#@nonl
#@-node:to_timedelta
#@+node:strftime
def strftime(self, format=None, is_duration=False):
td = self.to_timedelta(is_duration)
return timedelta_to_str(td, format or self.STR_FORMAT,
self.calendar, is_duration)
#@nonl
#@-node:strftime
#@-others
#@-node:class _Minutes
#@+node:class _WorkingDateBase
class _WorkingDateBase(_CalendarItem):
"""
A daytetime which has only valid values within the
workingtimes of a specific calendar
"""
#@ << class _WorkingDateBase declarations >>
#@+node:<< class _WorkingDateBase declarations >>
timetuple = True
STR_FORMAT = "%x %H:%M"
_minutes = _Minutes
__slots__ = ()
#@-node:<< class _WorkingDateBase declarations >>
#@nl
#@ @+others
#@+node:__new__
def __new__(cls, src):
#cls.__bases__[0] is the base of
#the calendar specific StartDate and EndDate
if isinstance(src, cls.__bases__[0]) or type(src) in (int, float):
return _CalendarItem.__new__(cls, src)
src = cls.calendar.from_datetime(to_datetime(src))
return _CalendarItem.__new__(cls, src)
#@-node:__new__
#@+node:__repr__
def __repr__(self):
return self.strftime()
#@-node:__repr__
#@+node:to_datetime
def to_datetime(self):
return self.to_starttime()
#@-node:to_datetime
#@+node:to_starttime
def to_starttime(self):
return self.calendar.to_starttime(self)
#@-node:to_starttime
#@+node:to_endtime
def to_endtime(self):
return self.calendar.to_endtime(self)
#@-node:to_endtime
#@+node:__cmp__
def __cmp__(self, other):
return cmp(int(self), int(self.__class__(other)))
#@-node:__cmp__
#@+node:__add__
def __add__(self, other):
try:
return self.__class__(int(self) + int(self._minutes(other)))
except ValueError, e:
raise e
except:
return NotImplemented
#@-node:__add__
#@+node:__sub__
def __sub__(self, other):
if isinstance(other, (datetime.timedelta, str, _Minutes)):
try:
other = self._minutes(other)
except:
pass
if isinstance(other, self._minutes):
return self.__class__(int(self) - int(other))
try:
return self._minutes(int(self) - int(self.__class__(other)))
except:
return NotImplemented
#@-node:__sub__
#@+node:strftime
def strftime(self, format=None):
return strftime(self.to_datetime(), format or self.STR_FORMAT)
#@-node:strftime
#@-others
#@-node:class _WorkingDateBase
#@+node:class Calendar
class Calendar(object):
"""
A calendar to specify working times and vacations.
The calendars epoch start at 1.1.1979
"""
#@ << declarations >>
#@+node:<< declarations >>
# january the first must be a monday
EPOCH = datetime.datetime(1979, 1, 1)
minimum_time_unit = DEFAULT_MINIMUM_TIME_UNIT
working_days_per_week = DEFAULT_WORKING_DAYS_PER_WEEK
working_days_per_month = DEFAULT_WORKING_DAYS_PER_MONTH
working_days_per_year = DEFAULT_WORKING_DAYS_PER_YEAR
working_hours_per_day = DEFAULT_WORKING_HOURS_PER_DAY
now = EPOCH
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self):
self.time_spans = ()
self._dt_num_can = ()
self._num_dt_can = ()
self.working_times = { }
self._recalc_working_time()
self._make_classes()
#@-node:__init__
#@+node:__or__
def __or__(self, other):
if isinstance(other, Calendar):
return union(self, other)
return NotImplemented
#@nonl
#@-node:__or__
#@+node:clone
def clone(self):
result = Calendar()
result.working_times = self.working_times.copy()
result.time_spans = self.time_spans
result._recalc_working_time()
result._build_mapping()
return result
#@nonl
#@-node:clone
#@+node:set_working_days
def set_working_days(self, day_range, trange, *further_tranges):
"""
Sets the working days of an calendar
day_range is a string of day abbreviations like 'mon, tue'
trange and further_tranges is a time range string like
'8:00-10:00'
"""
time_ranges = [ trange ] + list(further_tranges)
time_ranges = filter(bool, map(to_time_range, time_ranges))
days = _to_days(day_range)
for k in days.keys():
self.working_times[k] = time_ranges
self._recalc_working_time()
self._build_mapping()
#@-node:set_working_days
#@+node:set_vacation
def set_vacation(self, value):
"""
Sets vacation time.
value is either a datetime literal or
a sequence of items that can be
a datetime literals and or pair of datetime literals
"""
self.time_spans = _add_to_time_spans(self.time_spans, value, True)
self._build_mapping()
#@-node:set_vacation
#@+node:set_extra_work
def set_extra_work(self, value):
"""
Sets extra working time
value is either a datetime literal or
a sequence of items that can be
a datetime literals and or pair of datetime literals
"""
self.time_spans = _add_to_time_spans(self.time_spans, value, False)
self._build_mapping()
#@-node:set_extra_work
#@+node:from_datetime
def from_datetime(self, value):
assert(isinstance(value, datetime.datetime))
delta = value - self.EPOCH
days = delta.days
minutes = delta.seconds / 60
# calculate the weektime
weeks = days / 7
wtime = self.week_time * weeks
# calculate the daytime
days %= 7
dtime = sum(self.day_times[:days])
# calculate the minute time
slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days])
mtime = 0
for start, end in slots:
if minutes > end:
mtime += end - start
else:
if minutes > start:
mtime += minutes - start
break
result = wtime + dtime + mtime
# map exceptional timespans
dt_num_can = self._dt_num_can
pos = bisect.bisect(dt_num_can, (value,)) - 1
if pos >= 0:
start, end, nstart, nend, cend = dt_num_can[pos]
if value < end:
if nstart < nend:
delta = value - start
delta = delta.days * 24 * 60 + delta.seconds / 60
result = nstart + delta
else:
result = nstart
else:
result += (nend - cend) # == (result - cend) + nend
return result
#@-node:from_datetime
#@+node:split_time
def split_time(self, value):
#map exceptional timespans
num_dt_can = self._num_dt_can
pos = bisect.bisect(num_dt_can, (value, sys.maxint)) - 1
if pos >= 0:
nstart, nend, start, end, cend = num_dt_can[pos]
if value < nend:
value = start + datetime.timedelta(minutes=value - nstart)
delta = value - self.EPOCH
return delta.days / 7, delta.days % 7, delta.seconds / 60, -1
else:
value += (cend - nend) # (value - nend + cend)
#calculate the weeks since the epoch
weeks = value / self.week_time
value %= self.week_time
#calculate the remaining days
days = 0
for day_time in self.day_times:
if value < day_time: break
value -= day_time
days += 1
#calculate the remaining minutes
minutes = 0
slots = self.working_times.get(days, DEFAULT_WORKING_DAYS[days])
index = 0
for start, end in slots:
delta = end - start
if delta > value:
minutes = start + value
break
else:
value -= delta
index += 1
return weeks, days, minutes, index
#@-node:split_time
#@+node:to_starttime
def to_starttime(self, value):
weeks, days, minutes, index = self.split_time(value)
return self.EPOCH + datetime.timedelta(weeks=weeks,
days=days,
minutes=minutes)
#@-node:to_starttime
#@+node:to_endtime
def to_endtime(self, value):
return self.to_starttime(value - 1) + datetime.timedelta(minutes=1)
#@-node:to_endtime
#@+node:get_working_times
def get_working_times(self, day):
return self.working_times.get(day, DEFAULT_WORKING_DAYS[day])
#@-node:get_working_times
#@+node:_build_mapping
def _build_mapping(self):
self._dt_num_can = self._num_dt_can = ()
dt_num_can = []
num_dt_can = []
delta = self.Minutes()
for start, end, is_free in self.time_spans:
cstart = self.StartDate(start)
cend = self.EndDate(end)
nstart = cstart + delta
if not is_free:
d = end - start
d = d.days * 24 * 60 + d.seconds / 60
nend = nstart + d
else:
nend = nstart
delta += (nend - nstart) - (cend - cstart)
dt_num_can.append((start, end, nstart, nend, cend))
num_dt_can.append((nstart, nend, start, end, cend))
self._dt_num_can = tuple(dt_num_can)
self._num_dt_can = tuple(num_dt_can)
#@-node:_build_mapping
#@+node:_recalc_working_time
def _recalc_working_time(self):
def slot_sum_time(day):
slots = self.working_times.get(day, DEFAULT_WORKING_DAYS[day])
return sum(map(lambda slot: slot[1] - slot[0], slots))
self.day_times = map(slot_sum_time, range(0, 7))
self.week_time = sum(self.day_times)
#@-node:_recalc_working_time
#@+node:_make_classes
def _make_classes(self):
#ensure that the clases are instance specific
class minutes(_Minutes):
calendar = self
__slots__ = ()
class db(_WorkingDateBase):
calendar = self
_minutes = minutes
__slots__ = ()
class wdt(db): __slots__ = ()
class edt(db):
__slots__ = ()
def to_datetime(self):
return self.to_endtime()
self.Minutes, self.StartDate, self.EndDate = minutes, wdt, edt
self.WorkingDate = self.StartDate
#@-node:_make_classes
#@-others
_default_calendar = Calendar()
WorkingDate = _default_calendar.WorkingDate
StartDate = _default_calendar.StartDate
EndDate = _default_calendar.EndDate
Minutes = _default_calendar.Minutes
#@-node:class Calendar
#@-others
if __name__ == '__main__':
cal = Calendar()
start = EndDate("10.1.2005")
delay = Minutes("4H")
start2 = cal.StartDate(start)
start3 = cal.StartDate("10.1.2005")
#@-node:@file pcalendar.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,814,108,212,738,946,000 | 28.838405 | 81 | 0.521557 | false |
heihachi/PokemonGo-Bot | pokemongo_bot/cell_workers/pokemon_optimizer.py | 1 | 49784 | from __future__ import unicode_literals
# import datetime
import difflib
import itertools
import json
import math
import os
import time
import datetime
from pokemongo_bot import inventory
from pokemongo_bot.base_dir import _base_dir
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.human_behaviour import sleep, action_delay
from pokemongo_bot.item_list import Item
from pokemongo_bot.tree_config_builder import ConfigException
from pokemongo_bot.worker_result import WorkerResult
# NOTE(review): these two look like server response codes for the lucky-egg
# use-item request — confirm against the code later in this file.
SUCCESS = 1
ERROR_XP_BOOST_ALREADY_ACTIVE = 3
# Minimum seconds between repeated "buddy walked" progress log messages
# (used by check_buddy's no_log_until throttle).
LOG_TIME_INTERVAL = 120
class PokemonOptimizer(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
    def __init__(self, bot, config):
        # All real setup happens in initialize(), invoked via BaseTask.__init__.
        super(PokemonOptimizer, self).__init__(bot, config)
    def initialize(self):
        """Read the task configuration and set up all optimizer state.

        Loads the upgrade-cost table from disk, opens the debug log (debug
        mode only), copies every config option onto `config_*` attributes and
        expands the special "with_next_evolution" / "with_previous_evolution"
        groups from the static pokedex data.

        Raises:
            ConfigException: if the obsolete "keep" option is still present.
        """
        self.max_pokemon_storage = inventory.get_pokemon_inventory_size()
        self.last_pokemon_count = 0
        self.pokemon_names = [p.name for p in inventory.pokemons().STATIC_DATA]
        self.evolution_map = {}
        self.debug = self.config.get('debug', False)
        self.ongoing_stardust_count = 0
        self.buddy = None
        self.buddyid = 0
        # Buddy selection stays locked until check_buddy() decides it may change.
        self.lock_buddy = True
        self.no_log_until = 0
        self.ignore_favorite = []
        self.used_lucky_egg = None
        pokemon_upgrade_cost_file = os.path.join(_base_dir, "data", "pokemon_upgrade_cost.json")
        with open(pokemon_upgrade_cost_file, "r") as fd:
            self.pokemon_upgrade_cost = json.load(fd)
        # The old top-level "keep" option was replaced by per-rule settings.
        if self.config.get("keep", None) is not None:
            raise ConfigException("Pokemon Optimizer configuration has changed. See docs/pokemon_optimized.md or configs/config.json.optimizer.example")
        if self.debug:
            # Touch the per-user log file, then reopen it read/write positioned
            # at the end (log() wraps around once it reaches 1 MiB).
            log_file_path = os.path.join(_base_dir, "data", "pokemon-optimizer-%s.log" % self.bot.config.username)
            with open(log_file_path, "a") as _:
                pass
            self.log_file = open(log_file_path, "r+")
            self.log_file.seek(0, 2)
        self.config_bulktransfer_enabled = self.config.get("bulktransfer_enabled", False)
        self.config_use_evolution_items = self.config.get("use_evolution_items", False)
        self.config_max_bulktransfer = self.config.get("max_bulktransfer", 10)
        self.config_min_slots_left = self.config.get("min_slots_left", 5)
        self.config_action_wait_min = self.config.get("action_wait_min", 3)
        self.config_action_wait_max = self.config.get("action_wait_max", 5)
        self.config_transfer = self.config.get("transfer", False)
        self.config_evolve = self.config.get("evolve", False)
        self.config_evolve_to_final = self.config.get("evolve_to_final", True)
        self.config_evolve_time = self.config.get("evolve_time", 25)
        self.config_evolve_for_xp = self.config.get("evolve_for_xp", True)
        self.config_transfer_after_xp_evolve = self.config.get("transfer_after_xp_evolve", True)
        self.config_evolve_only_with_lucky_egg = self.config.get("evolve_only_with_lucky_egg", False)
        self.config_evolve_count_for_lucky_egg = self.config.get("evolve_count_for_lucky_egg", 80)
        self.config_may_use_lucky_egg = self.config.get("may_use_lucky_egg", False)
        self.config_may_evolve_favorites = self.config.get("may_evolve_favorites", True)
        self.config_may_upgrade_favorites = self.config.get("may_upgrade_favorites", True)
        self.config_may_unfavor_pokemon = self.config.get("may_unfavor_pokemon", False)
        self.config_upgrade = self.config.get("upgrade", False)
        self.config_upgrade_level = self.config.get("upgrade_level", 30)
        self.config_groups = self.config.get("groups", {"gym": ["Dragonite", "Snorlax", "Lapras", "Arcanine"]})
        self.config_rules = self.config.get("rules", [{"mode": "overall", "top": 1, "sort": ["max_cp", "cp"], "keep": {"candy": -124}, "evolve": False, "buddy": True},
                                                      {"mode": "overall", "top": 1, "sort": ["-candy", "max_cp", "cp"], "evolve": False, "buddy": True},
                                                      {"mode": "by_family", "top": 3, "names": ["gym"], "sort": ["iv", "ncp"], "evolve": {"iv": 0.9, "ncp": 0.9}, "upgrade": {"iv": 0.9, "ncp": 0.9}},
                                                      {"mode": "by_family", "top": 1, "sort": ["iv"], "evolve": {"iv": 0.9}},
                                                      {"mode": "by_family", "top": 1, "sort": ["ncp"], "evolve": {"ncp": 0.9}},
                                                      {"mode": "by_family", "top": 1, "sort": ["cp"], "evolve": False},
                                                      {"mode": "by_pokemon", "names": ["!with_next_evolution"], "top": 1, "sort": ["dps_attack", "iv"], "keep": {"iv": 0.9}}])
        # Evolving is pointless if a lucky egg is required but may never be used.
        if (not self.config_may_use_lucky_egg) and self.config_evolve_only_with_lucky_egg:
            self.config_evolve = False
        # evolve_for_xp == True selects the default cheap-to-evolve species list.
        if self.config_evolve_for_xp is True:
            self.config_evolve_for_xp = ["Caterpie", "Weedle", "Pidgey", "Rattata", "Nidoran F", "Nidoran M",
                                         "Zubat", "Oddish", "Paras", "Venonat", "Psyduck", "Tentacool",
                                         "Magnemite", "Krabby", "Voltorb", "Goldeen", "Staryu", "Eevee"]
        elif self.config_evolve_for_xp is False:
            self.config_evolve_for_xp = []
        self.config_evolve_for_xp_whitelist, self.config_evolve_for_xp_blacklist = self.get_colorlist(self.config_evolve_for_xp)
        # Build the two implicit groups from the static pokedex data.
        self.config_groups["with_next_evolution"] = []
        self.config_groups["with_previous_evolution"] = []
        for pokemon in inventory.Pokemons.STATIC_DATA:
            if pokemon.has_next_evolution:
                self.config_groups["with_next_evolution"].append(pokemon.name)
            if pokemon.prev_evolutions_all:
                self.config_groups["with_previous_evolution"].append(pokemon.name)
    def log(self, txt):
        """Append a timestamped line to the debug log file.

        Only safe to call in debug mode, when initialize() opened self.log_file.
        """
        # Wrap around (overwrite from the start) once the file reaches 1 MiB,
        # so the debug log never grows unbounded.
        if self.log_file.tell() >= 1024 * 1024:
            self.log_file.seek(0, 0)
        self.log_file.write("[%s] %s\n" % (datetime.datetime.now().isoformat(str(" ")), txt))
        self.log_file.flush()
def active_lucky_egg(self):
if self.used_lucky_egg is None:
return False
# If last used is bigger then 30 minutes ago
if self.used_lucky_egg > datetime.datetime.now()-datetime.timedelta(minutes=30):
return True
else:
return False
def get_pokemon_slot_left(self):
pokemon_count = inventory.Pokemons.get_space_used()
if pokemon_count != self.last_pokemon_count:
self.last_pokemon_count = pokemon_count
self.logger.info("Pokemon Bag: %s / %s", pokemon_count, self.max_pokemon_storage)
inventory.update_web_inventory()
return inventory.Pokemons.get_space_left()
    def work(self):
        """Run one optimization cycle over the whole pokemon inventory.

        Applies every configured rule to collect keep/evolve/upgrade/buddy/
        favorite selections, then transfers, evolves and upgrades the rest.
        The pass runs twice so pokemon produced by evolutions in the first
        pass can themselves be cleaned up in the second.
        """
        if not self.enabled:
            return WorkerResult.SUCCESS
        # Repeat the optimizer 2 times, to get rid of the trash evolved.
        run_number = 0
        for _ in itertools.repeat(None, 2):
            run_number += 1
            self.check_buddy()
            self.open_inventory()
            keep_all = []
            try_evolve_all = []
            try_upgrade_all = []
            buddy_all = []
            favor_all = []
            # Evaluate every rule and accumulate its selections.
            for rule in self.config_rules:
                mode = rule.get("mode", "by_family")
                names = rule.get("names", [])
                check_top = rule.get("top", "all")
                check_keep = rule.get("keep", True)
                whitelist, blacklist = self.get_colorlist(names)
                # A rule without "top" and "names" that keeps everything
                # effectively disables transfers; warn loudly about it.
                if check_top == "all" and names == [] and check_keep:
                    self.logger.info("WARNING!! Will not transfer any Pokemon!!")
                    self.logger.info(rule)
                    self.logger.info("This rule is set to keep (`keep` is true) all Pokemon (no `top` and no `names` set!!)")
                    self.logger.info("Are you sure you want this?")
                if mode == "by_pokemon":
                    # Rank each pokedex id independently.
                    for pokemon_id, pokemon_list in self.group_by_pokemon_id(inventory.pokemons().all()):
                        name = inventory.pokemons().name_for(pokemon_id)
                        if name in blacklist:
                            continue
                        if whitelist and (name not in whitelist):
                            continue
                        sorted_list = self.score_and_sort(pokemon_list, rule)
                        if len(sorted_list) == 0:
                            continue
                        keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
                        keep_all += keep
                        try_evolve_all += try_evolve
                        try_upgrade_all += try_upgrade
                        buddy_all += buddy
                        favor_all += favor
                elif mode == "by_family":
                    # Rank each evolution family as one pool.
                    for family_id, pokemon_list in self.group_by_family_id(inventory.pokemons().all()):
                        matching_names = self.get_family_names(family_id)
                        if any(n in blacklist for n in matching_names):
                            continue
                        if whitelist and not any(n in whitelist for n in matching_names):
                            continue
                        sorted_list = self.score_and_sort(pokemon_list, rule)
                        if len(sorted_list) == 0:
                            continue
                        if family_id == 133:  # "Eevee" has several final evolutions
                            keep, try_evolve, try_upgrade, buddy, favor = self.get_multi_best_pokemon_for_rule(sorted_list, rule, 3)
                        else:
                            keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
                        keep_all += keep
                        try_evolve_all += try_evolve
                        try_upgrade_all += try_upgrade
                        buddy_all += buddy
                        favor_all += favor
                elif mode == "overall":
                    # Rank the whole (name-filtered) inventory as a single pool.
                    pokemon_list = []
                    for pokemon in inventory.pokemons().all():
                        name = pokemon.name
                        if name in blacklist:
                            continue
                        if whitelist and (name not in whitelist):
                            continue
                        pokemon_list.append(pokemon)
                    sorted_list = self.score_and_sort(pokemon_list, rule)
                    if len(sorted_list) == 0:
                        continue
                    keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(sorted_list, rule)
                    keep_all += keep
                    try_evolve_all += try_evolve
                    try_upgrade_all += try_upgrade
                    buddy_all += buddy
                    favor_all += favor
            # Several rules may have selected the same pokemon; deduplicate.
            keep_all = self.unique_pokemon_list(keep_all)
            try_evolve_all = self.unique_pokemon_list(try_evolve_all)
            try_upgrade_all = self.unique_pokemon_list(try_upgrade_all)
            buddy_all = self.unique_pokemon_list(buddy_all)
            try_favor_all = self.unique_pokemon_list(favor_all)
            # Favorites has nothing to do with evolve, can be done even when bag not full
            # Like a buddy
            if self.config_may_unfavor_pokemon:
                unfavor = []
                for pokemon in inventory.pokemons().all():
                    if not pokemon in try_favor_all and pokemon.is_favorite:
                        unfavor.append(pokemon)
                if len(unfavor) > 0:
                    self.logger.info("Marking %s Pokemon as no longer favorite", len(unfavor))
                for pokemon in unfavor:
                    self.unfavor_pokemon(pokemon)
            # Dont favor Pokemon if already a favorite
            try_favor_all = [p for p in try_favor_all if not p.is_favorite]
            try_favor_all = [p for p in try_favor_all if p.unique_id not in self.ignore_favorite]
            if len(try_favor_all) > 0:
                self.logger.info("Marking %s Pokemon as favorite", len(try_favor_all))
                for pokemon in try_favor_all:
                    if pokemon.is_favorite is False:
                        self.favor_pokemon(pokemon)
            # Switch buddy only when allowed and the top candidate differs.
            if (not self.lock_buddy) and (len(buddy_all) > 0):
                new_buddy = buddy_all[0]
                if (not self.buddy) or (self.buddy["id"] != new_buddy.unique_id):
                    self.set_buddy_pokemon(new_buddy)
            # Only check bag on the first run, second run ignores if the bag is empty enough
            if run_number == 1 and self.get_pokemon_slot_left() > self.config_min_slots_left:
                return WorkerResult.SUCCESS
            transfer_all = []
            evolve_all = []
            upgrade_all = []
            xp_all = []
            # Build a per-family transfer/evolve/upgrade/xp plan.
            for family_id, pokemon_list in self.group_by_family_id(inventory.pokemons().all()):
                keep = [p for p in keep_all if self.get_family_id(p) == family_id]
                try_evolve = [p for p in try_evolve_all if self.get_family_id(p) == family_id]
                try_upgrade = [p for p in try_upgrade_all if self.get_family_id(p) == family_id]
                transfer, evolve, upgrade, xp = self.get_evolution_plan(family_id, pokemon_list, keep, try_evolve, try_upgrade)
                transfer_all += transfer
                evolve_all += evolve
                upgrade_all += upgrade
                xp_all += xp
            if not self.config_may_evolve_favorites:
                self.logger.info("Removing favorites from evolve list.")
                evolve_all = [p for p in evolve_all if not p.is_favorite]
            if not self.config_may_upgrade_favorites:
                self.logger.info("Removing favorites from upgrade list.")
                upgrade_all = [p for p in upgrade_all if not p.is_favorite]
            self.apply_optimization(transfer_all, evolve_all, upgrade_all, xp_all)
        return WorkerResult.SUCCESS
    def check_buddy(self):
        """Track the buddy's walked distance and award candy when due.

        Unlocks buddy selection (self.lock_buddy = False) either when no
        buddy is set at all or when the current buddy has just been awarded
        its candy; otherwise a throttled progress message is emitted.
        """
        self.buddy = self.bot.player_data.get("buddy_pokemon", {})
        self.buddyid = self._get_buddyid()
        if not self.buddy:
            self.lock_buddy = False
            return
        pokemon = next((p for p in inventory.pokemons().all() if p.unique_id == self.buddy["id"]), None)
        if not pokemon:
            # Buddy id not found in the inventory; nothing we can do this cycle.
            return
        km_walked = inventory.player().player_stats.get("km_walked", 0)
        # First time through, initialize the award marker to "now".
        last_km_awarded = self.buddy.setdefault("last_km_awarded", km_walked)
        distance_walked = km_walked - last_km_awarded
        distance_needed = pokemon.buddy_distance_needed
        if distance_walked >= distance_needed:
            self.get_buddy_walked(pokemon)
            # self.buddy["start_km_walked"] can be empty here
            if 'start_km_walked' not in self.buddy:
                self.buddy["start_km_walked"] = 0
            # Snap the marker to the last full multiple of the needed distance.
            self.buddy["last_km_awarded"] = self.buddy["start_km_walked"] + distance_needed * int(distance_walked / distance_needed)
            self.lock_buddy = False
        else:
            now = time.time()
            # Rate-limit the progress event to one per LOG_TIME_INTERVAL seconds.
            if self.no_log_until < now:
                self.no_log_until = now + LOG_TIME_INTERVAL
                self.emit_event("buddy_walked",
                                formatted="Buddy {pokemon} walking: {distance_walked:.2f} / {distance_needed:.2f} km",
                                data={"pokemon": pokemon.name,
                                      "distance_walked": distance_walked,
                                      "distance_needed": distance_needed})
def open_inventory(self):
for pokemon in inventory.pokemons().all():
setattr(pokemon, "ncp", pokemon.cp_percent)
setattr(pokemon, "max_cp", pokemon.static.max_cp)
setattr(pokemon, "dps", pokemon.moveset.dps)
setattr(pokemon, "dps1", pokemon.fast_attack.dps)
setattr(pokemon, "dps2", pokemon.charged_attack.dps)
setattr(pokemon, "dps_attack", pokemon.moveset.dps_attack)
setattr(pokemon, "dps_defense", pokemon.moveset.dps_defense)
setattr(pokemon, "attack_perfection", pokemon.moveset.attack_perfection)
setattr(pokemon, "defense_perfection", pokemon.moveset.defense_perfection)
setattr(pokemon, "candy", pokemon.candy_quantity)
candy_to_evolution = max(pokemon.evolution_cost - pokemon.candy_quantity, 0)
setattr(pokemon, "candy_to_evolution", candy_to_evolution)
self.ongoing_stardust_count = self.bot.stardust
def get_colorlist(self, names):
whitelist = []
blacklist = []
for name in names:
if not name:
continue
if name[0] not in ['!', '-']:
group = self.config_groups.get(name, [])
if not group:
name = self.get_closest_name(name)
if name:
whitelist.append(name)
whitelist_sub, blacklist_sub = self.get_colorlist(group)
whitelist += whitelist_sub
blacklist += blacklist_sub
else:
name = name[1:]
group = self.config_groups.get(name, [])
if not group:
name = self.get_closest_name(name)
if name:
blacklist.append(name)
blacklist_sub, whitelist_sub = self.get_colorlist(group)
blacklist += blacklist_sub
whitelist += whitelist_sub
return (whitelist, blacklist)
def get_family_names(self, family_id):
ids = [family_id]
ids += inventory.pokemons().data_for(family_id).next_evolutions_all[:]
return [inventory.pokemons().name_for(x) for x in ids]
def get_closest_name(self, name):
mapping = {ord(x): ord(y) for x, y in zip("\u2641\u2642.-", "fm ")}
clean_names = {n.lower().translate(mapping): n for n in self.pokemon_names}
closest_names = difflib.get_close_matches(name.lower().translate(mapping), clean_names.keys(), 1)
if closest_names:
closest_name = clean_names[closest_names[0]]
if name != closest_name:
self.logger.warning("Unknown Pokemon name [%s]. Assuming it is [%s]", name, closest_name)
return closest_name
else:
raise ConfigException("Unknown Pokemon name [%s]" % name)
def group_by_pokemon_id(self, pokemon_list):
sorted_list = sorted(pokemon_list, key=self.get_pokemon_id)
return itertools.groupby(sorted_list, self.get_pokemon_id)
def group_by_family_id(self, pokemon_list):
sorted_list = sorted(pokemon_list, key=self.get_family_id)
return itertools.groupby(sorted_list, self.get_family_id)
    def get_pokemon_id(self, pokemon):
        # Grouping/sorting key: the pokedex id of this exact pokemon.
        return pokemon.pokemon_id
    def get_family_id(self, pokemon):
        # Grouping/sorting key: the pokedex id of the family's first evolution stage.
        return pokemon.first_evolution_id
def score_and_sort(self, pokemon_list, rule):
pokemon_list = list(pokemon_list)
if self.debug:
self.log("Pokemon %s" % pokemon_list)
self.log("Rule %s" % rule)
for pokemon in pokemon_list:
setattr(pokemon, "__score__", self.get_score(pokemon, rule))
keep = [p for p in pokemon_list if p.__score__[1] is True]
keep.sort(key=lambda p: p.__score__[0], reverse=True)
return keep
def get_score(self, pokemon, rule):
score = []
for a in rule.get("sort", []):
if a[0] == "-":
value = -getattr(pokemon, a[1:], 0)
else:
value = getattr(pokemon, a, 0)
score.append(value)
rule_keep = rule.get("keep", True)
rule_evolve = rule.get("evolve", True)
rule_upgrade = rule.get("upgrade", False)
rule_buddy = rule.get("buddy", False)
rule_favor = rule.get("favorite", False)
keep = rule_keep not in [False, {}]
keep &= self.satisfy_requirements(pokemon, rule_keep)
may_try_evolve = (hasattr(pokemon, "has_next_evolution") and pokemon.has_next_evolution())
may_try_evolve &= rule_evolve not in [False, {}]
may_try_evolve &= self.satisfy_requirements(pokemon, rule_evolve)
may_try_upgrade = rule_upgrade not in [False, {}]
may_try_upgrade &= self.satisfy_requirements(pokemon, rule_upgrade)
may_buddy = rule_buddy not in [False, {}]
may_buddy &= pokemon.in_fort is False
may_buddy &= self.satisfy_requirements(pokemon, may_buddy)
may_favor = rule_favor not in [False, {}]
may_favor &= self.satisfy_requirements(pokemon, may_favor)
if self.debug:
self.log("P:%s S:%s K:%s E:%s U:%s B:%s F:%s" % (pokemon, tuple(score), keep, may_try_evolve, may_try_upgrade, may_buddy, may_favor))
return tuple(score), keep, may_try_evolve, may_try_upgrade, may_buddy, may_favor
def satisfy_requirements(self, pokemon, req):
if type(req) is bool:
return req
satisfy = True
for a, v in req.items():
value = getattr(pokemon, a, 0)
if (type(v) is str) or (type(v) is unicode):
v = float(v)
if type(v) is list:
if type(v[0]) is list:
satisfy_range = False
for r in v:
satisfy_range |= (value >= r[0]) and (value <= r[1])
satisfy &= satisfy_range
else:
satisfy &= (value >= v[0]) and (value <= v[1])
elif v < 0:
satisfy &= (value <= abs(v))
else:
satisfy &= (value >= v)
return satisfy
def get_best_pokemon_for_rule(self, pokemon_list, rule):
pokemon_list = list(pokemon_list)
if len(pokemon_list) == 0:
return ([], [], [], [])
top = max(rule.get("top", 0), 0)
index = int(math.ceil(top)) - 1
if 0 < top < 1:
worst = object()
for a in rule.get("sort", []):
best_attribute = getattr(pokemon_list[0], a)
setattr(worst, a, best_attribute * (1 - top))
setattr(worst, "__score__", self.get_score(worst, rule))
elif 0 <= index < len(pokemon_list):
worst = pokemon_list[index]
else:
worst = pokemon_list[-1]
return self.get_better_pokemon(pokemon_list, worst)
def get_multi_best_pokemon_for_rule(self, family_list, rule, nb_branch):
    """Return (keep, try_evolve, try_upgrade, buddy, favor) for a family.

    Fully-evolved ("senior") Pokemon are ranked per species; the rest of the
    family is then compared against the worst Pokemon already kept. When
    evolution is handled manually by the player, fall back to per-species
    ranking for the whole family.
    """
    family_list = list(family_list)
    if len(family_list) == 0:
        # Bug fix: the non-empty path returns five lists, so the empty case
        # must also be a 5-tuple (the original returned only four).
        return ([], [], [], [], [])
    # Handle each group of senior independently
    senior_pokemon_list = [p for p in family_list if not p.has_next_evolution()]
    other_family_list = [p for p in family_list if p.has_next_evolution()]
    senior_pids = set(p.pokemon_id for p in senior_pokemon_list)
    keep_all = []
    try_evolve_all = []
    try_upgrade_all = []
    buddy_all = []
    favor_all = []
    if not self.config_evolve:
        # Player handles evolution manually = fall back to per-Pokemon behavior
        for _, pokemon_list in self.group_by_pokemon_id(family_list):
            keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(pokemon_list, rule)
            keep_all += keep
            try_evolve_all += try_evolve
            try_upgrade_all += try_upgrade
            buddy_all += buddy
            favor_all += favor
    else:
        for _, pokemon_list in self.group_by_pokemon_id(senior_pokemon_list):
            keep, try_evolve, try_upgrade, buddy, favor = self.get_best_pokemon_for_rule(pokemon_list, rule)
            keep_all += keep
            try_evolve_all += try_evolve
            try_upgrade_all += try_upgrade
            buddy_all += buddy
            favor_all += favor
        if len(other_family_list) > 0:
            if len(senior_pids) < nb_branch:
                # We did not get every combination yet = All other Pokemon are potentially good to keep
                worst = other_family_list[-1]
            else:
                best = keep_all + try_evolve_all + try_upgrade_all
                best.sort(key=lambda p: p.__score__[0], reverse=True)
                worst = best[-1]
            keep, try_evolve, try_upgrade, buddy, favor = self.get_better_pokemon(other_family_list, worst, 12)
            keep_all += keep
            try_evolve_all += try_evolve
            try_upgrade_all += try_upgrade
            buddy_all += buddy
            favor_all += favor
    return keep_all, try_evolve_all, try_upgrade_all, buddy_all, favor_all
def get_better_pokemon(self, pokemon_list, worst, limit=1000):
    """Partition ``pokemon_list`` against the ``worst`` acceptable Pokemon.

    Returns (keep, try_evolve, try_upgrade, buddy, favor); ``keep`` holds at
    most ``limit`` Pokemon whose primary score is >= worst's, and the other
    lists are drawn from ``keep`` using the boolean flags of ``__score__``.
    """
    threshold = worst.__score__[0]
    keep = [candidate for candidate in pokemon_list if candidate.__score__[0] >= threshold]
    keep = keep[:limit]
    try_evolve = []
    try_upgrade = []
    buddy = []
    favor = []
    for candidate in keep:
        score = candidate.__score__
        if score[2] is True:
            try_evolve.append(candidate)
        if (score[2] is False) and (score[3] is True):
            try_upgrade.append(candidate)
        if score[4] is True:
            buddy.append(candidate)
        if score[5] is True:
            favor.append(candidate)
    return keep, try_evolve, try_upgrade, buddy, favor
def get_evolution_plan(self, family_id, family_list, keep, try_evolve, try_upgrade):
    """Plan what to do with one candy family.

    Returns (transfer, evolve, upgrade, xp):
      * transfer: Pokemon to release,
      * evolve: Pokemon to evolve (the best ones),
      * upgrade: Pokemon to power up within the candy/stardust budget,
      * xp: throwaway Pokemon kept only to batch-evolve for experience.

    The candy budget is simulated locally: every transferred/evolved Pokemon
    refunds one candy, evolutions and upgrades consume candies.
    """
    candies = inventory.candies().get(family_id).quantity
    family_name = inventory.Pokemons().name_for(family_id)
    # All the rest is crap, for now
    crap = list(family_list)
    crap = [p for p in crap if p not in keep]
    # Never release deployed, favorited, or buddy Pokemon.
    crap = [p for p in crap if not p.in_fort and not p.is_favorite and not (p.unique_id == self.buddyid)]
    crap.sort(key=lambda p: (p.iv, p.cp), reverse=True)
    # We will gain a candy whether we choose to transfer or evolve these Pokemon
    candies += len(crap)
    evolve = []
    for pokemon in try_evolve:
        pokemon_id = pokemon.pokemon_id
        needed_evolution_item = inventory.pokemons().evolution_item_for(pokemon_id)
        if needed_evolution_item is not None:
            if self.config_use_evolution_items:
                # We need a special Item to evolve this Pokemon!
                item = inventory.items().get(needed_evolution_item)
                needed = inventory.pokemons().evolution_items_needed_for(pokemon_id)
                if item.count < needed:
                    self.logger.info("To evolve a {} we need {} of {}. We have {}".format(pokemon.name, needed, item.name, item.count))
                    continue
            else:
                # pass for this Pokemon
                continue
        # First charge the candy cost (whole chain when evolving to final).
        if self.config_evolve_to_final:
            pokemon_id = pokemon.pokemon_id
            while inventory.pokemons().has_next_evolution(pokemon_id):
                candies -= inventory.pokemons().evolution_cost_for(pokemon_id)
                pokemon_id = inventory.pokemons().next_evolution_ids_for(pokemon_id)[0]
        else:
            candies -= pokemon.evolution_cost
        if candies < 0:
            continue
        # Then refund one candy per evolution step and queue the Pokemon
        # once per step (evolving to final queues it multiple times).
        if self.config_evolve_to_final:
            pokemon_id = pokemon.pokemon_id
            while inventory.pokemons().has_next_evolution(pokemon_id):
                candies += 1
                evolve.append(pokemon)
                pokemon_id = inventory.pokemons().next_evolution_ids_for(pokemon_id)[0]
        else:
            candies += 1
            evolve.append(pokemon)
    upgrade = []
    upgrade_level = min(self.config_upgrade_level, inventory.player().level + 1.5, 40)
    # Highest CP on top.
    if len(try_upgrade) > 0:
        try_upgrade.sort(key=lambda p: (p.cp), reverse=True)
    for pokemon in try_upgrade:
        # self.log("Considering %s for upgrade" % pokemon.name)
        if pokemon.level >= upgrade_level:
            # self.log("Pokemon already at target level. %s" % pokemon.level)
            continue
        # Each power-up is half a level; sum the cost table entries from the
        # current level up to the target level.
        full_upgrade_candy_cost = 0
        full_upgrade_stardust_cost = 0
        for i in range(int(pokemon.level * 2), int(upgrade_level * 2)):
            upgrade_cost = self.pokemon_upgrade_cost[i - 2]
            full_upgrade_candy_cost += upgrade_cost[0]
            full_upgrade_stardust_cost += upgrade_cost[1]
        candies -= full_upgrade_candy_cost
        self.ongoing_stardust_count -= full_upgrade_stardust_cost
        if (candies < 0) or (self.ongoing_stardust_count < 0):
            # self.log("Not enough candy: %s" % candies)
            # self.log("or stardust %s" % self.ongoing_stardust_count)
            # We didn't use the stardust, so refund it...
            self.ongoing_stardust_count += full_upgrade_stardust_cost
            continue
        # self.log("Pokemon can be upgraded!!")
        upgrade.append(pokemon)
    if (not self.config_evolve_for_xp) or (family_name in self.config_evolve_for_xp_blacklist):
        xp = []
        transfer = crap
    elif self.config_evolve_for_xp_whitelist and (family_name not in self.config_evolve_for_xp_whitelist):
        xp = []
        transfer = crap
    else:
        # Compute how many crap we should keep if we want to batch evolve them for xp
        lowest_evolution_cost = inventory.pokemons().evolution_cost_for(family_id)
        # transfer + keep_for_xp = len(crap)
        # leftover_candies = candies - len(crap) + transfer * 1
        # keep_for_xp = (leftover_candies - 1) / (lowest_evolution_cost - 1)
        # keep_for_xp = (candies - len(crap) + transfer - 1) / (lowest_evolution_cost - 1)
        # keep_for_xp = (candies - keep_for_xp - 1) / (lowest_evolution_cost - 1)
        if (candies > 0) and lowest_evolution_cost:
            keep_for_xp = int((candies - 1) / lowest_evolution_cost)
        else:
            keep_for_xp = 0
        xp = [p for p in crap if p.has_next_evolution() and p.evolution_cost == lowest_evolution_cost][:keep_for_xp]
        transfer = [p for p in crap if p not in xp]
    return (transfer, evolve, upgrade, xp)
def unique_pokemon_list(self, pokemon_list):
    """Return ``pokemon_list`` without duplicate ``unique_id`` entries,
    keeping the first occurrence of each id in the original order."""
    seen_ids = set()
    unique = []
    for pokemon in pokemon_list:
        if pokemon.unique_id not in seen_ids:
            seen_ids.add(pokemon.unique_id)
            unique.append(pokemon)
    return unique
def apply_optimization(self, transfer, evolve, upgrade, xp):
    """Execute a previously computed plan: transfer, upgrade, then evolve.

    Each step runs only when enabled in config (or in test mode). The
    evolution step may be skipped to wait for a lucky egg, depending on the
    lucky-egg configuration and how many Pokemon are queued.
    """
    transfer_count = len(transfer)
    evolve_count = len(evolve)
    upgrade_count = len(upgrade)
    xp_count = len(xp)
    if self.config_transfer or self.bot.config.test:
        if transfer_count > 0:
            self.logger.info("Transferring %s Pokemon", transfer_count)
            self.transfer_pokemon(transfer)
    if self.config_upgrade or self.bot.config.test:
        if upgrade_count > 0:
            self.logger.info("Upgrading %s Pokemon [%s stardust]", upgrade_count, self.bot.stardust)
            for pokemon in upgrade:
                self.upgrade_pokemon(pokemon)
    if self.config_evolve or self.bot.config.test:
        evolve_xp_count = evolve_count + xp_count
        if evolve_xp_count > 0:
            skip_evolve = False
            if self.config_evolve and self.config_may_use_lucky_egg and (not self.bot.config.test):
                lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value)  # @UndefinedVariable
                if lucky_egg.count == 0:
                    if self.config_evolve_only_with_lucky_egg:
                        skip_evolve = True
                        self.emit_event("skip_evolve",
                                        formatted="Skipping evolution step. No lucky egg available")
                elif evolve_xp_count < self.config_evolve_count_for_lucky_egg:
                    # Not enough queued evolutions to make an egg worthwhile.
                    if self.config_evolve_only_with_lucky_egg:
                        skip_evolve = True
                        self.emit_event("skip_evolve",
                                        formatted="Skipping evolution step. Not enough Pokemon to evolve with lucky egg: %s/%s" % (evolve_xp_count, self.config_evolve_count_for_lucky_egg))
                    elif self.get_pokemon_slot_left() > self.config_min_slots_left:
                        skip_evolve = True
                        self.emit_event("skip_evolve",
                                        formatted="Waiting for more Pokemon to evolve with lucky egg: %s/%s" % (evolve_xp_count, self.config_evolve_count_for_lucky_egg))
                else:
                    self.use_lucky_egg()
            if not skip_evolve:
                # Reset the old->new evolution mapping for this batch.
                self.evolution_map = {}
                if evolve_count > 0:
                    self.logger.info("Evolving %s Pokemon (the best)", evolve_count)
                    for pokemon in evolve:
                        self.evolve_pokemon(pokemon)
                if xp_count > 0:
                    self.logger.info("Evolving %s Pokemon (for xp)", xp_count)
                    for pokemon in xp:
                        self.evolve_pokemon(pokemon, self.config_transfer_after_xp_evolve)
def transfer_pokemon(self, pokemons, skip_delay=False):
    """Release the given Pokemon, in bulk batches when enabled.

    Returns True on success, False on any API error. Candy bookkeeping,
    inventory removal and the transfer_log database insert only happen when
    transfers are really enabled (and not in test mode).
    """
    # Result codes of the RELEASE_POKEMON API call, for error reporting.
    error_codes = {
        0: 'UNSET',
        1: 'SUCCESS',
        2: 'POKEMON_DEPLOYED',
        3: 'FAILED',
        4: 'ERROR_POKEMON_IS_EGG',
        5: 'ERROR_POKEMON_IS_BUDDY'
    }
    if self.config_bulktransfer_enabled and len(pokemons) > 1:
        # Bulk path: pop Pokemon in batches of config_max_bulktransfer.
        while len(pokemons) > 0:
            action_delay(self.config_action_wait_min, self.config_action_wait_max)
            pokemon_ids = []
            count = 0
            transfered = []
            while len(pokemons) > 0 and count < self.config_max_bulktransfer:
                pokemon = pokemons.pop()
                transfered.append(pokemon)
                pokemon_ids.append(pokemon.unique_id)
                count = count + 1
            try:
                if self.config_transfer:
                    response_dict = self.bot.api.release_pokemon(pokemon_ids=pokemon_ids)
                    result = response_dict['responses']['RELEASE_POKEMON']['result']
                    if result != 1:
                        self.logger.error(u'Error while transfer pokemon: {}'.format(error_codes[result]))
                        return False
            except Exception:
                return False
            for pokemon in transfered:
                candy = inventory.candies().get(pokemon.pokemon_id)
                if self.config_transfer and (not self.bot.config.test):
                    candy.add(1)
                self.emit_event("pokemon_release",
                                formatted="Exchanged {pokemon} [IV {iv}] [CP {cp}] [{candy} candies]",
                                data={"pokemon": pokemon.name,
                                      "iv": pokemon.iv,
                                      "cp": pokemon.cp,
                                      "candy": candy.quantity})
                if self.config_transfer:
                    inventory.pokemons().remove(pokemon.unique_id)
                    with self.bot.database as db:
                        cursor = db.cursor()
                        # Only log when the optional transfer_log table exists.
                        cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='transfer_log'")
                        db_result = cursor.fetchone()
                        if db_result[0] == 1:
                            db.execute("INSERT INTO transfer_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
    else:
        # One-by-one path (also used in test mode with a faked response).
        for pokemon in pokemons:
            if self.config_transfer and (not self.bot.config.test):
                response_dict = self.bot.api.release_pokemon(pokemon_id=pokemon.unique_id)
            else:
                response_dict = {"responses": {"RELEASE_POKEMON": {"candy_awarded": 0}}}
            if not response_dict:
                return False
            candy_awarded = response_dict.get("responses", {}).get("RELEASE_POKEMON", {}).get("candy_awarded", 0)
            candy = inventory.candies().get(pokemon.pokemon_id)
            if self.config_transfer and (not self.bot.config.test):
                candy.add(candy_awarded)
            self.emit_event("pokemon_release",
                            formatted="Exchanged {pokemon} [IV {iv}] [CP {cp}] [{candy} candies]",
                            data={"pokemon": pokemon.name,
                                  "iv": pokemon.iv,
                                  "cp": pokemon.cp,
                                  "candy": candy.quantity})
            if self.config_transfer and (not self.bot.config.test):
                inventory.pokemons().remove(pokemon.unique_id)
                with self.bot.database as db:
                    cursor = db.cursor()
                    cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='transfer_log'")
                    db_result = cursor.fetchone()
                    if db_result[0] == 1:
                        db.execute("INSERT INTO transfer_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
            if not skip_delay:
                action_delay(self.config_action_wait_min, self.config_action_wait_max)
    return True
def use_lucky_egg(self):
    """Activate a lucky egg from the inventory.

    Returns True when the egg was used or one is already active,
    False when none is available or the API call failed.
    """
    lucky_egg = inventory.items().get(Item.ITEM_LUCKY_EGG.value)  # @UndefinedVariable
    if lucky_egg.count == 0:
        return False
    response_dict = self.bot.use_lucky_egg()
    if not response_dict:
        self.emit_event("lucky_egg_error",
                        level='error',
                        formatted="Failed to use lucky egg!")
        return False
    result = response_dict.get("responses", {}).get("USE_ITEM_XP_BOOST", {}).get("result", 0)
    if result == SUCCESS:
        lucky_egg.remove(1)
        self.emit_event("used_lucky_egg",
                        formatted="Used lucky egg ({amount_left} left).",
                        data={"amount_left": lucky_egg.count})
        # Remember when the egg was activated (its boost is time limited).
        self.used_lucky_egg = datetime.datetime.now()
        return True
    elif result == ERROR_XP_BOOST_ALREADY_ACTIVE:
        # An egg is already running; treat it as success.
        self.emit_event("used_lucky_egg",
                        formatted="Lucky egg already active ({amount_left} left).",
                        data={"amount_left": lucky_egg.count})
        return True
    else:
        self.emit_event("lucky_egg_error",
                        level='error',
                        formatted="Failed to use lucky egg!")
        return False
def evolve_pokemon(self, pokemon, transfer=False):
    """Evolve ``pokemon`` (once), updating candies, xp, and the inventory.

    When ``transfer`` is True and no lucky egg is active, the freshly evolved
    Pokemon is transferred immediately. Returns True on success.
    """
    # If this Pokemon was already evolved in this batch, follow the mapping
    # to its newest form before acting on it.
    while pokemon.unique_id in self.evolution_map:
        pokemon = self.evolution_map[pokemon.unique_id]
    if self.config_evolve and (not self.bot.config.test):
        needed_evolution_item = inventory.pokemons().evolution_item_for(pokemon.pokemon_id)
        if needed_evolution_item is not None:
            if self.config_use_evolution_items:
                # We need evolution_item_requirement with some!!
                response_dict = self.bot.api.evolve_pokemon(pokemon_id=pokemon.unique_id, evolution_item_requirement=needed_evolution_item)
            else:
                return False
        else:
            response_dict = self.bot.api.evolve_pokemon(pokemon_id=pokemon.unique_id)
    else:
        # Test mode: fake a successful response.
        response_dict = {"responses": {"EVOLVE_POKEMON": {"result": SUCCESS}}}
    if not response_dict:
        return False
    result = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("result", 0)
    if result != SUCCESS:
        self.logger.info("Can't evolve %s" % pokemon.name)
        self.logger.info(response_dict)
        self.logger.info(result)
        return False
    xp = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("experience_awarded", 0)
    candy_awarded = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("candy_awarded", 0)
    candy = inventory.candies().get(pokemon.pokemon_id)
    evolution = response_dict.get("responses", {}).get("EVOLVE_POKEMON", {}).get("evolved_pokemon_data", {})
    if self.config_evolve and (not self.bot.config.test):
        # Net candy cost is the evolution cost minus the awarded refund.
        candy.consume(pokemon.evolution_cost - candy_awarded)
        inventory.player().exp += xp
    new_pokemon = inventory.Pokemon(evolution)
    self.emit_event("pokemon_evolved",
                    formatted="Evolved {pokemon} [CP {old_cp}] into {new} [IV {iv}] [CP {cp}] [{candy} candies] [+{xp} xp]",
                    data={"pokemon": pokemon.name,
                          "new": new_pokemon.name,
                          "iv": pokemon.iv,
                          "old_cp": pokemon.cp,
                          "cp": new_pokemon.cp,
                          "candy": candy.quantity,
                          "xp": xp})
    if self.config_evolve and (not self.bot.config.test):
        new_pokemon = inventory.Pokemon(evolution)
        # Record the old->new mapping so later steps act on the new form.
        self.evolution_map[pokemon.unique_id] = new_pokemon
        inventory.pokemons().remove(pokemon.unique_id)
        inventory.pokemons().add(new_pokemon)
        with self.bot.database as db:
            cursor = db.cursor()
            # Only log when the optional evolve_log table exists.
            cursor.execute("SELECT COUNT(name) FROM sqlite_master WHERE type='table' AND name='evolve_log'")
            db_result = cursor.fetchone()
            if db_result[0] == 1:
                db.execute("INSERT INTO evolve_log (pokemon, iv, cp) VALUES (?, ?, ?)", (pokemon.name, pokemon.iv, pokemon.cp))
        sleep(self.config_evolve_time, 0.1)
        if transfer and not self.used_lucky_egg:
            # Transfer the new Pokemon imediately!
            self.transfer_pokemon([new_pokemon], True)
    return True
def upgrade_pokemon(self, pokemon):
    """Power up ``pokemon`` half-level by half-level up to the target level.

    The target is capped by config, trainer level + 1.5, and the global
    maximum of 40. Returns True when all power-ups succeeded.
    """
    upgrade_level = min(self.config_upgrade_level, inventory.player().level + 1.5, 40)
    candy = inventory.candies().get(pokemon.pokemon_id)
    # Each loop iteration is one power-up (half a level); costs come from
    # the per-half-level cost table.
    for i in range(int(pokemon.level * 2), int(upgrade_level * 2)):
        upgrade_cost = self.pokemon_upgrade_cost[i - 2]
        upgrade_candy_cost = upgrade_cost[0]
        upgrade_stardust_cost = upgrade_cost[1]
        if self.config_upgrade and (not self.bot.config.test):
            response_dict = self.bot.api.upgrade_pokemon(pokemon_id=pokemon.unique_id)
        else:
            # Test mode: fake a successful response.
            response_dict = {"responses": {"UPGRADE_POKEMON": {"result": SUCCESS}}}
        if not response_dict:
            return False
        result = response_dict.get("responses", {}).get("UPGRADE_POKEMON", {}).get("result", 0)
        if result != SUCCESS:
            return False
        upgrade = response_dict.get("responses", {}).get("UPGRADE_POKEMON", {}).get("upgraded_pokemon", {})
        if self.config_upgrade and (not self.bot.config.test):
            candy.consume(upgrade_candy_cost)
            self.bot.stardust -= upgrade_stardust_cost
        new_pokemon = inventory.Pokemon(upgrade)
        self.emit_event("pokemon_upgraded",
                        formatted="Upgraded {pokemon} [IV {iv}] [CP {cp} -> {new_cp}] [{candy} candies] [{stardust} stardust]",
                        data={"pokemon": pokemon.name,
                              "iv": pokemon.iv,
                              "cp": pokemon.cp,
                              "new_cp": new_pokemon.cp,
                              "candy": candy.quantity,
                              "stardust": self.bot.stardust})
        if self.config_upgrade and (not self.bot.config.test):
            inventory.pokemons().remove(pokemon.unique_id)
            new_pokemon = inventory.Pokemon(upgrade)
            inventory.pokemons().add(new_pokemon)
            # Continue upgrading the new inventory object on next iteration.
            pokemon = new_pokemon
            action_delay(self.config_action_wait_min, self.config_action_wait_max)
    return True
def set_buddy_pokemon(self, pokemon):
    """Make ``pokemon`` the walking buddy. Returns True on success."""
    if not self.bot.config.test:
        response_dict = self.bot.api.set_buddy_pokemon(pokemon_id=pokemon.unique_id)
    else:
        # Test mode: fake a successful response with an empty buddy record.
        response_dict = {"responses": {"SET_BUDDY_POKEMON": {"result": SUCCESS, "updated_buddy": {"start_km_walked": 0, "last_km_awarded": 0, "id": 0}}}}
    if not response_dict:
        return False
    result = response_dict.get("responses", {}).get("SET_BUDDY_POKEMON", {}).get("result", 0)
    if result != SUCCESS:
        return False
    if not self.bot.config.test:
        self.buddy = response_dict.get("responses", {}).get("SET_BUDDY_POKEMON", {}).get("updated_buddy", {})
        self.buddyid = self._get_buddyid()
    self.emit_event("buddy_pokemon",
                    formatted="Buddy {pokemon} [IV {iv}] [CP {cp}]",
                    data={"pokemon": pokemon.name,
                          "iv": pokemon.iv,
                          "cp": pokemon.cp})
    # Keep this buddy until explicitly unlocked elsewhere.
    self.lock_buddy = True
    if not self.bot.config.test:
        action_delay(self.config_action_wait_min, self.config_action_wait_max)
    return True
def get_buddy_walked(self, pokemon):
    """Collect candies earned by walking with the buddy ``pokemon``.

    Returns True when candies were collected, False on API failure, and
    None (bare return) when no candy was earned yet.
    """
    if not self.bot.config.test:
        response_dict = self.bot.api.get_buddy_walked()
    else:
        # Test mode: fake a response with nothing earned.
        response_dict = {"responses": {"GET_BUDDY_WALKED": {"success": True, "family_candy_id": 0, "candy_earned_count": 0}}}
    if not response_dict:
        return False
    success = response_dict.get("responses", {}).get("GET_BUDDY_WALKED", {}).get("success", False)
    if not success:
        return False
    candy_earned_count = response_dict.get("responses", {}).get("GET_BUDDY_WALKED", {}).get("candy_earned_count", 0)
    if candy_earned_count == 0:
        # Nothing earned yet; note this returns None, not False.
        return
    family_candy_id = self.get_family_id(pokemon)
    candy = inventory.candies().get(family_candy_id)
    if not self.bot.config.test:
        candy.add(candy_earned_count)
    self.emit_event("buddy_reward",
                    formatted="Buddy {pokemon} rewards {family} candies [+{candy_earned} candies] [{candy} candies]",
                    data={"pokemon": pokemon.name,
                          "family": candy.type,
                          "candy_earned": candy_earned_count,
                          "candy": candy.quantity})
    if not self.bot.config.test:
        action_delay(self.config_action_wait_min, self.config_action_wait_max)
    return True
def _get_buddyid(self):
if self.buddy and'id' in self.buddy:
return self.buddy['id']
return 0
def favor_pokemon(self, pokemon):
    """Mark ``pokemon`` as favorite through the API and emit an event.

    On API failure the Pokemon id is appended to ``self.ignore_favorite``
    so it is not retried, and it is still flagged as favorite locally.
    """
    response_dict = self.bot.api.set_favorite_pokemon(pokemon_id=pokemon.unique_id, is_favorite=True)
    sleep(1.2)  # wait a bit after request
    if response_dict:
        result = response_dict.get('responses', {}).get('SET_FAVORITE_POKEMON', {}).get('result', 0)
        # Bug fix: compare with ``==`` instead of ``is``; identity tests on
        # small ints only pass by accident of CPython's int caching.
        if result == 1:  # Request success
            action_delay(self.config_action_wait_min, self.config_action_wait_max)
            # Mark Pokemon as favorite
            pokemon.is_favorite = True
            self.emit_event("pokemon_favored",
                            formatted="Favored {pokemon} [IV {iv}] [CP {cp}]",
                            data={"pokemon": pokemon.name,
                                  "iv": pokemon.iv,
                                  "cp": pokemon.cp})
    else:
        # Pokemon not found??
        self.ignore_favorite.append(pokemon.unique_id)
        pokemon.is_favorite = True
        self.logger.info("Unable to set %s as favorite!" % pokemon.name)
def unfavor_pokemon(self, pokemon):
    """Remove the favorite flag from ``pokemon`` through the API."""
    response_dict = self.bot.api.set_favorite_pokemon(pokemon_id=pokemon.unique_id, is_favorite=False)
    sleep(1.2)  # wait a bit after request
    if response_dict:
        result = response_dict.get('responses', {}).get('SET_FAVORITE_POKEMON', {}).get('result', 0)
        # Bug fix: use equality (``==``) rather than identity (``is``) for
        # the integer comparison.
        if result == 1:  # Request success
            # Mark Pokemon as no longer favorite
            pokemon.is_favorite = False
            self.emit_event("pokemon_unfavored",
                            formatted="Unfavored {pokemon} [IV {iv}] [CP {cp}]",
                            data={"pokemon": pokemon.name,
                                  "iv": pokemon.iv,
                                  "cp": pokemon.cp})
            action_delay(self.config_action_wait_min, self.config_action_wait_max)
| mit | -5,109,187,052,893,834,000 | 43.213144 | 198 | 0.534429 | false |
Titulacion-Sistemas/PythonTitulacion-EV | Lib/site-packages/django/contrib/gis/geos/prototypes/__init__.py | 314 | 1305 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import (create_cs, get_cs,
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz,
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims)
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import (from_hex, from_wkb, from_wkt,
create_point, create_linestring, create_linearring, create_polygon, create_collection,
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone,
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid,
get_dims, get_num_coords, get_num_geoms,
to_hex, to_wkb, to_wkt)
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import (geos_hasz, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses,
geos_disjoint, geos_equals, geos_equalsexact, geos_intersects,
geos_intersects, geos_overlaps, geos_relatepattern, geos_touches, geos_within)
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
| mit | 6,113,816,080,719,589,000 | 42.5 | 90 | 0.757088 | false |
IronLanguages/ironpython2 | Src/StdLib/Lib/site-packages/win32/Demos/security/localized_names.py | 34 | 2030 | # A Python port of the MS knowledge base article Q157234
# "How to deal with localized and renamed user and group names"
# http://support.microsoft.com/default.aspx?kbid=157234
import sys
from win32net import NetUserModalsGet
from win32security import LookupAccountSid
import pywintypes
from ntsecuritycon import *
def LookupAliasFromRid(TargetComputer, Rid):
    """Return the localized name of a BUILTIN alias (local group) by RID."""
    # The BUILTIN domain SID is the same on every machine, so the SID can
    # be built directly from the well-known authority and the alias RID.
    sid = pywintypes.SID()
    sid.Initialize(SECURITY_NT_AUTHORITY, 2)
    subauthorities = (SECURITY_BUILTIN_DOMAIN_RID, Rid)
    for index, subauthority in enumerate(subauthorities):
        sid.SetSubAuthority(index, subauthority)
    name, domain, typ = LookupAccountSid(TargetComputer, sid)
    return name
def LookupUserGroupFromRid(TargetComputer, Rid):
    """Return the localized name of a user/group on ``TargetComputer`` by RID.

    Builds the account SID as <account domain SID> + <Rid> and resolves it
    with LookupAccountSid.
    """
    # get the account domain Sid on the target machine
    # note: if you were looking up multiple sids based on the same
    # account domain, only need to call this once.
    umi2 = NetUserModalsGet(TargetComputer, 2)
    domain_sid = umi2['domain_id']
    SubAuthorityCount = domain_sid.GetSubAuthorityCount()
    # create and init new sid with acct domain Sid + acct Rid
    sid = pywintypes.SID()
    sid.Initialize(domain_sid.GetSidIdentifierAuthority(),
                   SubAuthorityCount+1)
    # copy existing subauthorities from account domain Sid into
    # new Sid
    for i in range(SubAuthorityCount):
        sid.SetSubAuthority(i, domain_sid.GetSubAuthority(i))
    # append Rid to new Sid
    sid.SetSubAuthority(SubAuthorityCount, Rid)
    name, domain, typ = LookupAccountSid(TargetComputer, sid)
    return name
def main():
    """Print the localized 'Administrator' and 'Administrators' names.

    Optional single command-line argument: the target computer name
    (defaults to the local machine when omitted).
    """
    if len(sys.argv) == 2:
        targetComputer = sys.argv[1]
    else:
        targetComputer = None
    name = LookupUserGroupFromRid(targetComputer, DOMAIN_USER_RID_ADMIN)
    print "'Administrator' user name = %s" % (name,)
    name = LookupAliasFromRid(targetComputer, DOMAIN_ALIAS_RID_ADMINS)
    print "'Administrators' local group/alias name = %s" % (name,)

if __name__=='__main__':
    main()
| apache-2.0 | -2,388,378,379,766,461,400 | 32.278689 | 72 | 0.706404 | false |
mhct/ardupilot | Tools/autotest/param_metadata/xmlemit.py | 238 | 2717 | #!/usr/bin/env python
from xml.sax.saxutils import escape, quoteattr
from param import *
from emit import Emit
# Emit APM documentation in an machine readable XML format
class XmlEmit(Emit):
    """Emit parameter documentation as machine-readable XML (apm.pdef.xml)."""

    def __init__(self):
        wiki_fname = 'apm.pdef.xml'
        self.f = open(wiki_fname, mode='w')
        preamble = '''<?xml version="1.0" encoding="utf-8"?>
<!-- Dynamically generated list of documented parameters (generated by param_parse.py) -->
<paramfile>
<vehicles>
'''
        self.f.write(preamble)

    def close(self):
        """Write the closing tags and close the output file."""
        self.f.write('</libraries>')
        self.f.write('''</paramfile>\n''')
        # Bug fix: the original used ``self.f.close`` without parentheses,
        # which referenced the method but never called it, so the file was
        # never actually closed/flushed here.
        self.f.close()

    def emit_comment(self, s):
        self.f.write("<!-- " + s + " -->")

    def start_libraries(self):
        # Vehicle parameters are done; switch to library parameters.
        self.f.write('</vehicles>')
        self.f.write('<libraries>')

    def emit(self, g, f):
        """Write one <parameters> group (a vehicle or library) to the file."""
        t = '''<parameters name=%s>\n''' % quoteattr(g.name)  # i.e. ArduPlane
        for param in g.params:
            # Begin our parameter node
            if hasattr(param, 'DisplayName'):
                t += '<param humanName=%s name=%s' % (quoteattr(param.DisplayName), quoteattr(param.name))  # i.e. ArduPlane (ArduPlane:FOOPARM)
            else:
                t += '<param name=%s' % quoteattr(param.name)
            if hasattr(param, 'Description'):
                t += ' documentation=%s' % quoteattr(param.Description)  # i.e. parameter docs
            if hasattr(param, 'User'):
                t += ' user=%s' % quoteattr(param.User)  # i.e. Standard or Advanced
            t += ">\n"
            # Add values as children of this node
            for field in param.__dict__.keys():
                if field not in ['name', 'DisplayName', 'Description', 'User'] and field in known_param_fields:
                    if field == 'Values' and Emit.prog_values_field.match(param.__dict__[field]):
                        t += "<values>\n"
                        values = (param.__dict__[field]).split(',')
                        for value in values:
                            v = value.split(':')
                            t += '''<value code=%s>%s</value>\n''' % (quoteattr(v[0]), escape(v[1]))  # i.e. numeric value, string label
                        t += "</values>\n"
                    else:
                        t += '''<field name=%s>%s</field>\n''' % (quoteattr(field), escape(param.__dict__[field]))  # i.e. Range: 0 10
            t += '''</param>\n'''
        t += '''</parameters>\n'''
        self.f.write(t)
| gpl-3.0 | 8,954,940,029,272,926,000 | 36.736111 | 142 | 0.471108 | false |
apache/airflow | tests/providers/amazon/aws/hooks/test_batch_client.py | 2 | 13457 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import botocore.exceptions
import pytest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.batch_client import AwsBatchClientHook
# Use dummy AWS credentials
# Dummy values only - these tests mock boto3 and never reach real AWS.
AWS_REGION = "eu-west-1"
AWS_ACCESS_KEY_ID = "airflow_dummy_key"
AWS_SECRET_ACCESS_KEY = "airflow_dummy_secret"
# Fixed UUID-style Batch job id reused across all test cases.
JOB_ID = "8ba9d676-4108-4474-9dca-8bbac1da9b19"
class TestAwsBatchClient(unittest.TestCase):
    """Unit tests for AwsBatchClientHook with a fully mocked boto3 client.

    Retry counts are kept tiny and all delays are mocked out so the tests
    run instantly; assertions check both results and exact API call counts.
    """

    MAX_RETRIES = 2
    STATUS_RETRIES = 3

    @mock.patch.dict("os.environ", AWS_DEFAULT_REGION=AWS_REGION)
    @mock.patch.dict("os.environ", AWS_ACCESS_KEY_ID=AWS_ACCESS_KEY_ID)
    @mock.patch.dict("os.environ", AWS_SECRET_ACCESS_KEY=AWS_SECRET_ACCESS_KEY)
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.AwsBaseHook.get_client_type")
    def setUp(self, get_client_type_mock):
        """Build a hook whose boto3 client is a mock and disable all delays."""
        self.get_client_type_mock = get_client_type_mock
        self.batch_client = AwsBatchClientHook(
            max_retries=self.MAX_RETRIES,
            status_retries=self.STATUS_RETRIES,
            aws_conn_id='airflow_test',
            region_name=AWS_REGION,
        )
        self.client_mock = get_client_type_mock.return_value
        assert self.batch_client.client == self.client_mock  # setup client property
        # don't pause in these unit tests
        self.mock_delay = mock.Mock(return_value=None)
        self.batch_client.delay = self.mock_delay
        self.mock_exponential_delay = mock.Mock(return_value=0)
        self.batch_client.exponential_delay = self.mock_exponential_delay

    def test_init(self):
        assert self.batch_client.max_retries == self.MAX_RETRIES
        assert self.batch_client.status_retries == self.STATUS_RETRIES
        assert self.batch_client.region_name == AWS_REGION
        assert self.batch_client.aws_conn_id == 'airflow_test'
        assert self.batch_client.client == self.client_mock

        self.get_client_type_mock.assert_called_once_with("batch", region_name=AWS_REGION)

    def test_wait_for_job_with_success(self):
        """wait_for_job polls running and complete phases once each."""
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]}

        with mock.patch.object(
            self.batch_client,
            "poll_for_job_running",
            wraps=self.batch_client.poll_for_job_running,
        ) as job_running:
            self.batch_client.wait_for_job(JOB_ID)
            job_running.assert_called_once_with(JOB_ID, None)

        with mock.patch.object(
            self.batch_client,
            "poll_for_job_complete",
            wraps=self.batch_client.poll_for_job_complete,
        ) as job_complete:
            self.batch_client.wait_for_job(JOB_ID)
            job_complete.assert_called_once_with(JOB_ID, None)

        # Two wait_for_job calls x two polls each = 4 describe_jobs calls.
        assert self.client_mock.describe_jobs.call_count == 4

    def test_wait_for_job_with_failure(self):
        """wait_for_job itself does not raise on a FAILED status."""
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "FAILED"}]}

        with mock.patch.object(
            self.batch_client,
            "poll_for_job_running",
            wraps=self.batch_client.poll_for_job_running,
        ) as job_running:
            self.batch_client.wait_for_job(JOB_ID)
            job_running.assert_called_once_with(JOB_ID, None)

        with mock.patch.object(
            self.batch_client,
            "poll_for_job_complete",
            wraps=self.batch_client.poll_for_job_complete,
        ) as job_complete:
            self.batch_client.wait_for_job(JOB_ID)
            job_complete.assert_called_once_with(JOB_ID, None)

        assert self.client_mock.describe_jobs.call_count == 4

    def test_poll_job_running_for_status_running(self):
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "RUNNING"}]}
        self.batch_client.poll_for_job_running(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])

    def test_poll_job_complete_for_status_success(self):
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]}
        self.batch_client.poll_for_job_complete(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])

    def test_poll_job_complete_raises_for_max_retries(self):
        """A job stuck in RUNNING exhausts max_retries and raises."""
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "RUNNING"}]}
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.poll_for_job_complete(JOB_ID)
        msg = f"AWS Batch job ({JOB_ID}) status checks exceed max_retries"
        assert msg in str(ctx.value)
        self.client_mock.describe_jobs.assert_called_with(jobs=[JOB_ID])
        assert self.client_mock.describe_jobs.call_count == self.MAX_RETRIES + 1

    def test_poll_job_status_hit_api_throttle(self):
        """Throttling errors are retried up to status_retries before raising."""
        self.client_mock.describe_jobs.side_effect = botocore.exceptions.ClientError(
            error_response={"Error": {"Code": "TooManyRequestsException"}},
            operation_name="get job description",
        )
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.poll_for_job_complete(JOB_ID)
        msg = f"AWS Batch job ({JOB_ID}) description error"
        assert msg in str(ctx.value)
        # It should retry when this client error occurs
        self.client_mock.describe_jobs.assert_called_with(jobs=[JOB_ID])
        assert self.client_mock.describe_jobs.call_count == self.STATUS_RETRIES

    def test_poll_job_status_with_client_error(self):
        """Non-throttling client errors fail immediately without retries."""
        self.client_mock.describe_jobs.side_effect = botocore.exceptions.ClientError(
            error_response={"Error": {"Code": "InvalidClientTokenId"}},
            operation_name="get job description",
        )
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.poll_for_job_complete(JOB_ID)
        msg = f"AWS Batch job ({JOB_ID}) description error"
        assert msg in str(ctx.value)
        # It will not retry when this client error occurs
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])

    def test_check_job_success(self):
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "SUCCEEDED"}]}
        status = self.batch_client.check_job_success(JOB_ID)
        assert status
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])

    def test_check_job_success_raises_failed(self):
        self.client_mock.describe_jobs.return_value = {
            "jobs": [
                {
                    "jobId": JOB_ID,
                    "status": "FAILED",
                    "statusReason": "This is an error reason",
                    "attempts": [{"exitCode": 1}],
                }
            ]
        }
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.check_job_success(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
        msg = f"AWS Batch job ({JOB_ID}) failed"
        assert msg in str(ctx.value)

    def test_check_job_success_raises_failed_for_multiple_attempts(self):
        self.client_mock.describe_jobs.return_value = {
            "jobs": [
                {
                    "jobId": JOB_ID,
                    "status": "FAILED",
                    "statusReason": "This is an error reason",
                    "attempts": [{"exitCode": 1}, {"exitCode": 10}],
                }
            ]
        }
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.check_job_success(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
        msg = f"AWS Batch job ({JOB_ID}) failed"
        assert msg in str(ctx.value)

    def test_check_job_success_raises_incomplete(self):
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": "RUNNABLE"}]}
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.check_job_success(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
        msg = f"AWS Batch job ({JOB_ID}) is not complete"
        assert msg in str(ctx.value)

    def test_check_job_success_raises_unknown_status(self):
        status = "STRANGE"
        self.client_mock.describe_jobs.return_value = {"jobs": [{"jobId": JOB_ID, "status": status}]}
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.check_job_success(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
        msg = f"AWS Batch job ({JOB_ID}) has unknown status"
        assert msg in str(ctx.value)
        assert status in str(ctx.value)

    def test_check_job_success_raises_without_jobs(self):
        self.client_mock.describe_jobs.return_value = {"jobs": []}
        with pytest.raises(AirflowException) as ctx:
            self.batch_client.check_job_success(JOB_ID)
        self.client_mock.describe_jobs.assert_called_once_with(jobs=[JOB_ID])
        msg = f"AWS Batch job ({JOB_ID}) description error"
        assert msg in str(ctx.value)

    def test_terminate_job(self):
        self.client_mock.terminate_job.return_value = {}
        reason = "Task killed by the user"
        response = self.batch_client.terminate_job(JOB_ID, reason)
        self.client_mock.terminate_job.assert_called_once_with(jobId=JOB_ID, reason=reason)
        assert response == {}
class TestAwsBatchClientDelays(unittest.TestCase):
    """Tests for the delay/backoff helpers on ``AwsBatchClientHook``."""

    @mock.patch.dict("os.environ", AWS_DEFAULT_REGION=AWS_REGION)
    @mock.patch.dict("os.environ", AWS_ACCESS_KEY_ID=AWS_ACCESS_KEY_ID)
    @mock.patch.dict("os.environ", AWS_SECRET_ACCESS_KEY=AWS_SECRET_ACCESS_KEY)
    def setUp(self):
        # A hook with default retry settings; AWS credentials come from the patched env.
        self.batch_client = AwsBatchClientHook(aws_conn_id='airflow_test', region_name=AWS_REGION)

    def test_init(self):
        """The hook picks up the connection id, region, and class-level retry defaults."""
        hook = self.batch_client
        assert hook.aws_conn_id == 'airflow_test'
        assert hook.region_name == AWS_REGION
        assert hook.max_retries == hook.MAX_RETRIES
        assert hook.status_retries == hook.STATUS_RETRIES

    def test_add_jitter(self):
        """add_jitter keeps the jittered delay within [minima, width]."""
        lower_bound = 0
        jitter_width = 5
        jittered = self.batch_client.add_jitter(0, width=jitter_width, minima=lower_bound)
        assert lower_bound <= jittered <= jitter_width

    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.sleep")
    def test_delay_defaults(self, mock_sleep, mock_uniform):
        """With no argument, delay() samples uniformly between the class default bounds."""
        assert AwsBatchClientHook.DEFAULT_DELAY_MIN == 1
        assert AwsBatchClientHook.DEFAULT_DELAY_MAX == 10
        mock_uniform.return_value = 0
        self.batch_client.delay()
        mock_uniform.assert_called_once_with(
            AwsBatchClientHook.DEFAULT_DELAY_MIN, AwsBatchClientHook.DEFAULT_DELAY_MAX
        )
        mock_sleep.assert_called_once_with(0)

    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.sleep")
    def test_delay_with_zero(self, mock_sleep, mock_uniform):
        """delay(0) jitters within [0, 1] before sleeping."""
        self.batch_client.delay(0)
        mock_uniform.assert_called_once_with(0, 1)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)

    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.sleep")
    def test_delay_with_int(self, mock_sleep, mock_uniform):
        """An integer delay is jittered to +/- 1 around the requested value."""
        self.batch_client.delay(5)
        mock_uniform.assert_called_once_with(4, 6)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)

    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.uniform")
    @mock.patch("airflow.providers.amazon.aws.hooks.batch_client.sleep")
    def test_delay_with_float(self, mock_sleep, mock_uniform):
        """A float delay is jittered to +/- 1.0 around the requested value."""
        self.batch_client.delay(5.0)
        mock_uniform.assert_called_once_with(4.0, 6.0)  # in add_jitter
        mock_sleep.assert_called_once_with(mock_uniform.return_value)

    @parameterized.expand(
        [
            (0, 0, 1),
            (1, 0, 2),
            (2, 0, 3),
            (3, 1, 5),
            (4, 2, 7),
            (5, 3, 11),
            (6, 4, 14),
            (7, 6, 19),
            (8, 8, 25),
            (9, 10, 31),
            (45, 200, 600),  # > 40 tries invokes maximum delay allowed
        ]
    )
    def test_exponential_delay(self, tries, lower, upper):
        """exponential_delay(tries) stays within the expected band for each try count."""
        delay = self.batch_client.exponential_delay(tries)
        assert lower <= delay <= upper
| apache-2.0 | 8,661,485,429,681,158,000 | 43.266447 | 106 | 0.643977 | false |
guorendong/iridium-browser-ubuntu | tools/telemetry/telemetry/page/page_test.py | 3 | 7843 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import action_runner as action_runner_module
from telemetry.page import test_expectations
class TestNotSupportedOnPlatformError(Exception):
"""PageTest Exception raised when a required feature is unavailable.
The feature required to run the test could be part of the platform,
hardware configuration, or browser.
"""
class MultiTabTestAppCrashError(Exception):
"""PageTest Exception raised after browser or tab crash for multi-tab tests.
Used to abort the test rather than try to recover from an unknown state.
"""
class Failure(Exception):
"""PageTest Exception raised when an undesired but designed-for problem."""
class MeasurementFailure(Failure):
"""PageTest Exception raised when an undesired but designed-for problem."""
class PageTest(object):
"""A class styled on unittest.TestCase for creating page-specific tests.
Test should override ValidateAndMeasurePage to perform test
validation and page measurement as necessary.
class BodyChildElementMeasurement(PageTest):
def ValidateAndMeasurePage(self, page, tab, results):
body_child_count = tab.EvaluateJavaScript(
'document.body.children.length')
results.AddValue(scalar.ScalarValue(
page, 'body_children', 'count', body_child_count))
Args:
discard_first_run: Discard the first run of this page. This is
usually used with page_repeat and pageset_repeat options.
"""
def __init__(self,
needs_browser_restart_after_each_page=False,
discard_first_result=False,
clear_cache_before_each_run=False):
super(PageTest, self).__init__()
self.options = None
self._needs_browser_restart_after_each_page = (
needs_browser_restart_after_each_page)
self._discard_first_result = discard_first_result
self._clear_cache_before_each_run = clear_cache_before_each_run
self._close_tabs_before_run = True
@property
def is_multi_tab_test(self):
"""Returns True if the test opens multiple tabs.
If the test overrides TabForPage, it is deemed a multi-tab test.
Multi-tab tests do not retry after tab or browser crashes, whereas,
single-tab tests too. That is because the state of multi-tab tests
(e.g., how many tabs are open, etc.) is unknown after crashes.
"""
return self.TabForPage.__func__ is not PageTest.TabForPage.__func__
@property
def discard_first_result(self):
"""When set to True, the first run of the test is discarded. This is
useful for cases where it's desirable to have some test resource cached so
the first run of the test can warm things up. """
return self._discard_first_result
@discard_first_result.setter
def discard_first_result(self, discard):
self._discard_first_result = discard
@property
def clear_cache_before_each_run(self):
"""When set to True, the browser's disk and memory cache will be cleared
before each run."""
return self._clear_cache_before_each_run
@property
def close_tabs_before_run(self):
"""When set to True, all tabs are closed before running the test for the
first time."""
return self._close_tabs_before_run
@close_tabs_before_run.setter
def close_tabs_before_run(self, close_tabs):
self._close_tabs_before_run = close_tabs
def RestartBrowserBeforeEachPage(self):
""" Should the browser be restarted for the page?
This returns true if the test needs to unconditionally restart the
browser for each page. It may be called before the browser is started.
"""
return self._needs_browser_restart_after_each_page
def StopBrowserAfterPage(self, browser, page): # pylint: disable=W0613
"""Should the browser be stopped after the page is run?
This is called after a page is run to decide whether the browser needs to
be stopped to clean up its state. If it is stopped, then it will be
restarted to run the next page.
A test that overrides this can look at both the page and the browser to
decide whether it needs to stop the browser.
"""
return False
def CustomizeBrowserOptions(self, options):
"""Override to add test-specific options to the BrowserOptions object"""
def CustomizeBrowserOptionsForSinglePage(self, page, options):
"""Set options specific to the test and the given page.
This will be called with the current page when the browser is (re)started.
Changing options at this point only makes sense if the browser is being
restarted for each page. Note that if page has a startup_url, the browser
will always be restarted for each run.
"""
if page.startup_url:
options.browser_options.startup_url = page.startup_url
def WillStartBrowser(self, platform):
"""Override to manipulate the browser environment before it launches."""
def DidStartBrowser(self, browser):
"""Override to customize the browser right after it has launched."""
def SetOptions(self, options):
"""Sets the BrowserFinderOptions instance to use."""
self.options = options
def WillNavigateToPage(self, page, tab):
"""Override to do operations before the page is navigated, notably Telemetry
will already have performed the following operations on the browser before
calling this function:
* Ensure only one tab is open.
* Call WaitForDocumentReadyStateToComplete on the tab."""
def DidNavigateToPage(self, page, tab):
"""Override to do operations right after the page is navigated and after
all waiting for completion has occurred."""
def CleanUpAfterPage(self, page, tab):
"""Called after the test run method was run, even if it failed."""
def CreateExpectations(self, page_set): # pylint: disable=W0613
"""Override to make this test generate its own expectations instead of
any that may have been defined in the page set."""
return test_expectations.TestExpectations()
def TabForPage(self, page, browser): # pylint: disable=W0613
"""Override to select a different tab for the page. For instance, to
create a new tab for every page, return browser.tabs.New()."""
return browser.tabs[0]
def ValidateAndMeasurePage(self, page, tab, results):
"""Override to check test assertions and perform measurement.
When adding measurement results, call results.AddValue(...) for
each result. Raise an exception or add a failure.FailureValue on
failure. page_test.py also provides several base exception classes
to use.
Prefer metric value names that are in accordance with python
variable style. e.g., metric_name. The name 'url' must not be used.
Put together:
def ValidateAndMeasurePage(self, page, tab, results):
res = tab.EvaluateJavaScript('2+2')
if res != 4:
raise Exception('Oh, wow.')
results.AddValue(scalar.ScalarValue(
page, 'two_plus_two', 'count', res))
Args:
page: A telemetry.page.Page instance.
tab: A telemetry.core.Tab instance.
results: A telemetry.results.PageTestResults instance.
"""
raise NotImplementedError
def RunPage(self, page, tab, results):
# Run actions.
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
page.RunPageInteractions(action_runner)
self.ValidateAndMeasurePage(page, tab, results)
def RunNavigateSteps(self, page, tab):
"""Navigates the tab to the page URL attribute.
Runs the 'navigate_steps' page attribute as a compound action.
"""
action_runner = action_runner_module.ActionRunner(
tab, skip_waits=page.skip_waits)
page.RunNavigateSteps(action_runner)
| bsd-3-clause | -419,873,031,838,376,300 | 36.526316 | 80 | 0.713885 | false |
sunny94/temp | sympy/integrals/integrals.py | 7 | 49446 | from __future__ import print_function, division
from sympy.concrete.expr_with_limits import AddWithLimits
from sympy.core.add import Add
from sympy.core.basic import Basic, C
from sympy.core.compatibility import is_sequence, xrange
from sympy.core.containers import Tuple
from sympy.core.expr import Expr
from sympy.core.function import diff
from sympy.core.numbers import oo
from sympy.core.relational import Eq
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import (Dummy, Symbol, Wild)
from sympy.core.sympify import sympify
from sympy.integrals.manualintegrate import manualintegrate
from sympy.integrals.trigonometry import trigintegrate
from sympy.integrals.deltafunctions import deltaintegrate
from sympy.integrals.rationaltools import ratint
from sympy.integrals.heurisch import heurisch, heurisch_wrapper
from sympy.integrals.meijerint import meijerint_definite, meijerint_indefinite
from sympy.utilities import xthreaded, flatten
from sympy.utilities.misc import filldedent
from sympy.polys import Poly, PolynomialError
from sympy.solvers.solvers import solve, posify
from sympy.functions import Piecewise, sqrt, sign
from sympy.geometry import Curve
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.series import limit
class Integral(AddWithLimits):
"""Represents unevaluated integral."""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
"""Create an unevaluated integral.
Arguments are an integrand followed by one or more limits.
If no limits are given and there is only one free symbol in the
expression, that symbol will be used, otherwise an error will be
raised.
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x)
Integral(x, x)
>>> Integral(y)
Integral(y, y)
When limits are provided, they are interpreted as follows (using
``x`` as though it were the variable of integration):
(x,) or x - indefinite integral
(x, a) - "evaluate at" integral is an abstract antiderivative
(x, a, b) - definite integral
The ``as_dummy`` method can be used to see which symbols cannot be
targeted by subs: those with a preppended underscore cannot be
changed with ``subs``. (Also, the integration variables themselves --
the first element of a limit -- can never be changed by subs.)
>>> i = Integral(x, x)
>>> at = Integral(x, (x, x))
>>> i.as_dummy()
Integral(x, x)
>>> at.as_dummy()
Integral(_x, (_x, x))
"""
#This will help other classes define their own definitions
#of behaviour with Integral.
if hasattr(function, '_eval_Integral'):
return function._eval_Integral(*symbols, **assumptions)
obj = AddWithLimits.__new__(cls, function, *symbols, **assumptions)
return obj
def __getnewargs__(self):
return (self.function,) + tuple([tuple(xab) for xab in self.limits])
@property
def free_symbols(self):
"""
This method returns the symbols that will exist when the
integral is evaluated. This is useful if one is trying to
determine whether an integral depends on a certain
symbol or not.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, y, 1)).free_symbols
set([y])
See Also
========
function, limits, variables
"""
return AddWithLimits.free_symbols.fget(self)
def _eval_is_zero(self):
# This is a very naive and quick test, not intended to do the integral to
# answer whether it is zero or not, e.g. Integral(sin(x), (x, 0, 2*pi))
# is zero but this routine should return None for that case. But, like
# Mul, there are trivial situations for which the integral will be
# zero so we check for those.
if self.function.is_zero:
return True
got_none = False
for l in self.limits:
if len(l) == 3:
z = (l[1] == l[2]) or (l[1] - l[2]).is_zero
if z:
return True
elif z is None:
got_none = True
free = self.function.free_symbols
for xab in self.limits:
if len(xab) == 1:
free.add(xab[0])
continue
if len(xab) == 2 and xab[0] not in free:
if xab[1].is_zero:
return True
elif xab[1].is_zero is None:
got_none = True
# take integration symbol out of free since it will be replaced
# with the free symbols in the limits
free.discard(xab[0])
# add in the new symbols
for i in xab[1:]:
free.update(i.free_symbols)
if self.function.is_zero is False and got_none is False:
return False
def transform(self, x, u):
r"""
Performs a change of variables from `x` to `u` using the relationship
given by `x` and `u` which will define the transformations `f` and `F`
(which are inverses of each other) as follows:
1) If `x` is a Symbol (which is a variable of integration) then `u`
will be interpreted as some function, f(u), with inverse F(u).
This, in effect, just makes the substitution of x with f(x).
2) If `u` is a Symbol then `x` will be interpreted as some function,
F(x), with inverse f(u). This is commonly referred to as
u-substitution.
Once f and F have been identified, the transformation is made as
follows:
.. math:: \int_a^b x \mathrm{d}x \rightarrow \int_{F(a)}^{F(b)} f(x)
\frac{\mathrm{d}}{\mathrm{d}x}
where `F(x)` is the inverse of `f(x)` and the limits and integrand have
been corrected so as to retain the same value after integration.
Notes
=====
The mappings, F(x) or f(u), must lead to a unique integral. Linear
or rational linear expression, `2*x`, `1/x` and `sqrt(x)`, will
always work; quadratic expressions like `x**2 - 1` are acceptable
as long as the resulting integrand does not depend on the sign of
the solutions (see examples).
The integral will be returned unchanged if `x` is not a variable of
integration.
`x` must be (or contain) only one of of the integration variables. If
`u` has more than one free symbol then it should be sent as a tuple
(`u`, `uvar`) where `uvar` identifies which variable is replacing
the integration variable.
XXX can it contain another integration variable?
Examples
========
>>> from sympy.abc import a, b, c, d, x, u, y
>>> from sympy import Integral, S, cos, sqrt
>>> i = Integral(x*cos(x**2 - 1), (x, 0, 1))
transform can change the variable of integration
>>> i.transform(x, u)
Integral(u*cos(u**2 - 1), (u, 0, 1))
transform can perform u-substitution as long as a unique
integrand is obtained:
>>> i.transform(x**2 - 1, u)
Integral(cos(u)/2, (u, -1, 0))
This attempt fails because x = +/-sqrt(u + 1) and the
sign does not cancel out of the integrand:
>>> Integral(cos(x**2 - 1), (x, 0, 1)).transform(x**2 - 1, u)
Traceback (most recent call last):
...
ValueError:
The mapping between F(x) and f(u) did not give a unique integrand.
transform can do a substitution. Here, the previous
result is transformed back into the original expression
using "u-substitution":
>>> ui = _
>>> _.transform(sqrt(u + 1), x) == i
True
We can accomplish the same with a regular substitution:
>>> ui.transform(u, x**2 - 1) == i
True
If the `x` does not contain a symbol of integration then
the integral will be returned unchanged. Integral `i` does
not have an integration variable `a` so no change is made:
>>> i.transform(a, x) == i
True
When `u` has more than one free symbol the symbol that is
replacing `x` must be identified by passing `u` as a tuple:
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, u))
Integral(a + u, (u, -a, -a + 1))
>>> Integral(x, (x, 0, 1)).transform(x, (u + a, a))
Integral(a + u, (a, -u, -u + 1))
See Also
========
variables : Lists the integration variables
as_dummy : Replace integration variables with dummy ones
"""
d = Dummy('d')
xfree = x.free_symbols.intersection(self.variables)
if len(xfree) > 1:
raise ValueError(
'F(x) can only contain one of: %s' % self.variables)
xvar = xfree.pop() if xfree else d
if xvar not in self.variables:
return self
u = sympify(u)
if isinstance(u, Expr):
ufree = u.free_symbols
if len(ufree) != 1:
raise ValueError(filldedent('''
When f(u) has more than one free symbol, the one replacing x
must be identified: pass f(u) as (f(u), u)'''))
uvar = ufree.pop()
else:
u, uvar = u
if uvar not in u.free_symbols:
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) where symbol identified
a free symbol in expr, but symbol is not in expr's free
symbols.'''))
if not isinstance(uvar, Symbol):
raise ValueError(filldedent('''
Expecting a tuple (expr, symbol) but didn't get
a symbol; got %s''' % uvar))
if x.is_Symbol and u.is_Symbol:
return self.xreplace({x: u})
if not x.is_Symbol and not u.is_Symbol:
raise ValueError('either x or u must be a symbol')
if uvar == xvar:
return self.transform(x, (u.subs(uvar, d), d)).xreplace({d: uvar})
if uvar in self.limits:
raise ValueError(filldedent('''
u must contain the same variable as in x
or a variable that is not already an integration variable'''))
if not x.is_Symbol:
F = [x.subs(xvar, d)]
soln = solve(u - x, xvar, check=False)
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), x)')
f = [fi.subs(uvar, d) for fi in soln]
else:
f = [u.subs(uvar, d)]
pdiff, reps = posify(u - x)
puvar = uvar.subs([(v, k) for k, v in reps.items()])
soln = [s.subs(reps) for s in solve(pdiff, puvar)]
if not soln:
raise ValueError('no solution for solve(F(x) - f(u), u)')
F = [fi.subs(xvar, d) for fi in soln]
newfuncs = set([(self.function.subs(xvar, fi)*fi.diff(d)
).subs(d, uvar) for fi in f])
if len(newfuncs) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not give
a unique integrand.'''))
newfunc = newfuncs.pop()
def _calc_limit_1(F, a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
wok = F.subs(d, a)
if wok is S.NaN or wok.is_finite is False and a.is_finite:
return limit(sign(b)*F, d, a)
return wok
def _calc_limit(a, b):
"""
replace d with a, using subs if possible, otherwise limit
where sign of b is considered
"""
avals = list(set([_calc_limit_1(Fi, a, b) for Fi in F]))
if len(avals) > 1:
raise ValueError(filldedent('''
The mapping between F(x) and f(u) did not
give a unique limit.'''))
return avals[0]
newlimits = []
for xab in self.limits:
sym = xab[0]
if sym == xvar:
if len(xab) == 3:
a, b = xab[1:]
a, b = _calc_limit(a, b), _calc_limit(b, a)
if a - b > 0:
a, b = b, a
newfunc = -newfunc
newlimits.append((uvar, a, b))
elif len(xab) == 2:
a = _calc_limit(xab[1], 1)
newlimits.append((uvar, a))
else:
newlimits.append(uvar)
else:
newlimits.append(xab)
return self.func(newfunc, *newlimits)
def doit(self, **hints):
"""
Perform the integration using any hints given.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).doit()
Piecewise((2, log(x) == 0), (x**3/log(x) - x/log(x), True))
See Also
========
sympy.integrals.trigonometry.trigintegrate
sympy.integrals.risch.heurisch
sympy.integrals.rationaltools.ratint
as_sum : Approximate the integral using a sum
"""
if not hints.get('integrals', True):
return self
deep = hints.get('deep', True)
meijerg = hints.get('meijerg', None)
conds = hints.get('conds', 'piecewise')
risch = hints.get('risch', None)
manual = hints.get('manual', None)
if conds not in ['separate', 'piecewise', 'none']:
raise ValueError('conds must be one of "separate", "piecewise", '
'"none", got: %s' % conds)
if risch and any(len(xab) > 1 for xab in self.limits):
raise ValueError('risch=True is only allowed for indefinite integrals.')
# check for the trivial zero
if self.is_zero:
return S.Zero
# now compute and check the function
function = self.function
if deep:
function = function.doit(**hints)
if function.is_zero:
return S.Zero
# There is no trivial answer, so continue
undone_limits = []
# ulj = free symbols of any undone limits' upper and lower limits
ulj = set()
for xab in self.limits:
# compute uli, the free symbols in the
# Upper and Lower limits of limit I
if len(xab) == 1:
uli = set(xab[:1])
elif len(xab) == 2:
uli = xab[1].free_symbols
elif len(xab) == 3:
uli = xab[1].free_symbols.union(xab[2].free_symbols)
# this integral can be done as long as there is no blocking
# limit that has been undone. An undone limit is blocking if
# it contains an integration variable that is in this limit's
# upper or lower free symbols or vice versa
if xab[0] in ulj or any(v[0] in uli for v in undone_limits):
undone_limits.append(xab)
ulj.update(uli)
function = self.func(*([function] + [xab]))
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
continue
# There are a number of tradeoffs in using the meijer g method.
# It can sometimes be a lot faster than other methods, and
# sometimes slower. And there are certain types of integrals for
# which it is more likely to work than others.
# These heuristics are incorporated in deciding what integration
# methods to try, in what order.
# See the integrate() docstring for details.
def try_meijerg(function, xab):
ret = None
if len(xab) == 3 and meijerg is not False:
x, a, b = xab
try:
res = meijerint_definite(function, x, a, b)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if res is not None:
f, cond = res
if conds == 'piecewise':
ret = Piecewise((f, cond),
(self.func(function, (x, a, b)), True))
elif conds == 'separate':
if len(self.limits) != 1:
raise ValueError('conds=separate not supported in '
'multiple integrals')
ret = f, cond
else:
ret = f
return ret
meijerg1 = meijerg
if len(xab) == 3 and xab[1].is_real and xab[2].is_real \
and not function.is_Poly and \
(xab[1].has(oo, -oo) or xab[2].has(oo, -oo)):
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
else:
meijerg1 = False
# If the special meijerg code did not succeed finding a definite
# integral, then the code using meijerint_indefinite will not either
# (it might find an antiderivative, but the answer is likely to be
# nonsensical).
# Thus if we are requested to only use meijer g-function methods,
# we give up at this stage. Otherwise we just disable g-function
# methods.
if meijerg1 is False and meijerg is True:
antideriv = None
else:
antideriv = self._eval_integral(
function, xab[0],
meijerg=meijerg1, risch=risch, manual=manual,
conds=conds)
if antideriv is None and meijerg1 is True:
ret = try_meijerg(function, xab)
if ret is not None:
function = ret
continue
if antideriv is None:
undone_limits.append(xab)
function = self.func(*([function] + [xab])).factor()
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
continue
else:
if len(xab) == 1:
function = antideriv
else:
if len(xab) == 3:
x, a, b = xab
elif len(xab) == 2:
x, b = xab
a = None
else:
raise NotImplementedError
if deep:
if isinstance(a, Basic):
a = a.doit(**hints)
if isinstance(b, Basic):
b = b.doit(**hints)
if antideriv.is_Poly:
gens = list(antideriv.gens)
gens.remove(x)
antideriv = antideriv.as_expr()
function = antideriv._eval_interval(x, a, b)
function = Poly(function, *gens)
elif isinstance(antideriv, Add):
function = Add(*[i._eval_interval(x,a,b) for i in
Add.make_args(antideriv)])
else:
try:
function = antideriv._eval_interval(x, a, b)
except NotImplementedError:
# This can happen if _eval_interval depends in a
# complicated way on limits that cannot be computed
undone_limits.append(xab)
function = self.func(*([function] + [xab]))
factored_function = function.factor()
if not isinstance(factored_function, Integral):
function = factored_function
return function
def _eval_derivative(self, sym):
"""Evaluate the derivative of the current Integral object by
differentiating under the integral sign [1], using the Fundamental
Theorem of Calculus [2] when possible.
Whenever an Integral is encountered that is equivalent to zero or
has an integrand that is independent of the variable of integration
those integrals are performed. All others are returned as Integral
instances which can be resolved with doit() (provided they are integrable).
References:
[1] http://en.wikipedia.org/wiki/Differentiation_under_the_integral_sign
[2] http://en.wikipedia.org/wiki/Fundamental_theorem_of_calculus
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> i = Integral(x + y, y, (y, 1, x))
>>> i.diff(x)
Integral(x + y, (y, x)) + Integral(1, y, (y, 1, x))
>>> i.doit().diff(x) == i.diff(x).doit()
True
>>> i.diff(y)
0
The previous must be true since there is no y in the evaluated integral:
>>> i.free_symbols
set([x])
>>> i.doit()
2*x**3/3 - x/2 - 1/6
"""
# differentiate under the integral sign; we do not
# check for regularity conditions (TODO), see issue 4215
# get limits and the function
f, limits = self.function, list(self.limits)
# the order matters if variables of integration appear in the limits
# so work our way in from the outside to the inside.
limit = limits.pop(-1)
if len(limit) == 3:
x, a, b = limit
elif len(limit) == 2:
x, b = limit
a = None
else:
a = b = None
x = limit[0]
if limits: # f is the argument to an integral
f = self.func(f, *tuple(limits))
# assemble the pieces
def _do(f, ab):
dab_dsym = diff(ab, sym)
if not dab_dsym:
return S.Zero
if isinstance(f, Integral):
limits = [(x, x) if (len(l) == 1 and l[0] == x) else l
for l in f.limits]
f = self.func(f.function, *limits)
return f.subs(x, ab)*dab_dsym
rv = 0
if b is not None:
rv += _do(f, b)
if a is not None:
rv -= _do(f, a)
if len(limit) == 1 and sym == x:
# the dummy variable *is* also the real-world variable
arg = f
rv += arg
else:
# the dummy variable might match sym but it's
# only a dummy and the actual variable is determined
# by the limits, so mask off the variable of integration
# while differentiating
u = Dummy('u')
arg = f.subs(x, u).diff(sym).subs(u, x)
rv += self.func(arg, Tuple(x, a, b))
return rv
def _eval_integral(self, f, x, meijerg=None, risch=None, manual=None,
conds='piecewise'):
"""
Calculate the anti-derivative to the function f(x).
The following algorithms are applied (roughly in this order):
1. Simple heuristics (based on pattern matching and integral table):
- most frequently used functions (e.g. polynomials, products of trig functions)
2. Integration of rational functions:
- A complete algorithm for integrating rational functions is
implemented (the Lazard-Rioboo-Trager algorithm). The algorithm
also uses the partial fraction decomposition algorithm
implemented in apart() as a preprocessor to make this process
faster. Note that the integral of a rational function is always
elementary, but in general, it may include a RootSum.
3. Full Risch algorithm:
- The Risch algorithm is a complete decision
procedure for integrating elementary functions, which means that
given any elementary function, it will either compute an
elementary antiderivative, or else prove that none exists.
Currently, part of transcendental case is implemented, meaning
elementary integrals containing exponentials, logarithms, and
(soon!) trigonometric functions can be computed. The algebraic
case, e.g., functions containing roots, is much more difficult
and is not implemented yet.
- If the routine fails (because the integrand is not elementary, or
because a case is not implemented yet), it continues on to the
next algorithms below. If the routine proves that the integrals
is nonelementary, it still moves on to the algorithms below,
because we might be able to find a closed-form solution in terms
of special functions. If risch=True, however, it will stop here.
4. The Meijer G-Function algorithm:
- This algorithm works by first rewriting the integrand in terms of
very general Meijer G-Function (meijerg in SymPy), integrating
it, and then rewriting the result back, if possible. This
algorithm is particularly powerful for definite integrals (which
is actually part of a different method of Integral), since it can
compute closed-form solutions of definite integrals even when no
closed-form indefinite integral exists. But it also is capable
of computing many indefinite integrals as well.
- Another advantage of this method is that it can use some results
about the Meijer G-Function to give a result in terms of a
Piecewise expression, which allows to express conditionally
convergent integrals.
- Setting meijerg=True will cause integrate() to use only this
method.
5. The "manual integration" algorithm:
- This algorithm tries to mimic how a person would find an
antiderivative by hand, for example by looking for a
substitution or applying integration by parts. This algorithm
does not handle as many integrands but can return results in a
more familiar form.
- Sometimes this algorithm can evaluate parts of an integral; in
this case integrate() will try to evaluate the rest of the
integrand using the other methods here.
- Setting manual=True will cause integrate() to use only this
method.
6. The Heuristic Risch algorithm:
- This is a heuristic version of the Risch algorithm, meaning that
it is not deterministic. This is tried as a last resort because
it can be very slow. It is still used because not enough of the
full Risch algorithm is implemented, so that there are still some
integrals that can only be computed using this method. The goal
is to implement enough of the Risch and Meijer G methods so that
this can be deleted.
"""
from sympy.integrals.risch import risch_integrate
if risch:
try:
return risch_integrate(f, x, conds=conds)
except NotImplementedError:
return None
if manual:
try:
result = manualintegrate(f, x)
if result is not None and result.func != Integral:
return result
except (ValueError, PolynomialError):
pass
# if it is a poly(x) then let the polynomial integrate itself (fast)
#
# It is important to make this check first, otherwise the other code
# will return a sympy expression instead of a Polynomial.
#
# see Polynomial for details.
if isinstance(f, Poly) and not meijerg:
return f.integrate(x)
# Piecewise antiderivatives need to call special integrate.
if f.func is Piecewise:
return f._eval_integral(x)
# let's cut it short if `f` does not depend on `x`
if not f.has(x):
return f*x
# try to convert to poly(x) and then integrate if successful (fast)
poly = f.as_poly(x)
if poly is not None and not meijerg:
return poly.integrate().as_expr()
if risch is not False:
try:
result, i = risch_integrate(f, x, separate_integral=True, conds=conds)
except NotImplementedError:
pass
else:
if i:
# There was a nonelementary integral. Try integrating it.
return result + i.doit(risch=False)
else:
return result
# since Integral(f=g1+g2+...) == Integral(g1) + Integral(g2) + ...
# we are going to handle Add terms separately,
# if `f` is not Add -- we only have one term
# Note that in general, this is a bad idea, because Integral(g1) +
# Integral(g2) might not be computable, even if Integral(g1 + g2) is.
# For example, Integral(x**x + x**x*log(x)). But many heuristics only
# work term-wise. So we compute this step last, after trying
# risch_integrate. We also try risch_integrate again in this loop,
# because maybe the integral is a sum of an elementary part and a
# nonelementary part (like erf(x) + exp(x)). risch_integrate() is
# quite fast, so this is acceptable.
parts = []
args = Add.make_args(f)
for g in args:
coeff, g = g.as_independent(x)
# g(x) = const
if g is S.One and not meijerg:
parts.append(coeff*x)
continue
# g(x) = expr + O(x**n)
order_term = g.getO()
if order_term is not None:
h = self._eval_integral(g.removeO(), x)
if h is not None:
h_order_expr = self._eval_integral(order_term.expr, x)
if h_order_expr is not None:
h_order_term = order_term.func(
h_order_expr, *order_term.variables)
parts.append(coeff*(h + h_order_term))
continue
# NOTE: if there is O(x**n) and we fail to integrate then there is
# no point in trying other methods because they will fail anyway.
return None
# c
# g(x) = (a*x+b)
if g.is_Pow and not g.exp.has(x) and not meijerg:
a = Wild('a', exclude=[x])
b = Wild('b', exclude=[x])
M = g.base.match(a*x + b)
if M is not None:
if g.exp == -1:
h = C.log(g.base)
elif conds != 'piecewise':
h = g.base**(g.exp + 1) / (g.exp + 1)
else:
h1 = C.log(g.base)
h2 = g.base**(g.exp + 1) / (g.exp + 1)
h = Piecewise((h1, Eq(g.exp, -1)), (h2, True))
parts.append(coeff * h / M[a])
continue
# poly(x)
# g(x) = -------
# poly(x)
if g.is_rational_function(x) and not meijerg:
parts.append(coeff * ratint(g, x))
continue
if not meijerg:
# g(x) = Mul(trig)
h = trigintegrate(g, x, conds=conds)
if h is not None:
parts.append(coeff * h)
continue
# g(x) has at least a DiracDelta term
h = deltaintegrate(g, x)
if h is not None:
parts.append(coeff * h)
continue
# Try risch again.
if risch is not False:
try:
h, i = risch_integrate(g, x, separate_integral=True, conds=conds)
except NotImplementedError:
h = None
else:
if i:
h = h + i.doit(risch=False)
parts.append(coeff*h)
continue
# fall back to heurisch
try:
if conds == 'piecewise':
h = heurisch_wrapper(g, x, hints=[])
else:
h = heurisch(g, x, hints=[])
except PolynomialError:
# XXX: this exception means there is a bug in the
# implementation of heuristic Risch integration
# algorithm.
h = None
else:
h = None
if meijerg is not False and h is None:
# rewrite using G functions
try:
h = meijerint_indefinite(g, x)
except NotImplementedError:
from sympy.integrals.meijerint import _debug
_debug('NotImplementedError from meijerint_definite')
res = None
if h is not None:
parts.append(coeff * h)
continue
if h is None and manual is not False:
try:
result = manualintegrate(g, x)
if result is not None and not isinstance(result, Integral):
if result.has(Integral):
# try to have other algorithms do the integrals
# manualintegrate can't handle
result = result.func(*[
arg.doit(manual=False) if arg.has(Integral) else arg
for arg in result.args
]).expand(multinomial=False,
log=False,
power_exp=False,
power_base=False)
if not result.has(Integral):
parts.append(coeff * result)
continue
except (ValueError, PolynomialError):
# can't handle some SymPy expressions
pass
# if we failed maybe it was because we had
# a product that could have been expanded,
# so let's try an expansion of the whole
# thing before giving up; we don't try this
# at the outset because there are things
# that cannot be solved unless they are
# NOT expanded e.g., x**x*(1+log(x)). There
# should probably be a checker somewhere in this
# routine to look for such cases and try to do
# collection on the expressions if they are already
# in an expanded form
if not h and len(args) == 1:
f = f.expand(mul=True, deep=False)
if f.is_Add:
# Note: risch will be identical on the expanded
# expression, but maybe it will be able to pick out parts,
# like x*(exp(x) + erf(x)).
return self._eval_integral(f, x, meijerg=meijerg, risch=risch, conds=conds)
if h is not None:
parts.append(coeff * h)
else:
return None
return Add(*parts)
def _eval_lseries(self, x, logx):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
for term in expr.function.lseries(symb, logx):
yield integrate(term, *expr.limits)
def _eval_nseries(self, x, n, logx):
expr = self.as_dummy()
symb = x
for l in expr.limits:
if x in l[1:]:
symb = l[0]
break
terms, order = expr.function.nseries(
x=symb, n=n, logx=logx).as_coeff_add(C.Order)
return integrate(terms, *expr.limits) + Add(*order)*x
def as_sum(self, n, method="midpoint"):
"""
Approximates the definite integral by a sum.
method ... one of: left, right, midpoint, trapezoid
These are all basically the rectangle method [1], the only difference
is where the function value is taken in each interval to define the
rectangle.
[1] http://en.wikipedia.org/wiki/Rectangle_method
Examples
========
>>> from sympy import sin, sqrt
>>> from sympy.abc import x
>>> from sympy.integrals import Integral
>>> e = Integral(sin(x), (x, 3, 7))
>>> e
Integral(sin(x), (x, 3, 7))
For demonstration purposes, this interval will only be split into 2
regions, bounded by [3, 5] and [5, 7].
The left-hand rule uses function evaluations at the left of each
interval:
>>> e.as_sum(2, 'left')
2*sin(5) + 2*sin(3)
The midpoint rule uses evaluations at the center of each interval:
>>> e.as_sum(2, 'midpoint')
2*sin(4) + 2*sin(6)
The right-hand rule uses function evaluations at the right of each
interval:
>>> e.as_sum(2, 'right')
2*sin(5) + 2*sin(7)
The trapezoid rule uses function evaluations on both sides of the
intervals. This is equivalent to taking the average of the left and
right hand rule results:
>>> e.as_sum(2, 'trapezoid')
2*sin(5) + sin(3) + sin(7)
>>> (e.as_sum(2, 'left') + e.as_sum(2, 'right'))/2 == _
True
All but the trapexoid method may be used when dealing with a function
with a discontinuity. Here, the discontinuity at x = 0 can be avoided
by using the midpoint or right-hand method:
>>> e = Integral(1/sqrt(x), (x, 0, 1))
>>> e.as_sum(5).n(4)
1.730
>>> e.as_sum(10).n(4)
1.809
>>> e.doit().n(4) # the actual value is 2
2.000
The left- or trapezoid method will encounter the discontinuity and
return oo:
>>> e.as_sum(5, 'left')
oo
>>> e.as_sum(5, 'trapezoid')
oo
See Also
========
Integral.doit : Perform the integration using any hints
"""
limits = self.limits
if len(limits) > 1:
raise NotImplementedError(
"Multidimensional midpoint rule not implemented yet")
else:
limit = limits[0]
if len(limit) != 3:
raise ValueError("Expecting a definite integral.")
if n <= 0:
raise ValueError("n must be > 0")
if n == oo:
raise NotImplementedError("Infinite summation not yet implemented")
sym, lower_limit, upper_limit = limit
dx = (upper_limit - lower_limit)/n
if method == 'trapezoid':
l = self.function.limit(sym, lower_limit)
r = self.function.limit(sym, upper_limit, "-")
result = (l + r)/2
for i in range(1, n):
x = lower_limit + i*dx
result += self.function.subs(sym, x)
return result*dx
elif method not in ('left', 'right', 'midpoint'):
raise NotImplementedError("Unknown method %s" % method)
result = 0
for i in range(n):
if method == "midpoint":
xi = lower_limit + i*dx + dx/2
elif method == "left":
xi = lower_limit + i*dx
if i == 0:
result = self.function.limit(sym, lower_limit)
continue
elif method == "right":
xi = lower_limit + i*dx + dx
if i == n:
result += self.function.limit(sym, upper_limit, "-")
continue
result += self.function.subs(sym, xi)
return result*dx
@xthreaded
def integrate(*args, **kwargs):
    """integrate(f, var, ...)
    Compute definite or indefinite integral of one or more variables
    using Risch-Norman algorithm and table lookup. This procedure is
    able to handle elementary algebraic and transcendental functions
    and also a huge class of special functions, including Airy,
    Bessel, Whittaker and Lambert.
    var can be:
    - a symbol -- indefinite integration
    - a tuple (symbol, a) -- indefinite integration with result
    given with `a` replacing `symbol`
    - a tuple (symbol, a, b) -- definite integration
    Several variables can be specified, in which case the result is
    multiple integration. (If var is omitted and the integrand is
    univariate, the indefinite integral in that variable will be performed.)
    Indefinite integrals are returned without terms that are independent
    of the integration variables. (see examples)
    Definite improper integrals often entail delicate convergence
    conditions. Pass conds='piecewise', 'separate' or 'none' to have
    these returned, respectively, as a Piecewise function, as a separate
    result (i.e. result will be a tuple), or not at all (default is
    'piecewise').
    **Strategy**
    SymPy uses various approaches to definite integration. One method is to
    find an antiderivative for the integrand, and then use the fundamental
    theorem of calculus. Various functions are implemented to integrate
    polynomial, rational and trigonometric functions, and integrands
    containing DiracDelta terms.
    SymPy also implements the part of the Risch algorithm, which is a decision
    procedure for integrating elementary functions, i.e., the algorithm can
    either find an elementary antiderivative, or prove that one does not
    exist. There is also a (very successful, albeit somewhat slow) general
    implementation of the heuristic Risch algorithm. This algorithm will
    eventually be phased out as more of the full Risch algorithm is
    implemented. See the docstring of Integral._eval_integral() for more
    details on computing the antiderivative using algebraic methods.
    The option risch=True can be used to use only the (full) Risch algorithm.
    This is useful if you want to know if an elementary function has an
    elementary antiderivative. If the indefinite Integral returned by this
    function is an instance of NonElementaryIntegral, that means that the
    Risch algorithm has proven that integral to be non-elementary. Note that
    by default, additional methods (such as the Meijer G method outlined
    below) are tried on these integrals, as they may be expressible in terms
    of special functions, so if you only care about elementary answers, use
    risch=True. Also note that an unevaluated Integral returned by this
    function is not necessarily a NonElementaryIntegral, even with risch=True,
    as it may just be an indication that the particular part of the Risch
    algorithm needed to integrate that function is not yet implemented.
    Another family of strategies comes from re-writing the integrand in
    terms of so-called Meijer G-functions. Indefinite integrals of a
    single G-function can always be computed, and the definite integral
    of a product of two G-functions can be computed from zero to
    infinity. Various strategies are implemented to rewrite integrands
    as G-functions, and use this information to compute integrals (see
    the ``meijerint`` module).
    The option manual=True can be used to use only an algorithm that tries
    to mimic integration by hand. This algorithm does not handle as many
    integrands as the other algorithms implemented but may return results in
    a more familiar form. The ``manualintegrate`` module has functions that
    return the steps used (see the module docstring for more information).
    In general, the algebraic methods work best for computing
    antiderivatives of (possibly complicated) combinations of elementary
    functions. The G-function methods work best for computing definite
    integrals from zero to infinity of moderately complicated
    combinations of special functions, or indefinite integrals of very
    simple combinations of special functions.
    The strategy employed by the integration code is as follows:
    - If computing a definite integral, and both limits are real,
    and at least one limit is +- oo, try the G-function method of
    definite integration first.
    - Try to find an antiderivative, using all available methods, ordered
    by performance (that is try fastest method first, slowest last; in
    particular polynomial integration is tried first, meijer
    g-functions second to last, and heuristic risch last).
    - If still not successful, try G-functions irrespective of the
    limits.
    The option meijerg=True, False, None can be used to, respectively:
    always use G-function methods and no others, never use G-function
    methods, or use all available methods (in order as described above).
    It defaults to None.
    Examples
    ========
    >>> from sympy import integrate, log, exp, oo
    >>> from sympy.abc import a, x, y
    >>> integrate(x*y, x)
    x**2*y/2
    >>> integrate(log(x), x)
    x*log(x) - x
    >>> integrate(log(x), (x, 1, a))
    a*log(a) - a + 1
    >>> integrate(x)
    x**2/2
    Terms that are independent of x are dropped by indefinite integration:
    >>> from sympy import sqrt
    >>> integrate(sqrt(1 + x), (x, 0, x))
    2*(x + 1)**(3/2)/3 - 2/3
    >>> integrate(sqrt(1 + x), x)
    2*(x + 1)**(3/2)/3
    >>> integrate(x*y)
    Traceback (most recent call last):
    ...
    ValueError: specify integration variables to integrate x*y
    Note that ``integrate(x)`` syntax is meant only for convenience
    in interactive sessions and should be avoided in library code.
    >>> integrate(x**a*exp(-x), (x, 0, oo)) # same as conds='piecewise'
    Piecewise((gamma(a + 1), -re(a) < 1),
    (Integral(x**a*exp(-x), (x, 0, oo)), True))
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='none')
    gamma(a + 1)
    >>> integrate(x**a*exp(-x), (x, 0, oo), conds='separate')
    (gamma(a + 1), -re(a) < 1)
    See Also
    ========
    Integral, Integral.doit
    """
    # Strategy-selection flags are popped here because the Integral
    # constructor does not accept them; they are forwarded to doit() below.
    meijerg = kwargs.pop('meijerg', None)
    conds = kwargs.pop('conds', 'piecewise')
    risch = kwargs.pop('risch', None)
    manual = kwargs.pop('manual', None)
    integral = Integral(*args, **kwargs)
    # Integral(...) is not guaranteed to return an Integral instance (hence
    # this check); only evaluate via doit() when it actually is one.
    if isinstance(integral, Integral):
        return integral.doit(deep=False, meijerg=meijerg, conds=conds,
                             risch=risch, manual=manual)
    else:
        return integral
@xthreaded
def line_integrate(field, curve, vars):
    """line_integrate(field, Curve, variables)
    Compute the line integral.
    Examples
    ========
    >>> from sympy import Curve, line_integrate, E, ln
    >>> from sympy.abc import x, y, t
    >>> C = Curve([E**t + 1, E**t - 1], (t, 0, ln(2)))
    >>> line_integrate(x + y, C, [x, y])
    3*sqrt(2)
    See Also
    ========
    integrate, Integral
    """
    F = sympify(field)
    # Validate the arguments up front, each failure with its own message.
    if not F:
        raise ValueError(
            "Expecting function specifying field as first argument.")
    if not isinstance(curve, Curve):
        raise ValueError("Expecting Curve entity as second argument.")
    if not is_sequence(vars):
        raise ValueError("Expecting ordered iterable for variables.")
    if len(curve.functions) != len(vars):
        raise ValueError("Field variable size does not match curve dimension.")
    if curve.parameter in vars:
        raise ValueError("Curve parameter clashes with field parameters.")
    # Substitute the curve parametrisation into the field, F(r) -> F(r(t)),
    # while accumulating the squared speed |r'(t)|^2 for the arc-length
    # factor sqrt(dldt).
    Ft = F
    dldt = 0
    for var, func in zip(vars, curve.functions):
        deriv = diff(func, curve.parameter)
        dldt = dldt + deriv * deriv
        Ft = Ft.subs(var, func)
    # Integrate F(r(t)) * |r'(t)| over the curve's parameter interval.
    return Integral(Ft * sqrt(dldt), curve.limits).doit(deep=False)
| bsd-3-clause | -3,009,226,579,436,996,000 | 37.509346 | 95 | 0.549953 | false |
rishiloyola/bedrock | bedrock/press/forms.py | 19 | 6883 | # coding: utf-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django import forms
from lib.l10n_utils.dotlang import _, _lazy
from bedrock.mozorg.forms import (DateInput, EmailInput, HoneyPotWidget,
NumberInput, TelInput, TimeInput, URLInput)
# Maximum allowed size for the sr_attachment upload, in bytes (5 MB).
SPEAKER_REQUEST_FILE_SIZE_LIMIT = 5242880  # 5MB
class SpeakerRequestForm(forms.Form):
    """Speaker-request submission form.

    Fields are prefixed ``sr_`` and grouped into sections: event, contact,
    event details, presentation details, and additional info. The unprefixed
    ``office_fax`` field is a honeypot used to reject automated spam
    submissions (see :meth:`clean_office_fax`).
    """

    # event fields
    sr_event_name = forms.CharField(
        max_length=255,
        required=True,
        error_messages={
            'required': _lazy(u'Please enter a name for the event.'),
        },
        widget=forms.TextInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_event_url = forms.URLField(
        max_length=2000,
        required=True,
        error_messages={
            'required': _lazy(u'Please enter a URL.'),
            'invalid': _lazy(u'Please enter a valid URL.'),
        },
        widget=URLInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
                'placeholder': _lazy(u'http://www.my-event.com'),
            }
        ),
    )
    # Date/time are plain CharFields (free-form text) rendered with
    # date/time widgets; no server-side date parsing is performed.
    sr_event_date = forms.CharField(
        required=True,
        error_messages={
            'required': _lazy(u'Please provide a date.'),
        },
        widget=DateInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_event_time = forms.CharField(
        required=True,
        error_messages={
            'required': _lazy(u'Please provide a time.'),
        },
        widget=TimeInput(
            attrs={
                'class': 'required',
                'required': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_guest_speaker1 = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_guest_speaker2 = forms.CharField(
        max_length=200,
        required=False,
    )

    # contact fields
    sr_contact_name = forms.CharField(
        max_length=200,
        required=True,
        widget=forms.TextInput(
            attrs={
                'required': 'required',
                'class': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_contact_title = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_contact_company = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_contact_phone = forms.CharField(
        max_length=50,
        required=False,
        widget=TelInput(),
    )
    sr_contact_email = forms.EmailField(
        max_length=254,  # max length allowed for emails
        required=True,
        error_messages={
            'invalid': _lazy(u'Please enter a valid email address'),
        },
        widget=EmailInput(
            attrs={
                'required': 'required',
                'class': 'required',
                'aria-required': 'true',
            }
        ),
    )
    sr_contact_company_url = forms.URLField(
        max_length=2000,
        required=False,
        widget=forms.TextInput(
            attrs={
                'placeholder': _lazy(u'http://www.my-company.com'),
            }
        ),
    )

    # event details fields
    sr_event_venue = forms.CharField(
        max_length=400,
        required=False,
    )
    sr_event_theme = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_event_goal = forms.CharField(
        max_length=300,
        required=False,
    )
    sr_event_format = forms.CharField(
        max_length=200,
        required=False,
    )
    sr_event_audience_size = forms.IntegerField(
        required=False,
        widget=NumberInput(
            attrs={
                'min': 1,
                'placeholder': 25,
            }
        ),
    )
    sr_event_audience_demographics = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_confirmed = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_invited = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_speakers_past = forms.CharField(
        max_length=1000,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_media_coverage = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_sponsors = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_event_confirmation_deadline = forms.DateField(
        required=False,
        widget=DateInput(),
    )

    # presentation details fields
    sr_presentation_type = forms.MultipleChoiceField(
        required=False,
        choices=(
            ('keynote', _lazy(u'Keynote')),
            ('presentation', _lazy(u'Presentation')),
            ('fireside chat', _lazy(u'Fireside Chat')),
            ('panel', _lazy(u'Panel')),
            ('other', _lazy(u'Other')),
        ),
        widget=forms.CheckboxSelectMultiple(),
    )
    sr_presentation_panelists = forms.CharField(
        max_length=500,
        required=False,
        widget=forms.Textarea(),
    )
    sr_presentation_topic = forms.CharField(
        required=False,
        max_length=255,
    )
    # NOTE(review): an IntegerField with a 0.5 step and a 2.5 placeholder
    # looks inconsistent -- confirm whether this should be a FloatField.
    # (Left unchanged here to preserve validation behavior.)
    sr_presentation_length = forms.IntegerField(
        required=False,
        widget=NumberInput(
            attrs={
                'min': 0.5,
                'step': 0.5,
                'placeholder': 2.5,
            }
        )
    )

    # additional info fields
    sr_attachment = forms.FileField(
        required=False,
    )

    # honeypot
    office_fax = forms.CharField(widget=HoneyPotWidget, required=False)

    def clean_sr_attachment(self):
        """Reject attachments larger than SPEAKER_REQUEST_FILE_SIZE_LIMIT."""
        cleaned_data = super(SpeakerRequestForm, self).clean()
        attachment = cleaned_data.get("sr_attachment")
        if attachment:
            # ``UploadedFile.size`` is the documented public attribute;
            # the previous code reached into the private ``_size``.
            if attachment.size > SPEAKER_REQUEST_FILE_SIZE_LIMIT:
                raise forms.ValidationError(
                    _("Attachment must not exceed 5MB"))
        return attachment

    def clean_office_fax(self):
        """Treat any value in the honeypot field as a spam submission."""
        cleaned_data = super(SpeakerRequestForm, self).clean()
        honeypot = cleaned_data.pop('office_fax', None)
        if honeypot:
            raise forms.ValidationError(
                _('Your submission could not be processed'))
| mpl-2.0 | -7,612,150,388,246,540,000 | 26.313492 | 77 | 0.527677 | false |
xiandiancloud/edxplaltfom-xusong | lms/djangoapps/shoppingcart/migrations/0005_auto__add_paidcourseregistrationannotation__add_field_orderitem_report.py | 58 | 9807 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration for the shoppingcart app.

    Adds the PaidCourseRegistrationAnnotation model and the
    OrderItem.report_comments column; ``backwards`` reverses both.
    """
    def forwards(self, orm):
        # Adding model 'PaidCourseRegistrationAnnotation'
        db.create_table('shoppingcart_paidcourseregistrationannotation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128, db_index=True)),
            ('annotation', self.gf('django.db.models.fields.TextField')(null=True)),
        ))
        db.send_create_signal('shoppingcart', ['PaidCourseRegistrationAnnotation'])
        # Adding field 'OrderItem.report_comments'
        db.add_column('shoppingcart_orderitem', 'report_comments',
                      self.gf('django.db.models.fields.TextField')(default=''),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting model 'PaidCourseRegistrationAnnotation'
        db.delete_table('shoppingcart_paidcourseregistrationannotation')
        # Deleting field 'OrderItem.report_comments'
        db.delete_column('shoppingcart_orderitem', 'report_comments')
    # Frozen ORM state captured by South when the migration was generated;
    # it builds the `orm` object passed to forwards()/backwards().
    # Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'shoppingcart.certificateitem': {
            'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
            'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.order': {
            'Meta': {'object_name': 'Order'},
            'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
            'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
            'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
            'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
            'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
            'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'shoppingcart.paidcourseregistration': {
            'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
            'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
        },
        'shoppingcart.paidcourseregistrationannotation': {
            'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
            'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'course_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'student.courseenrollment': {
            'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
complete_apps = ['shoppingcart'] | agpl-3.0 | -7,644,577,462,375,307,000 | 73.30303 | 182 | 0.563169 | false |
intgr/django | django/conf/locale/nb/formats.py | 65 | 1699 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
    # '%d. %b %Y', '%d %b %Y',  # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',  # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',  # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M:%S.%f',  # '2006-10-25 14:30:59.000200'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M:%S.%f',  # '25.10.06 14:30:59.000200'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
]
# Number localization: comma as the decimal mark, non-breaking space as the
# thousands separator, digits grouped three at a time.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause | 3,912,284,169,426,064,000 | 42.564103 | 81 | 0.520306 | false |
SickRage/SickRage | sickrage/libs/trakt/core/emitter.py | 7 | 6294 | from __future__ import absolute_import, division, print_function
import logging
# concurrent.futures is optional
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
ThreadPoolExecutor = None
log = logging.getLogger(__name__)
class Emitter(object):
threading = False
threading_workers = 2
__constructed = False
__name = None
__callbacks = None
__threading_pool = None
def __ensure_constructed(self):
if self.__constructed:
return
self.__callbacks = {}
self.__constructed = True
if self.threading:
if ThreadPoolExecutor is None:
raise Exception('concurrent.futures is required for threading')
self.__threading_pool = ThreadPoolExecutor(max_workers=self.threading_workers)
def __log(self, message, *args, **kwargs):
if self.__name is None:
self.__name = '%s.%s' % (
self.__module__,
self.__class__.__name__
)
log.debug(
('[%s]:' % self.__name.ljust(34)) + str(message),
*args, **kwargs
)
def __wrap(self, callback, *args, **kwargs):
def wrap(func):
callback(func=func, *args, **kwargs)
return func
return wrap
def on(self, events, func=None, on_bound=None):
if not func:
# assume decorator, wrap
return self.__wrap(self.on, events, on_bound=on_bound)
if not isinstance(events, (list, tuple)):
events = [events]
self.__log('on(events: %s, func: %s)', repr(events), repr(func))
self.__ensure_constructed()
for event in events:
if event not in self.__callbacks:
self.__callbacks[event] = []
# Bind callback to event
self.__callbacks[event].append(func)
# Call 'on_bound' callback
if on_bound:
self.__call(on_bound, kwargs={
'func': func
})
return self
def once(self, event, func=None):
if not func:
# assume decorator, wrap
return self.__wrap(self.once, event)
self.__log('once(event: %s, func: %s)', repr(event), repr(func))
def once_callback(*args, **kwargs):
self.off(event, once_callback)
func(*args, **kwargs)
self.on(event, once_callback)
return self
def off(self, event=None, func=None):
self.__log('off(event: %s, func: %s)', repr(event), repr(func))
self.__ensure_constructed()
if event and event not in self.__callbacks:
return self
if func and func not in self.__callbacks[event]:
return self
if event and func:
self.__callbacks[event].remove(func)
elif event:
self.__callbacks[event] = []
elif func:
raise ValueError('"event" is required if "func" is specified')
else:
self.__callbacks = {}
return self
def emit(self, event, *args, **kwargs):
suppress = kwargs.pop('__suppress', False)
if not suppress:
self.__log('emit(event: %s, args: %s, kwargs: %s)', repr(event), repr_trim(args), repr_trim(kwargs))
self.__ensure_constructed()
if event not in self.__callbacks:
return
for callback in list(self.__callbacks[event]):
self.__call(callback, args, kwargs, event)
return self
def emit_on(self, event, *args, **kwargs):
func = kwargs.pop('func', None)
if not func:
# assume decorator, wrap
return self.__wrap(self.emit_on, event, *args, **kwargs)
self.__log('emit_on(event: %s, func: %s, args: %s, kwargs: %s)',
repr(event), repr(func), repr(args), repr(kwargs))
# Bind func from wrapper
self.on(event, func)
# Emit event (calling 'func')
self.emit(event, *args, **kwargs)
def pipe(self, events, other):
if type(events) is not list:
events = [events]
self.__log('pipe(events: %s, other: %s)', repr(events), repr(other))
self.__ensure_constructed()
for event in events:
self.on(event, PipeHandler(event, other.emit))
return self
def __call(self, callback, args=None, kwargs=None, event=None):
args = args or ()
kwargs = kwargs or {}
if self.threading:
return self.__call_async(callback, args, kwargs, event)
return self.__call_sync(callback, args, kwargs, event)
@classmethod
def __call_sync(cls, callback, args=None, kwargs=None, event=None):
try:
callback(*args, **kwargs)
return True
except Exception as ex:
log.warn('[%s] Exception raised in: %s - %s' % (event, cls.__function_name(callback), ex), exc_info=True)
return False
def __call_async(self, callback, args=None, kwargs=None, event=None):
self.__threading_pool.submit(self.__call_sync, callback, args, kwargs, event)
@staticmethod
def __function_name(func):
fragments = []
# Try append class name
cls = getattr(func, 'im_class', None)
if cls and hasattr(cls, '__name__'):
fragments.append(cls.__name__)
# Append function name
fragments.append(func.__name__)
return '.'.join(fragments)
class PipeHandler(object):
def __init__(self, event, callback):
self.event = event
self.callback = callback
def __call__(self, *args, **kwargs):
self.callback(self.event, *args, **kwargs)
def on(emitter, event, func=None):
emitter.on(event, func)
return {
'destroy': lambda: emitter.off(event, func)
}
def once(emitter, event, func=None):
return emitter.once(event, func)
def off(emitter, event, func=None):
return emitter.off(event, func)
def emit(emitter, event, *args, **kwargs):
return emitter.emit(event, *args, **kwargs)
def repr_trim(value, length=1000):
value = repr(value)
if len(value) < length:
return value
return '<%s - %s characters>' % (type(value).__name__, len(value))
| gpl-3.0 | 6,182,695,384,060,411,000 | 25.445378 | 117 | 0.557356 | false |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/neighbors/base.py | 1 | 24541 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import safe_asarray, atleast2d_or_csr, check_arrays
from ..utils.fixes import argpartition
from ..utils.validation import DataConversionWarning
from ..externals import six
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
class NeighborsWarning(UserWarning):
pass
# Make sure that NeighborsWarning are displayed more than once
warnings.simplefilter("always", NeighborsWarning)
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
===========
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
========
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
with np.errstate(divide='ignore'):
dist = 1. / dist
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
if kwargs:
warnings.warn("Passing additional arguments to the metric "
"function as **kwargs is deprecated "
"and will no longer be supported in 0.18. "
"Use metric_params instead.",
DeprecationWarning, stacklevel=3)
if metric_params is None:
metric_params = {}
metric_params.update(kwargs)
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = atleast2d_or_csr(X, copy=False)
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if (self.n_neighbors is None
or self.n_neighbors < self._fit_X.shape[0] // 2):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
return self
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns distance
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to point, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
j = np.arange(neigh_ind.shape[0])[:, None]
neigh_ind = neigh_ind[j, np.argsort(dist[j, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
return np.sqrt(dist[j, neigh_ind]), neigh_ind
else:
return dist[j, neigh_ind], neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
result = self._tree.query(X, n_neighbors,
return_distance=return_distance)
return result
else:
raise ValueError("internal: _fit_method not recognized")
def kneighbors_graph(self, X, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
X = safe_asarray(X)
if n_neighbors is None:
n_neighbors = self.n_neighbors
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones((n_samples1, n_neighbors))
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
data, ind = self.kneighbors(X, n_neighbors + 1,
return_distance=True)
A_data, A_ind = data[:, 1:], ind[:, 1:]
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
return csr_matrix((A_data.ravel(), A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, last dimension same as that of fit data
The new point or points
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the euclidean distances to each point,
only present if return_distance=True.
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.radius_neighbors([1., 1., 1.])) # doctest: +ELLIPSIS
(array([[ 1.5, 0.5]]...), array([[1, 2]]...)
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise ValueError("must fit neighbors before querying")
X = atleast2d_or_csr(X)
if radius is None:
radius = self.radius
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind = [np.where(d < radius)[0] for d in dist]
# if there are the same number of neighbors for each point,
# we can do a normal array. Otherwise, we return an object
# array with elements that are numpy arrays
try:
neigh_ind = np.asarray(neigh_ind, dtype=int)
dtype_F = float
except ValueError:
neigh_ind = np.asarray(neigh_ind, dtype='object')
dtype_F = object
if return_distance:
if self.effective_metric_ == 'euclidean':
dist = np.array([np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)],
dtype=dtype_F)
else:
dist = np.array([d[neigh_ind[i]]
for i, d in enumerate(dist)],
dtype=dtype_F)
return dist, neigh_ind
else:
return neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
ind, dist = results
return dist, ind
else:
return results
else:
raise ValueError("internal: _fit_method not recognized")
def radius_neighbors_graph(self, X, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Sample data
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
X = safe_asarray(X)
if radius is None:
radius = self.radius
n_samples1 = X.shape[0]
n_samples2 = self._fit_X.shape[0]
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_neighbors = np.array([len(a) for a in A_ind])
n_nonzero = np.sum(n_neighbors)
if A_data is None:
A_data = np.ones(n_nonzero)
A_ind = np.concatenate(list(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_arrays(X, y, sparse_format="csr")
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape = [n_samples, n_features]
"""
return self._fit(X)
| apache-2.0 | -4,065,047,931,965,230,600 | 36.12708 | 79 | 0.533149 | false |
tedelhourani/ansible | test/units/module_utils/facts/test_collector.py | 13 | 16812 | # This file is part of Ansible
# -*- coding: utf-8 -*-
#
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
from collections import defaultdict
# for testing
from ansible.compat.tests import unittest
from ansible.module_utils.facts import collector
from ansible.module_utils.facts import default_collectors
class TestFindCollectorsForPlatform(unittest.TestCase):
    """Tests for collector.find_collectors_for_platform platform filtering."""

    def test(self):
        # Only 'Generic' collectors should be selected for a Generic-only
        # compat_platforms list.
        compat_platforms = [{'system': 'Generic'}]
        res = collector.find_collectors_for_platform(default_collectors.collectors,
                                                     compat_platforms)
        for coll_class in res:
            # NOTE: must be a one-element tuple. ('Generic') is just the
            # string 'Generic', which makes assertIn do a substring check
            # (e.g. a platform named 'Gen' would incorrectly pass).
            self.assertIn(coll_class._platform, ('Generic',))

    def test_linux(self):
        # Only 'Linux' collectors should be selected for a Linux-only
        # compat_platforms list.
        compat_platforms = [{'system': 'Linux'}]
        res = collector.find_collectors_for_platform(default_collectors.collectors,
                                                     compat_platforms)
        for coll_class in res:
            # One-element tuple for the same reason as above.
            self.assertIn(coll_class._platform, ('Linux',))

    def test_linux_or_generic(self):
        # With both platforms listed, collectors for either may be selected.
        compat_platforms = [{'system': 'Generic'}, {'system': 'Linux'}]
        res = collector.find_collectors_for_platform(default_collectors.collectors,
                                                     compat_platforms)
        for coll_class in res:
            self.assertIn(coll_class._platform, ('Generic', 'Linux'))
class TestSelectCollectorNames(unittest.TestCase):
    """Tests for collector.select_collector_classes."""

    def _all_collector_classes(self):
        # Deliberately ordered: select_collector_classes is expected to
        # preserve the order of this list in its result.
        return [default_collectors.DistributionFactCollector,
                default_collectors.PkgMgrFactCollector,
                default_collectors.LinuxNetworkCollector]

    def _all_fact_subsets(self, data=None):
        """Build a fact-id -> [collector class] map as a defaultdict(list)."""
        defaults = {'pkg_mgr': [default_collectors.PkgMgrFactCollector],
                    'distribution': [default_collectors.DistributionFactCollector],
                    'network': [default_collectors.LinuxNetworkCollector]}
        subsets = defaultdict(list)
        subsets.update(data or defaults)
        return subsets

    def test(self):
        names = set(['distribution', 'all_ipv4_addresses', 'local', 'pkg_mgr'])
        selected = collector.select_collector_classes(names,
                                                      self._all_fact_subsets(),
                                                      self._all_collector_classes())
        # Result order follows the all_collector_classes ordering.
        self.assertEqual(selected,
                         [default_collectors.DistributionFactCollector,
                          default_collectors.PkgMgrFactCollector])

    def test_reverse(self):
        names = set(['distribution', 'all_ipv4_addresses', 'local', 'pkg_mgr'])
        classes = self._all_collector_classes()
        classes.reverse()
        selected = collector.select_collector_classes(names,
                                                      self._all_fact_subsets(),
                                                      classes)
        # Reversing the class list reverses the selection order too.
        self.assertEqual(selected,
                         [default_collectors.PkgMgrFactCollector,
                          default_collectors.DistributionFactCollector])

    def test_default_collectors(self):
        # Smoke test: run the full pipeline over the real default collectors
        # for a Generic platform (no assertion; must not raise).
        platform_info = {'system': 'Generic'}
        collectors_for_platform = collector.find_collectors_for_platform(
            default_collectors.collectors, [platform_info])

        all_fact_subsets, aliases_map = collector.build_fact_id_to_collector_map(
            collectors_for_platform)

        collector_names = collector.get_collector_names(
            valid_subsets=frozenset(all_fact_subsets),
            aliases_map=aliases_map,
            platform_info=platform_info)
        collector.select_collector_classes(collector_names,
                                           all_fact_subsets,
                                           default_collectors.collectors)
class TestGetCollectorNames(unittest.TestCase):
    """Tests for collector.get_collector_names subset selection and
    include/exclude ('!') handling."""

    def test_none(self):
        # No arguments at all -> empty set.
        res = collector.get_collector_names()
        self.assertIsInstance(res, set)
        self.assertEqual(res, set([]))

    def test_empty_sets(self):
        res = collector.get_collector_names(valid_subsets=frozenset([]),
                                            minimal_gather_subset=frozenset([]),
                                            gather_subset=[])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set([]))

    def test_empty_valid_and_min_with_all_gather_subset(self):
        # 'all' expands to the valid subsets, which are empty here.
        res = collector.get_collector_names(valid_subsets=frozenset([]),
                                            minimal_gather_subset=frozenset([]),
                                            gather_subset=['all'])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set([]))

    def test_one_valid_with_all_gather_subset(self):
        valid_subsets = frozenset(['my_fact'])
        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=frozenset([]),
                                            gather_subset=['all'])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set(['my_fact']))

    def _compare_res(self, gather_subset1, gather_subset2,
                     valid_subsets=None, min_subset=None):
        """Run get_collector_names with two gather_subset orderings and
        return both results, so callers can assert order-independence."""
        valid_subsets = valid_subsets or frozenset()
        minimal_gather_subset = min_subset or frozenset()

        res1 = collector.get_collector_names(valid_subsets=valid_subsets,
                                             minimal_gather_subset=minimal_gather_subset,
                                             gather_subset=gather_subset1)

        res2 = collector.get_collector_names(valid_subsets=valid_subsets,
                                             minimal_gather_subset=minimal_gather_subset,
                                             gather_subset=gather_subset2)

        return res1, res2

    def test_not_all_other_order(self):
        # The result must not depend on the order of '!all' vs an include.
        valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['min_fact'])

        res1, res2 = self._compare_res(['!all', 'whatever'],
                                       ['whatever', '!all'],
                                       valid_subsets=valid_subsets,
                                       min_subset=minimal_gather_subset)
        self.assertEqual(res1, res2)
        self.assertEqual(res1, set(['min_fact', 'whatever']))

    def test_not_all_other_order_min(self):
        # Order-independence when excluding a specific minimal fact.
        valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['min_fact'])

        res1, res2 = self._compare_res(['!min_fact', 'whatever'],
                                       ['whatever', '!min_fact'],
                                       valid_subsets=valid_subsets,
                                       min_subset=minimal_gather_subset)
        self.assertEqual(res1, res2)
        self.assertEqual(res1, set(['whatever']))

    def test_one_minimal_with_all_gather_subset(self):
        my_fact = 'my_fact'
        valid_subsets = frozenset([my_fact])
        minimal_gather_subset = valid_subsets

        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['all'])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set(['my_fact']))

    def test_with_all_gather_subset(self):
        valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['my_fact'])

        # 'all' selects every valid subset, not just the minimal one.
        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['all'])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set(['my_fact', 'something_else', 'whatever']))

    def test_one_minimal_with_not_all_gather_subset(self):
        valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['my_fact'])

        # even with '!all', the minimal_gather_subset should be returned
        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['!all'])
        self.assertIsInstance(res, set)
        self.assertEqual(res, set(['my_fact']))

    def test_gather_subset_excludes(self):
        valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['min_fact', 'min_another'])

        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['!min_fact', '!whatever'])
        self.assertIsInstance(res, set)
        # 'min_another' is in minimal_gather_subset and was not excluded,
        # so it is the only name returned.
        self.assertEqual(res, set(['min_another']))

    def test_gather_subset_excludes_ordering(self):
        valid_subsets = frozenset(['my_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['my_fact'])

        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['!all', 'whatever'])
        self.assertIsInstance(res, set)
        # '!all' clears the valid subsets, but the explicit include
        # 'whatever' and the minimal_gather_subset are still returned.
        self.assertEqual(res, set(['my_fact', 'whatever']))

    def test_gather_subset_excludes_min(self):
        valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['min_fact'])

        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['whatever', '!min'])
        self.assertIsInstance(res, set)
        # '!min' excludes even the minimal_gather_subset facts, leaving
        # only the explicitly requested 'whatever'.
        self.assertEqual(res, set(['whatever']))

    def test_gather_subset_excludes_min_and_all(self):
        valid_subsets = frozenset(['min_fact', 'something_else', 'whatever'])
        minimal_gather_subset = frozenset(['min_fact'])

        res = collector.get_collector_names(valid_subsets=valid_subsets,
                                            minimal_gather_subset=minimal_gather_subset,
                                            gather_subset=['whatever', '!all', '!min'])
        self.assertIsInstance(res, set)
        # Excluding both 'all' and 'min' leaves only the explicit include.
        self.assertEqual(res, set(['whatever']))

    def test_invalid_gather_subset(self):
        # A gather_subset entry that is not a valid subset should raise.
        # (Renamed from the misspelled 'test_invaid_gather_subset'.)
        valid_subsets = frozenset(['my_fact', 'something_else'])
        minimal_gather_subset = frozenset(['my_fact'])

        # Raw string: '\:' is an invalid escape in a plain string literal
        # (DeprecationWarning on Python 3); the regex itself is unchanged.
        self.assertRaisesRegexp(TypeError,
                                r'Bad subset .* given to Ansible.*allowed\:.*all,.*my_fact.*',
                                collector.get_collector_names,
                                valid_subsets=valid_subsets,
                                minimal_gather_subset=minimal_gather_subset,
                                gather_subset=['my_fact', 'not_a_valid_gather_subset'])
class TestCollectorClassesFromGatherSubset(unittest.TestCase):
def _classes(self,
all_collector_classes=None,
valid_subsets=None,
minimal_gather_subset=None,
gather_subset=None,
gather_timeout=None):
return collector.collector_classes_from_gather_subset(all_collector_classes=all_collector_classes,
valid_subsets=valid_subsets,
minimal_gather_subset=minimal_gather_subset,
gather_subset=gather_subset,
gather_timeout=gather_timeout)
def test_no_args(self):
res = self._classes()
self.assertIsInstance(res, list)
self.assertEqual(res, [])
def test(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['!all'])
self.assertIsInstance(res, list)
self.assertEqual(res, [])
def test_env(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['env'])
self.assertIsInstance(res, list)
self.assertEqual(res, [default_collectors.EnvFactCollector])
def test_facter(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['env', 'facter']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.EnvFactCollector,
default_collectors.FacterFactCollector]))
def test_facter_ohai(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['env', 'facter', 'ohai']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.EnvFactCollector,
default_collectors.FacterFactCollector,
default_collectors.OhaiFactCollector]))
def test_just_facter(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=set(['facter']))
self.assertIsInstance(res, list)
self.assertEqual(set(res),
set([default_collectors.FacterFactCollector]))
def test_collector_specified_multiple_times(self):
res = self._classes(all_collector_classes=default_collectors.collectors,
gather_subset=['platform', 'all', 'machine'])
self.assertIsInstance(res, list)
self.assertIn(default_collectors.PlatformFactCollector,
res)
def test_unknown_collector(self):
# something claims 'unknown_collector' is a valid gather_subset, but there is
# no FactCollector mapped to 'unknown_collector'
self.assertRaisesRegexp(TypeError,
'Bad subset.*unknown_collector.*given to Ansible.*allowed\:.*all,.*env.*',
self._classes,
all_collector_classes=default_collectors.collectors,
gather_subset=['env', 'unknown_collector'])
| gpl-3.0 | -8,132,019,064,457,380,000 | 47.449568 | 106 | 0.562812 | false |
Thraxis/SickRage | lib/github/ContentFile.py | 74 | 6775 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import base64
import sys
import github.GithubObject
import github.Repository
atLeastPython3 = sys.hexversion >= 0x03000000
class ContentFile(github.GithubObject.CompletableGithubObject):
"""
This class represents ContentFiles as returned for example by http://developer.github.com/v3/todo
"""
@property
def content(self):
"""
:type: string
"""
self._completeIfNotSet(self._content)
return self._content.value
@property
def decoded_content(self):
assert self.encoding == "base64", "unsupported encoding: %s" % self.encoding
if atLeastPython3:
content = bytearray(self.content, "utf-8") # pragma no cover (covered by tests with Python 3.2)
else:
content = self.content
return base64.b64decode(content)
@property
def encoding(self):
"""
:type: string
"""
self._completeIfNotSet(self._encoding)
return self._encoding.value
@property
def git_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._git_url)
return self._git_url.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def path(self):
"""
:type: string
"""
self._completeIfNotSet(self._path)
return self._path.value
@property
def repository(self):
"""
:type: :class:`github.Repository.Repository`
"""
if self._repository is github.GithubObject.NotSet:
# The repository was not set automatically, so it must be looked up by url.
repo_url = "/".join(self.url.split("/")[:6]) # pragma no cover (Should be covered)
self._repository = github.GithubObject._ValuedAttribute(github.Repository.Repository(self._requester, self._headers, {'url': repo_url}, completed=False)) # pragma no cover (Should be covered)
return self._repository.value
@property
def sha(self):
"""
:type: string
"""
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def size(self):
"""
:type: integer
"""
self._completeIfNotSet(self._size)
return self._size.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def _initAttributes(self):
self._content = github.GithubObject.NotSet
self._encoding = github.GithubObject.NotSet
self._git_url = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._path = github.GithubObject.NotSet
self._repository = github.GithubObject.NotSet
self._sha = github.GithubObject.NotSet
self._size = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "content" in attributes: # pragma no branch
self._content = self._makeStringAttribute(attributes["content"])
if "encoding" in attributes: # pragma no branch
self._encoding = self._makeStringAttribute(attributes["encoding"])
if "git_url" in attributes: # pragma no branch
self._git_url = self._makeStringAttribute(attributes["git_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "repository" in attributes: # pragma no branch
self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| gpl-3.0 | 4,919,438,331,659,630,000 | 37.494318 | 204 | 0.544649 | false |
RubenKelevra/rethinkdb | external/v8_3.30.33.16/build/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
REPLACEMENTS = dict()
ARGUMENTS = None
class CmpTuple(object):
"""Compare function between 2 tuple."""
def __call__(self, x, y):
return cmp(x[0], y[0])
class CmpNode(object):
"""Compare function between 2 xml nodes."""
def __call__(self, x, y):
def get_string(node):
node_string = "node"
node_string += node.nodeName
if node.nodeValue:
node_string += node.nodeValue
if node.attributes:
# We first sort by name, if present.
node_string += node.getAttribute("Name")
all_nodes = []
for (name, value) in node.attributes.items():
all_nodes.append((name, value))
all_nodes.sort(CmpTuple())
for (name, value) in all_nodes:
node_string += name
node_string += value
return node_string
return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
if node.nodeType == Node.TEXT_NODE:
if node.data.strip():
print '%s%s' % (' '*indent, node.data.strip())
return
if node.childNodes:
node.normalize()
# Get the number of attributes
attr_count = 0
if node.attributes:
attr_count = node.attributes.length
# Print the main tag
if attr_count == 0:
print '%s<%s>' % (' '*indent, node.nodeName)
else:
print '%s<%s' % (' '*indent, node.nodeName)
all_attributes = []
for (name, value) in node.attributes.items():
all_attributes.append((name, value))
all_attributes.sort(CmpTuple())
for (name, value) in all_attributes:
print '%s %s="%s"' % (' '*indent, name, value)
print '%s>' % (' '*indent)
if node.nodeValue:
print '%s %s' % (' '*indent, node.nodeValue)
for sub_node in node.childNodes:
PrettyPrintNode(sub_node, indent=indent+2)
print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
"""Returns a list of all the node and sub nodes."""
node_list = []
if (node.attributes and
node.getAttribute('Name') == '_excluded_files'):
# We don't add the "_excluded_files" filter.
return []
for current in node.childNodes:
if current.nodeName == 'Filter':
node_list.extend(FlattenFilter(current))
else:
node_list.append(current)
return node_list
def FixFilenames(filenames, current_directory):
new_list = []
for filename in filenames:
if filename:
for key in REPLACEMENTS:
filename = filename.replace(key, REPLACEMENTS[key])
os.chdir(current_directory)
filename = filename.strip('"\' ')
if filename.startswith('$'):
new_list.append(filename)
else:
new_list.append(os.path.abspath(filename))
return new_list
def AbsoluteNode(node):
"""Makes all the properties we know about in this node absolute."""
if node.attributes:
for (name, value) in node.attributes.items():
if name in ['InheritedPropertySheets', 'RelativePath',
'AdditionalIncludeDirectories',
'IntermediateDirectory', 'OutputDirectory',
'AdditionalLibraryDirectories']:
# We want to fix up these paths
path_list = value.split(';')
new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
node.setAttribute(name, ';'.join(new_list))
if not value:
node.removeAttribute(name)
def CleanupVcproj(node):
"""For each sub node, we call recursively this function."""
for sub_node in node.childNodes:
AbsoluteNode(sub_node)
CleanupVcproj(sub_node)
# Normalize the node, and remove all extranous whitespaces.
for sub_node in node.childNodes:
if sub_node.nodeType == Node.TEXT_NODE:
sub_node.data = sub_node.data.replace("\r", "")
sub_node.data = sub_node.data.replace("\n", "")
sub_node.data = sub_node.data.rstrip()
# Fix all the semicolon separated attributes to be sorted, and we also
# remove the dups.
if node.attributes:
for (name, value) in node.attributes.items():
sorted_list = sorted(value.split(';'))
unique_list = []
for i in sorted_list:
if not unique_list.count(i):
unique_list.append(i)
node.setAttribute(name, ';'.join(unique_list))
if not value:
node.removeAttribute(name)
if node.childNodes:
node.normalize()
# For each node, take a copy, and remove it from the list.
node_array = []
while node.childNodes and node.childNodes[0]:
# Take a copy of the node and remove it from the list.
current = node.childNodes[0]
node.removeChild(current)
# If the child is a filter, we want to append all its children
# to this same list.
if current.nodeName == 'Filter':
node_array.extend(FlattenFilter(current))
else:
node_array.append(current)
# Sort the list.
node_array.sort(CmpNode())
# Insert the nodes in the correct order.
for new_node in node_array:
# But don't append empty tool node.
if new_node.nodeName == 'Tool':
if new_node.attributes and new_node.attributes.length == 1:
# This one was empty.
continue
if new_node.nodeName == 'UserMacro':
continue
node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
#TODO(nsylvain): Find a better way to navigate the xml.
nodes = []
for node in vcproj.childNodes:
if node.nodeName == "Configurations":
for sub_node in node.childNodes:
if sub_node.nodeName == "Configuration":
nodes.append(sub_node)
return nodes
def GetChildrenVsprops(filename):
dom = parse(filename)
if dom.documentElement.attributes:
vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
return []
def SeekToNode(node1, child2):
# A text node does not have properties.
if child2.nodeType == Node.TEXT_NODE:
return None
# Get the name of the current node.
current_name = child2.getAttribute("Name")
if not current_name:
# There is no name. We don't know how to merge.
return None
# Look through all the nodes to find a match.
for sub_node in node1.childNodes:
if sub_node.nodeName == child2.nodeName:
name = sub_node.getAttribute("Name")
if name == current_name:
return sub_node
# No match. We give up.
return None
def MergeAttributes(node1, node2):
# No attributes to merge?
if not node2.attributes:
return
for (name, value2) in node2.attributes.items():
# Don't merge the 'Name' attribute.
if name == 'Name':
continue
value1 = node1.getAttribute(name)
if value1:
# The attribute exist in the main node. If it's equal, we leave it
# untouched, otherwise we concatenate it.
if value1 != value2:
node1.setAttribute(name, ';'.join([value1, value2]))
else:
# The attribute does nto exist in the main node. We append this one.
node1.setAttribute(name, value2)
# If the attribute was a property sheet attributes, we remove it, since
# they are useless.
if name == 'InheritedPropertySheets':
node1.removeAttribute(name)
def MergeProperties(node1, node2):
MergeAttributes(node1, node2)
for child2 in node2.childNodes:
child1 = SeekToNode(node1, child2)
if child1:
MergeProperties(child1, child2)
else:
node1.appendChild(child2.cloneNode(True))
def main(argv):
"""Main function of this vcproj prettifier."""
global ARGUMENTS
ARGUMENTS = argv
# check if we have exactly 1 parameter.
if len(argv) < 2:
print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
'[key2=value2]' % argv[0])
return 1
# Parse the keys
for i in range(2, len(argv)):
(key, value) = argv[i].split('=')
REPLACEMENTS[key] = value
# Open the vcproj and parse the xml.
dom = parse(argv[1])
# First thing we need to do is find the Configuration Node and merge them
# with the vsprops they include.
for configuration_node in GetConfiguationNodes(dom.documentElement):
# Get the property sheets associated with this configuration.
vsprops = configuration_node.getAttribute('InheritedPropertySheets')
# Fix the filenames to be absolute.
vsprops_list = FixFilenames(vsprops.strip().split(';'),
os.path.dirname(argv[1]))
# Extend the list of vsprops with all vsprops contained in the current
# vsprops.
for current_vsprops in vsprops_list:
vsprops_list.extend(GetChildrenVsprops(current_vsprops))
# Now that we have all the vsprops, we need to merge them.
for current_vsprops in vsprops_list:
MergeProperties(configuration_node,
parse(current_vsprops).documentElement)
# Now that everything is merged, we need to cleanup the xml.
CleanupVcproj(dom.documentElement)
# Finally, we use the prett xml function to print the vcproj back to the
# user.
#print dom.toprettyxml(newl="\n")
PrettyPrintNode(dom.documentElement)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| agpl-3.0 | 4,147,145,683,473,757,700 | 28.136778 | 76 | 0.654913 | false |
sebdelsol/pyload | module/plugins/hoster/RapidshareCom.py | 1 | 7801 | # -*- coding: utf-8 -*-
import re
from module.network.RequestFactory import getURL
from module.plugins.Hoster import Hoster
def getInfo(urls):
ids = ""
names = ""
p = re.compile(RapidshareCom.__pattern__)
for url in urls:
r = p.search(url)
if r.group("name"):
ids += "," + r.group("id")
names += "," + r.group("name")
elif r.group("name_new"):
ids += "," + r.group("id_new")
names += "," + r.group("name_new")
url = "http://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=checkfiles&files=%s&filenames=%s" % (ids[1:], names[1:])
api = getURL(url)
result = []
i = 0
for res in api.split():
tmp = res.split(",")
if tmp[4] in ("0", "4", "5"):
status = 1
elif tmp[4] == "1":
status = 2
else:
status = 3
result.append((tmp[1], tmp[2], status, urls[i]))
i += 1
yield result
class RapidshareCom(Hoster):
__name__ = "RapidshareCom"
__type__ = "hoster"
__version__ = "1.40"
__pattern__ = r'https?://(?:www\.)?rapidshare\.com/(?:files/(?P<id>\d+)/(?P<name>[^?]+)|#!download\|(?:\w+)\|(?P<id_new>\d+)\|(?P<name_new>[^|]+))'
__description__ = """Rapidshare.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "spoob@pyload.org"),
("RaNaN", "ranan@pyload.org"),
("mkaay", "mkaay@mkaay.de")]
def setup(self):
self.no_download = True
self.api_data = None
self.offset = 0
self.dl_dict = {}
self.id = None
self.name = None
self.chunkLimit = -1 if self.premium else 1
self.multiDL = self.resumeDownload = self.premium
def process(self, pyfile):
self.url = pyfile.url
self.prepare()
def prepare(self):
m = re.match(self.__pattern__, self.url)
if m.group("name"):
self.id = m.group("id")
self.name = m.group("name")
else:
self.id = m.group("id_new")
self.name = m.group("name_new")
self.download_api_data()
if self.api_data['status'] == "1":
self.pyfile.name = self.get_file_name()
if self.premium:
self.handlePremium()
else:
self.handleFree()
elif self.api_data['status'] == "2":
self.logInfo(_("Rapidshare: Traffic Share (direct download)"))
self.pyfile.name = self.get_file_name()
self.download(self.pyfile.url, get={"directstart": 1})
elif self.api_data['status'] in ("0", "4", "5"):
self.offline()
elif self.api_data['status'] == "3":
self.tempOffline()
else:
self.error(_("Unknown response code"))
def handleFree(self):
while self.no_download:
self.dl_dict = self.freeWait()
#tmp = "#!download|%(server)s|%(id)s|%(name)s|%(size)s"
download = "http://%(host)s/cgi-bin/rsapi.cgi?sub=download&editparentlocation=0&bin=1&fileid=%(id)s&filename=%(name)s&dlauth=%(auth)s" % self.dl_dict
self.logDebug("RS API Request: %s" % download)
self.download(download, ref=False)
check = self.checkDownload({"ip": "You need RapidPro to download more files from your IP address",
"auth": "Download auth invalid"})
if check == "ip":
self.setWait(60)
self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
self.wait()
self.handleFree()
elif check == "auth":
self.logInfo(_("Invalid Auth Code, download will be restarted"))
self.offset += 5
self.handleFree()
def handlePremium(self):
info = self.account.getAccountInfo(self.user, True)
self.logDebug("Use Premium Account")
url = self.api_data['mirror']
self.download(url, get={"directstart": 1})
def download_api_data(self, force=False):
"""
http://images.rapidshare.com/apidoc.txt
"""
if self.api_data and not force:
return
api_url_base = "http://api.rapidshare.com/cgi-bin/rsapi.cgi"
api_param_file = {"sub": "checkfiles", "incmd5": "1", "files": self.id, "filenames": self.name}
html = self.load(api_url_base, cookies=False, get=api_param_file).strip()
self.logDebug("RS INFO API: %s" % html)
if html.startswith("ERROR"):
return
fields = html.split(",")
# status codes:
# 0=File not found
# 1=File OK (Anonymous downloading)
# 3=Server down
# 4=File marked as illegal
# 5=Anonymous file locked, because it has more than 10 downloads already
# 50+n=File OK (TrafficShare direct download type "n" without any logging.)
# 100+n=File OK (TrafficShare direct download type "n" with logging.
# Read our privacy policy to see what is logged.)
self.api_data = {"fileid": fields[0], "filename": fields[1], "size": int(fields[2]), "serverid": fields[3],
"status": fields[4], "shorthost": fields[5], "checksum": fields[6].strip().lower()}
if int(self.api_data['status']) > 100:
self.api_data['status'] = str(int(self.api_data['status']) - 100)
elif int(self.api_data['status']) > 50:
self.api_data['status'] = str(int(self.api_data['status']) - 50)
self.api_data['mirror'] = "http://rs%(serverid)s%(shorthost)s.rapidshare.com/files/%(fileid)s/%(filename)s" % self.api_data
def freeWait(self):
"""downloads html with the important information
"""
self.no_download = True
id = self.id
name = self.name
prepare = "https://api.rapidshare.com/cgi-bin/rsapi.cgi?sub=download&fileid=%(id)s&filename=%(name)s&try=1&cbf=RSAPIDispatcher&cbid=1" % {
"name": name, "id": id}
self.logDebug("RS API Request: %s" % prepare)
result = self.load(prepare, ref=False)
self.logDebug("RS API Result: %s" % result)
between_wait = re.search("You need to wait (\d+) seconds", result)
if "You need RapidPro to download more files from your IP address" in result:
self.setWait(60)
self.logInfo(_("Already downloading from this ip address, waiting 60 seconds"))
self.wait()
elif ("Too many users downloading from this server right now" in result or
"All free download slots are full" in result):
self.setWait(120)
self.logInfo(_("RapidShareCom: No free slots"))
self.wait()
elif "This file is too big to download it for free" in result:
self.fail(_("You need a premium account for this file"))
elif "Filename invalid." in result:
self.fail(_("Filename reported invalid"))
elif between_wait:
self.setWait(int(between_wait.group(1)), True)
self.wait()
else:
self.no_download = False
tmp, info = result.split(":")
data = info.split(",")
dl_dict = {"id": id,
"name": name,
"host": data[0],
"auth": data[1],
"server": self.api_data['serverid'],
"size": self.api_data['size']}
self.setWait(int(data[2]) + 2 + self.offset)
self.wait()
return dl_dict
def get_file_name(self):
if self.api_data['filename']:
return self.api_data['filename']
return self.url.split("/")[-1]
| gpl-3.0 | -1,073,814,628,659,868,700 | 33.214912 | 157 | 0.532239 | false |
PyMNtos/stacks | stacks/library/migrations/0003_auto__add_field_author_uuid.py | 1 | 2747 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field authors on 'Book'
m2m_table_name = db.shorten_name(u'library_book_authors')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('book', models.ForeignKey(orm[u'library.book'], null=False)),
('author', models.ForeignKey(orm[u'library.author'], null=False))
))
db.create_unique(m2m_table_name, ['book_id', 'author_id'])
# Adding field 'Author.uuid'
db.add_column(u'library_author', 'uuid',
self.gf('django.db.models.fields.CharField')(default='None', max_length=36, db_index=True),
keep_default=False)
def backwards(self, orm):
# Removing M2M table for field authors on 'Book'
db.delete_table(db.shorten_name(u'library_book_authors'))
# Deleting field 'Author.uuid'
db.delete_column(u'library_author', 'uuid')
models = {
u'library.author': {
'Meta': {'object_name': 'Author'},
'firstname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastname': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '36', 'db_index': 'True'})
},
u'library.book': {
'Meta': {'object_name': 'Book'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['library.Author']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isbn10': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True'}),
'isbn13': ('django.db.models.fields.CharField', [], {'max_length': '13', 'null': 'True'}),
'publish_date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'publisher': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'uuid': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '36', 'db_index': 'True'})
}
}
complete_apps = ['library'] | gpl-2.0 | 6,528,894,093,028,434,000 | 48.071429 | 137 | 0.56862 | false |
Jordonbc/GlassOS | Glass_OS/build/lib/GlassOS/libaries/requests/packages/urllib3/connection.py | 196 | 10286 | from __future__ import absolute_import
import datetime
import os
import sys
import socket
from socket import error as SocketError, timeout as SocketTimeout
import warnings
from .packages import six
try: # Python 3
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException # noqa: unused in this module
except ImportError:
from httplib import HTTPConnection as _HTTPConnection
from httplib import HTTPException # noqa: unused in this module
try: # Compiled with SSL?
import ssl
BaseSSLError = ssl.SSLError
except (ImportError, AttributeError): # Platform-specific: No SSL.
ssl = None
class BaseSSLError(BaseException):
pass
try: # Python 3:
# Not a no-op, we're adding this to the namespace so it can be imported.
ConnectionError = ConnectionError
except NameError: # Python 2:
class ConnectionError(Exception):
pass
from .exceptions import (
NewConnectionError,
ConnectTimeoutError,
SubjectAltNameWarning,
SystemTimeWarning,
)
from .packages.ssl_match_hostname import match_hostname
from .util.ssl_ import (
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
assert_fingerprint,
)
from .util import connection
#: Map of URL scheme to its default TCP port; used to pick ``default_port``
#: on the connection classes below.
port_by_scheme = {
    'http': 80,
    'https': 443,
}
# A date safely in the past. If the system clock reports an earlier date, the
# clock is badly wrong — which tends to break certificate-validity checks —
# so VerifiedHTTPSConnection.connect() emits a SystemTimeWarning.
RECENT_DATE = datetime.date(2014, 1, 1)
class DummyConnection(object):
    """Sentinel connection class.

    Installed as a pool's ``ConnectionCls`` when the real connection class
    could not be imported, so that the failure can be detected later instead
    of at import time.
    """
class HTTPConnection(_HTTPConnection, object):
    """
    Based on httplib.HTTPConnection but provides an extra constructor
    backwards-compatibility layer between older and newer Pythons.

    Additional keyword parameters are used to configure attributes of the connection.
    Accepted parameters include:

      - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
      - ``source_address``: Set the source address for the current connection.

        .. note:: This is ignored for Python 2.6. It is only applied for 2.7 and 3.x

      - ``socket_options``: Set specific options on the underlying socket. If not specified, then
        defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
        Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.

        For example, if you wish to enable TCP Keep Alive in addition to the defaults,
        you might pass::

            HTTPConnection.default_socket_options + [
                (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
            ]

        Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
    """

    default_port = port_by_scheme['http']

    #: Disable Nagle's algorithm by default.
    #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
    default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]

    #: Whether this connection verifies the host's certificate.
    is_verified = False

    def __init__(self, *args, **kw):
        if six.PY3:  # Python 3
            # http.client.HTTPConnection does not accept ``strict``; drop it.
            kw.pop('strict', None)

        # Pre-set source_address in case we have an older Python like 2.6.
        self.source_address = kw.get('source_address')

        if sys.version_info < (2, 7):  # Python 2.6
            # _HTTPConnection on Python 2.6 will balk at this keyword arg, but
            # not newer versions. We can still use it when creating a
            # connection though, so we pop it *after* we have saved it as
            # self.source_address.
            kw.pop('source_address', None)

        #: The socket options provided by the user. If no options are
        #: provided, we use the default options.
        self.socket_options = kw.pop('socket_options', self.default_socket_options)

        # Superclass also sets self.source_address in Python 2.7+.
        _HTTPConnection.__init__(self, *args, **kw)

    def _new_conn(self):
        """ Establish a socket connection and set nodelay settings on it.

        :return: New socket connection.
        :raises ConnectTimeoutError: if the connect attempt timed out.
        :raises NewConnectionError: for any other socket-level failure.
        """
        extra_kw = {}
        if self.source_address:
            extra_kw['source_address'] = self.source_address

        if self.socket_options:
            extra_kw['socket_options'] = self.socket_options

        try:
            conn = connection.create_connection(
                (self.host, self.port), self.timeout, **extra_kw)

        # The timeout exception itself carries no useful detail, so we do not
        # bind it; the message reports the host/timeout pair instead.
        except SocketTimeout:
            raise ConnectTimeoutError(
                self, "Connection to %s timed out. (connect timeout=%s)" %
                (self.host, self.timeout))

        except SocketError as e:
            raise NewConnectionError(
                self, "Failed to establish a new connection: %s" % e)

        return conn

    def _prepare_conn(self, conn):
        """Attach *conn* as this connection's socket and set up tunneling."""
        self.sock = conn
        # the _tunnel_host attribute was added in python 2.6.3 (via
        # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do
        # not have them.
        if getattr(self, '_tunnel_host', None):
            # TODO: Fix tunnel so it doesn't depend on self.sock state.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0

    def connect(self):
        """Open a new socket and prepare it (tunneling through a proxy if set)."""
        conn = self._new_conn()
        self._prepare_conn(conn)
class HTTPSConnection(HTTPConnection):
    """HTTPS connection that SSL-wraps its socket without certificate
    verification; see :class:`VerifiedHTTPSConnection` for the verifying
    variant."""

    default_port = port_by_scheme['https']

    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw):
        HTTPConnection.__init__(self, host, port, strict=strict,
                                timeout=timeout, **kw)

        self.key_file = key_file
        self.cert_file = cert_file

        # Without this attribute, Google AppEngine 1.9.0 sends these requests
        # out as plain HTTP. (See Issue #356)
        self._protocol = 'https'

    def connect(self):
        sock = self._new_conn()
        self._prepare_conn(sock)
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file)
class VerifiedHTTPSConnection(HTTPSConnection):
    """
    Based on httplib.HTTPSConnection but wraps the socket with
    SSL certification.
    """
    # Class-level defaults; instances override these via set_cert() or by
    # direct attribute assignment before connect() is called.
    cert_reqs = None
    ca_certs = None
    ca_cert_dir = None
    ssl_version = None
    assert_fingerprint = None
    def set_cert(self, key_file=None, cert_file=None,
                 cert_reqs=None, ca_certs=None,
                 assert_hostname=None, assert_fingerprint=None,
                 ca_cert_dir=None):
        """Configure client certificate and server-verification settings
        used by connect().  Paths in ``ca_certs``/``ca_cert_dir`` are
        ``~``-expanded.
        """
        # Providing CA material implies the caller wants verification
        # unless they said otherwise.
        if (ca_certs or ca_cert_dir) and cert_reqs is None:
            cert_reqs = 'CERT_REQUIRED'
        self.key_file = key_file
        self.cert_file = cert_file
        self.cert_reqs = cert_reqs
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
        self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
    def connect(self):
        # Add certificate verification
        conn = self._new_conn()
        resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs)
        resolved_ssl_version = resolve_ssl_version(self.ssl_version)
        hostname = self.host
        if getattr(self, '_tunnel_host', None):
            # _tunnel_host was added in Python 2.6.3
            # (See: http://hg.python.org/cpython/rev/0f57b30a152f)
            self.sock = conn
            # Calls self._set_hostport(), so self.host is
            # self._tunnel_host below.
            self._tunnel()
            # Mark this connection as not reusable
            self.auto_open = 0
            # Override the host with the one we're requesting data from.
            hostname = self._tunnel_host
        # A clock set before RECENT_DATE almost certainly means certificate
        # validity windows will be misjudged; warn up front.
        is_time_off = datetime.date.today() < RECENT_DATE
        if is_time_off:
            warnings.warn((
                'System time is way off (before {0}). This will probably '
                'lead to SSL verification errors').format(RECENT_DATE),
                SystemTimeWarning
            )
        # Wrap socket using verification with the root certs in
        # trusted_root_certs
        self.sock = ssl_wrap_socket(conn, self.key_file, self.cert_file,
                                    cert_reqs=resolved_cert_reqs,
                                    ca_certs=self.ca_certs,
                                    ca_cert_dir=self.ca_cert_dir,
                                    server_hostname=hostname,
                                    ssl_version=resolved_ssl_version)
        # Fingerprint pinning takes precedence over hostname matching.
        if self.assert_fingerprint:
            assert_fingerprint(self.sock.getpeercert(binary_form=True),
                               self.assert_fingerprint)
        elif resolved_cert_reqs != ssl.CERT_NONE \
                and self.assert_hostname is not False:
            cert = self.sock.getpeercert()
            if not cert.get('subjectAltName', ()):
                warnings.warn((
                    'Certificate for {0} has no `subjectAltName`, falling back to check for a '
                    '`commonName` for now. This feature is being removed by major browsers and '
                    'deprecated by RFC 2818. (See https://github.com/shazow/urllib3/issues/497 '
                    'for details.)'.format(hostname)),
                    SubjectAltNameWarning
                )
            # In case the hostname is an IPv6 address, strip the square
            # brackets from it before using it to validate. This is because
            # a certificate with an IPv6 address in it won't have square
            # brackets around that address. Sadly, match_hostname won't do this
            # for us: it expects the plain host part without any extra work
            # that might have been done to make it palatable to httplib.
            asserted_hostname = self.assert_hostname or hostname
            asserted_hostname = asserted_hostname.strip('[]')
            match_hostname(cert, asserted_hostname)
        self.is_verified = (resolved_cert_reqs == ssl.CERT_REQUIRED or
                            self.assert_fingerprint is not None)
if ssl:
    # Make a copy for testing.
    UnverifiedHTTPSConnection = HTTPSConnection
    # With ssl available, the verified implementation becomes the default
    # HTTPSConnection exported by this module.
    HTTPSConnection = VerifiedHTTPSConnection
else:
    # ssl unavailable: fall back to DummyConnection (defined elsewhere;
    # presumably errors on use — confirm against the module's imports).
    HTTPSConnection = DummyConnection
| mit | 3,071,038,301,343,374,000 | 34.715278 | 99 | 0.615497 | false |
lamby/redis-py | benchmarks/command_packer_benchmark.py | 49 | 3338 | import socket
import sys
from redis.connection import (Connection, SYM_STAR, SYM_DOLLAR, SYM_EMPTY,
SYM_CRLF, b)
from redis._compat import imap
from base import Benchmark
class StringJoiningConnection(Connection):
    """Connection variant whose ``pack_command`` assembles the entire wire
    payload into one joined string before sending."""

    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        try:
            self._sock.sendall(command)
        except socket.error:
            e = sys.exc_info()[1]
            self.disconnect()
            # socket.error carries either (message,) or (errno, message).
            _errno, errmsg = (('UNKNOWN', e.args[0]) if len(e.args) == 1
                              else e.args)
            raise ConnectionError("Error %s while writing to socket. %s." %
                                  (_errno, errmsg))
        except:
            self.disconnect()
            raise

    def pack_command(self, *args):
        "Pack a series of arguments into a value Redis command"
        # One "$<len>\r\n<value>\r\n" segment per encoded argument.
        segments = [
            SYM_EMPTY.join((SYM_DOLLAR, b(str(len(item))), SYM_CRLF,
                            item, SYM_CRLF))
            for item in imap(self.encode, args)]
        header = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF))
        return header + SYM_EMPTY.join(segments)
class ListJoiningConnection(Connection):
    """Connection variant whose ``pack_command`` returns a list of string
    chunks; ``send_packed_command`` sends each chunk separately."""
    def send_packed_command(self, command):
        # ``command`` may be a plain string or the chunk list produced by
        # pack_command below.
        if not self._sock:
            self.connect()
        try:
            if isinstance(command, str):
                command = [command]
            for item in command:
                self._sock.sendall(item)
        except socket.error:
            e = sys.exc_info()[1]
            self.disconnect()
            # socket.error carries either (message,) or (errno, message).
            if len(e.args) == 1:
                _errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                _errno, errmsg = e.args
            raise ConnectionError("Error %s while writing to socket. %s." %
                                  (_errno, errmsg))
        except:
            # Any other failure: drop the connection and re-raise unchanged.
            self.disconnect()
            raise
    def pack_command(self, *args):
        # Accumulate small pieces into ``buff``; once the buffer or the next
        # value exceeds 6000 bytes, flush the buffer and append the large
        # value as its own list item so big values are never re-copied.
        output = []
        buff = SYM_EMPTY.join(
            (SYM_STAR, b(str(len(args))), SYM_CRLF))
        for k in imap(self.encode, args):
            if len(buff) > 6000 or len(k) > 6000:
                buff = SYM_EMPTY.join(
                    (buff, SYM_DOLLAR, b(str(len(k))), SYM_CRLF))
                output.append(buff)
                output.append(k)
                buff = SYM_CRLF
            else:
                buff = SYM_EMPTY.join((buff, SYM_DOLLAR, b(str(len(k))),
                                       SYM_CRLF, k, SYM_CRLF))
        output.append(buff)
        return output
class CommandPackerBenchmark(Benchmark):
    """Benchmark SET commands across both command-packing strategies and a
    range of value sizes."""

    ARGUMENTS = (
        {
            'name': 'connection_class',
            'values': [StringJoiningConnection, ListJoiningConnection]
        },
        {
            'name': 'value_size',
            'values': [10, 100, 1000, 10000, 100000, 1000000, 10000000,
                       100000000]
        },
    )

    def setup(self, connection_class, value_size):
        # Prime the client with the connection class under test.
        self.get_client(connection_class=connection_class)

    def run(self, connection_class, value_size):
        # Time a single SET of a value_size-byte payload.
        client = self.get_client()
        payload = 'a' * value_size
        client.set('benchmark', payload)
if __name__ == '__main__':
    # Entry point: run the full benchmark matrix defined in ARGUMENTS.
    CommandPackerBenchmark().run_benchmark()
| mit | 2,677,731,210,954,243,000 | 31.096154 | 79 | 0.509587 | false |
sindhus/hasjob | hasjob/views/login.py | 4 | 1781 | # -*- coding: utf-8 -*-
from sqlalchemy.exc import IntegrityError
from flask import g, Response, redirect, flash
from flask.ext.lastuser import signal_user_session_refreshed
from coaster.views import get_next_url
from baseframe import csrf
from .. import app, lastuser
from ..signals import signal_login, signal_logout
from ..models import db, UserActiveAt
@app.route('/500')
def error500():
    """Deliberately raise an exception so the 500 error page can be tested."""
    raise Exception("Something b0rked")
@app.route('/login')
@lastuser.login_handler
def login():
    """Start a Lastuser login, requesting the listed resource scopes."""
    return {'scope': 'id email/* phone/* organizations/* teams/* notice/*'}
@app.route('/logout')
@lastuser.logout_handler
def logout():
    """Log the user out, notifying logout listeners before redirecting."""
    flash(u"You are now logged out", category='info')
    signal_logout.send(app, user=g.user)
    return get_next_url()
@app.route('/login/redirect')
@lastuser.auth_handler
def lastuserauth():
    """Complete the Lastuser OAuth handshake, then redirect onward."""
    signal_login.send(app, user=g.user)
    # Persist any session/user changes made during login before redirecting.
    db.session.commit()
    return redirect(get_next_url())
@csrf.exempt
@app.route('/login/notify', methods=['POST'])
@lastuser.notification_handler
def lastusernotify(user):
    """Handle a server-to-server notification from Lastuser; persist changes."""
    db.session.commit()
@lastuser.auth_error_handler
def lastuser_error(error, error_description=None, error_uri=None):
    """Handle OAuth errors from Lastuser: flash-and-redirect when the user
    denied access, plain-text error report for anything else."""
    if error == 'access_denied':
        flash("You denied the request to login", category='error')
        return redirect(get_next_url())
    body = u"Error: %s\nDescription: %s\nURI: %s" % (
        error, error_description, error_uri)
    return Response(body, mimetype="text/plain")
@signal_user_session_refreshed.connect
def track_user(user):
    """Record user activity on the current board when a session refreshes."""
    db.session.add(UserActiveAt(user=user, board=g.board))
    try:
        db.session.commit()
    except IntegrityError:  # Small but not impossible chance we got two parallel signals
        db.session.rollback()
| agpl-3.0 | 7,547,748,057,554,435,000 | 26.4 | 89 | 0.683324 | false |
RockySteveJobs/python-for-android | python-modules/twisted/twisted/test/test_hook.py | 81 | 4290 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.hook module.
"""
from twisted.python import hook
from twisted.trial import unittest
class BaseClass:
    """
    Helper for the hook tests: counts calls to ``func`` and to the
    pre/post hooks installed around it.
    """

    def __init__(self):
        """
        Start all call counters at zero.
        """
        self.calledBasePre = 0
        self.calledBasePost = 0
        self.calledBase = 0

    def func(self, a, b):
        """
        Record one call; the arguments are expected to be 1 and 2.
        """
        assert a == 1
        assert b == 2
        self.calledBase += 1
class SubClass(BaseClass):
    """
    Subclass of the hook-test helper; layers its own call counters on top
    of the base class's.
    """

    def __init__(self):
        """
        Initialize base counters, then add the subclass counters.
        """
        BaseClass.__init__(self)
        self.calledSubPre = 0
        self.calledSubPost = 0
        self.calledSub = 0

    def func(self, a, b):
        """
        Delegate to the base implementation, then record the subclass call.
        """
        assert a == 1
        assert b == 2
        BaseClass.func(self, a, b)
        self.calledSub += 1
# Pristine snapshots of the class dicts, so each test can restore the
# classes after hooks have been installed (see HookTestCase.setUp).
_clean_BaseClass = BaseClass.__dict__.copy()
_clean_SubClass = SubClass.__dict__.copy()
def basePre(base, a, b):
    """
    Pre-hook for BaseClass.func: bump the instance's pre-call counter.
    """
    base.calledBasePre += 1
def basePost(base, a, b):
    """
    Post-hook for BaseClass.func: bump the instance's post-call counter.
    """
    base.calledBasePost += 1
def subPre(sub, a, b):
    """
    Pre-hook for SubClass.func: bump the instance's pre-call counter.
    """
    sub.calledSubPre += 1
def subPost(sub, a, b):
    """
    Post-hook for SubClass.func: bump the instance's post-call counter.
    """
    sub.calledSubPost += 1
class HookTestCase(unittest.TestCase):
    """
    Verify that twisted.python.hook installs and removes pre/post hooks
    around methods, both on a base class and across an inheritance chain.
    """
    def setUp(self):
        """Make sure we have clean versions of our classes."""
        BaseClass.__dict__.clear()
        BaseClass.__dict__.update(_clean_BaseClass)
        SubClass.__dict__.clear()
        SubClass.__dict__.update(_clean_SubClass)
    def testBaseHook(self):
        """make sure that the base class's hook is called reliably
        """
        base = BaseClass()
        self.assertEquals(base.calledBase, 0)
        self.assertEquals(base.calledBasePre, 0)
        base.func(1,2)
        self.assertEquals(base.calledBase, 1)
        self.assertEquals(base.calledBasePre, 0)
        # Installing a pre-hook: subsequent calls fire it before func.
        hook.addPre(BaseClass, "func", basePre)
        base.func(1, b=2)
        self.assertEquals(base.calledBase, 2)
        self.assertEquals(base.calledBasePre, 1)
        hook.addPost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEquals(base.calledBasePost, 1)
        self.assertEquals(base.calledBase, 3)
        self.assertEquals(base.calledBasePre, 2)
        # After removal, neither hook fires but func itself still runs.
        hook.removePre(BaseClass, "func", basePre)
        hook.removePost(BaseClass, "func", basePost)
        base.func(1, b=2)
        self.assertEquals(base.calledBasePost, 1)
        self.assertEquals(base.calledBase, 4)
        self.assertEquals(base.calledBasePre, 2)
    def testSubHook(self):
        """test interactions between base-class hooks and subclass hooks
        """
        sub = SubClass()
        self.assertEquals(sub.calledSub, 0)
        self.assertEquals(sub.calledBase, 0)
        sub.func(1, b=2)
        self.assertEquals(sub.calledSub, 1)
        self.assertEquals(sub.calledBase, 1)
        hook.addPre(SubClass, 'func', subPre)
        self.assertEquals(sub.calledSub, 1)
        self.assertEquals(sub.calledBase, 1)
        self.assertEquals(sub.calledSubPre, 0)
        self.assertEquals(sub.calledBasePre, 0)
        sub.func(1, b=2)
        self.assertEquals(sub.calledSub, 2)
        self.assertEquals(sub.calledBase, 2)
        self.assertEquals(sub.calledSubPre, 1)
        self.assertEquals(sub.calledBasePre, 0)
        # let the pain begin
        # Hook the base class too; calling the base method directly on the
        # subclass instance fires only the base pre-hook.
        hook.addPre(BaseClass, 'func', basePre)
        BaseClass.func(sub, 1, b=2)
        # sub.func(1, b=2)
        self.assertEquals(sub.calledBase, 3)
        self.assertEquals(sub.calledBasePre, 1, str(sub.calledBasePre))
        sub.func(1, b=2)
        self.assertEquals(sub.calledBasePre, 2)
        self.assertEquals(sub.calledBase, 4)
        self.assertEquals(sub.calledSubPre, 2)
        self.assertEquals(sub.calledSub, 3)
# Test case classes exported by this module (presumably collected by the
# trial test runner — confirm against the runner's discovery rules).
testCases = [HookTestCase]
| apache-2.0 | 4,409,256,285,021,896,000 | 27.6 | 72 | 0.60303 | false |
devGregA/code | build/lib.linux-x86_64-2.7/scrapy/tests/test_http_request.py | 33 | 30578 | import cgi
import unittest
from six.moves import xmlrpc_client as xmlrpclib
from six.moves.urllib.parse import urlparse
from scrapy.http import Request, FormRequest, XmlRpcRequest, Headers, HtmlResponse
class RequestTest(unittest.TestCase):
    """Tests for scrapy.http.Request; subclasses override the class-level
    attributes below to re-run the same suite against other request types."""

    request_class = Request
    default_method = 'GET'
    default_headers = {}
    default_meta = {}
    def test_init(self):
        # Request requires url in the constructor
        self.assertRaises(Exception, self.request_class)
        # url argument must be basestring
        self.assertRaises(TypeError, self.request_class, 123)
        r = self.request_class('http://www.example.com')
        r = self.request_class("http://www.example.com")
        assert isinstance(r.url, str)
        self.assertEqual(r.url, "http://www.example.com")
        self.assertEqual(r.method, self.default_method)
        assert isinstance(r.headers, Headers)
        self.assertEqual(r.headers, self.default_headers)
        self.assertEqual(r.meta, self.default_meta)
        meta = {"lala": "lolo"}
        headers = {"caca": "coco"}
        r = self.request_class("http://www.example.com", meta=meta, headers=headers, body="a body")
        assert r.meta is not meta
        self.assertEqual(r.meta, meta)
        assert r.headers is not headers
        self.assertEqual(r.headers["caca"], "coco")
    def test_url_no_scheme(self):
        self.assertRaises(ValueError, self.request_class, 'foo')
    def test_headers(self):
        # Different ways of setting headers attribute
        url = 'http://www.scrapy.org'
        headers = {'Accept':'gzip', 'Custom-Header':'nothing to tell you'}
        r = self.request_class(url=url, headers=headers)
        p = self.request_class(url=url, headers=r.headers)
        self.assertEqual(r.headers, p.headers)
        self.assertFalse(r.headers is headers)
        self.assertFalse(p.headers is r.headers)
        # headers must not be unicode
        h = Headers({'key1': u'val1', u'key2': 'val2'})
        h[u'newkey'] = u'newval'
        for k, v in h.iteritems():
            self.assert_(isinstance(k, str))
            for s in v:
                self.assert_(isinstance(s, str))
    def test_eq(self):
        url = 'http://www.scrapy.org'
        r1 = self.request_class(url=url)
        r2 = self.request_class(url=url)
        self.assertNotEqual(r1, r2)
        set_ = set()
        set_.add(r1)
        set_.add(r2)
        self.assertEqual(len(set_), 2)
    def test_url(self):
        """Request url tests"""
        r = self.request_class(url="http://www.scrapy.org/path")
        self.assertEqual(r.url, "http://www.scrapy.org/path")
        # url quoting on creation
        r = self.request_class(url="http://www.scrapy.org/blank%20space")
        self.assertEqual(r.url, "http://www.scrapy.org/blank%20space")
        r = self.request_class(url="http://www.scrapy.org/blank space")
        self.assertEqual(r.url, "http://www.scrapy.org/blank%20space")
        # url encoding
        r1 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="utf-8")
        r2 = self.request_class(url=u"http://www.scrapy.org/price/\xa3", encoding="latin1")
        self.assertEqual(r1.url, "http://www.scrapy.org/price/%C2%A3")
        self.assertEqual(r2.url, "http://www.scrapy.org/price/%A3")
    def test_body(self):
        r1 = self.request_class(url="http://www.example.com/")
        assert r1.body == ''
        r2 = self.request_class(url="http://www.example.com/", body="")
        assert isinstance(r2.body, str)
        self.assertEqual(r2.encoding, 'utf-8') # default encoding
        r3 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='utf-8')
        assert isinstance(r3.body, str)
        self.assertEqual(r3.body, "Price: \xc2\xa3100")
        r4 = self.request_class(url="http://www.example.com/", body=u"Price: \xa3100", encoding='latin1')
        assert isinstance(r4.body, str)
        self.assertEqual(r4.body, "Price: \xa3100")
    def test_ajax_url(self):
        # ascii url
        r = self.request_class(url="http://www.example.com/ajax.html#!key=value")
        self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue")
        # unicode url
        r = self.request_class(url=u"http://www.example.com/ajax.html#!key=value")
        self.assertEqual(r.url, "http://www.example.com/ajax.html?_escaped_fragment_=key%3Dvalue")
    def test_copy(self):
        """Test Request copy"""
        def somecallback():
            pass
        r1 = self.request_class("http://www.example.com", callback=somecallback, errback=somecallback)
        r1.meta['foo'] = 'bar'
        r2 = r1.copy()
        # make sure copy does not propagate callbacks
        assert r1.callback is somecallback
        assert r1.errback is somecallback
        assert r2.callback is r1.callback
        # FIX: was `r2.errback is r2.errback` — a tautology that compared the
        # copy's errback against itself and therefore tested nothing.
        assert r2.errback is r1.errback
        # make sure meta dict is shallow copied
        assert r1.meta is not r2.meta, "meta must be a shallow copy, not identical"
        self.assertEqual(r1.meta, r2.meta)
        # make sure headers attribute is shallow copied
        assert r1.headers is not r2.headers, "headers must be a shallow copy, not identical"
        self.assertEqual(r1.headers, r2.headers)
        self.assertEqual(r1.encoding, r2.encoding)
        self.assertEqual(r1.dont_filter, r2.dont_filter)
        # Request.body can be identical since it's an immutable object (str)
    def test_copy_inherited_classes(self):
        """Test Request children copies preserve their class"""
        class CustomRequest(self.request_class):
            pass
        r1 = CustomRequest('http://www.example.com')
        r2 = r1.copy()
        assert type(r2) is CustomRequest
    def test_replace(self):
        """Test Request.replace() method"""
        r1 = self.request_class("http://www.example.com", method='GET')
        hdrs = Headers(dict(r1.headers, key='value'))
        r2 = r1.replace(method="POST", body="New body", headers=hdrs)
        self.assertEqual(r1.url, r2.url)
        self.assertEqual((r1.method, r2.method), ("GET", "POST"))
        self.assertEqual((r1.body, r2.body), ('', "New body"))
        self.assertEqual((r1.headers, r2.headers), (self.default_headers, hdrs))
        # Empty attributes (which may fail if not compared properly)
        r3 = self.request_class("http://www.example.com", meta={'a': 1}, dont_filter=True)
        r4 = r3.replace(url="http://www.example.com/2", body='', meta={}, dont_filter=False)
        self.assertEqual(r4.url, "http://www.example.com/2")
        self.assertEqual(r4.body, '')
        self.assertEqual(r4.meta, {})
        assert r4.dont_filter is False
    def test_method_always_str(self):
        r = self.request_class("http://www.example.com", method=u"POST")
        assert isinstance(r.method, str)
    def test_immutable_attributes(self):
        r = self.request_class("http://example.com")
        self.assertRaises(AttributeError, setattr, r, 'url', 'http://example2.com')
        self.assertRaises(AttributeError, setattr, r, 'body', 'xxx')
class FormRequestTest(RequestTest):
request_class = FormRequest
def assertSortedEqual(self, first, second, msg=None):
return self.assertEqual(sorted(first), sorted(second), msg)
def test_empty_formdata(self):
r1 = self.request_class("http://www.example.com", formdata={})
self.assertEqual(r1.body, '')
def test_default_encoding(self):
# using default encoding (utf-8)
data = {'one': 'two', 'price': '\xc2\xa3 100'}
r2 = self.request_class("http://www.example.com", formdata=data)
self.assertEqual(r2.method, 'POST')
self.assertEqual(r2.encoding, 'utf-8')
self.assertSortedEqual(r2.body.split('&'),
'price=%C2%A3+100&one=two'.split('&'))
self.assertEqual(r2.headers['Content-Type'], 'application/x-www-form-urlencoded')
def test_custom_encoding(self):
data = {'price': u'\xa3 100'}
r3 = self.request_class("http://www.example.com", formdata=data, encoding='latin1')
self.assertEqual(r3.encoding, 'latin1')
self.assertEqual(r3.body, 'price=%A3+100')
def test_multi_key_values(self):
# using multiples values for a single key
data = {'price': u'\xa3 100', 'colours': ['red', 'blue', 'green']}
r3 = self.request_class("http://www.example.com", formdata=data)
self.assertSortedEqual(r3.body.split('&'),
'colours=red&colours=blue&colours=green&price=%C2%A3+100'.split('&'))
def test_from_response_post(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html")
req = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'})
self.assertEqual(req.method, 'POST')
self.assertEqual(req.headers['Content-type'], 'application/x-www-form-urlencoded')
self.assertEqual(req.url, "http://www.example.com/this/post.php")
fs = _qs(req)
self.assertEqual(set(fs["test"]), set(["val1", "val2"]))
self.assertEqual(set(fs["one"]), set(["two", "three"]))
self.assertEqual(fs['test2'], ['xxx'])
self.assertEqual(fs['six'], ['seven'])
def test_from_response_extra_headers(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""")
req = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'},
headers={"Accept-Encoding": "gzip,deflate"})
self.assertEqual(req.method, 'POST')
self.assertEqual(req.headers['Content-type'], 'application/x-www-form-urlencoded')
self.assertEqual(req.headers['Accept-Encoding'], 'gzip,deflate')
def test_from_response_get(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""",
url="http://www.example.com/this/list.html")
r1 = self.request_class.from_response(response,
formdata={'one': ['two', 'three'], 'six': 'seven'})
self.assertEqual(r1.method, 'GET')
self.assertEqual(urlparse(r1.url).hostname, "www.example.com")
self.assertEqual(urlparse(r1.url).path, "/this/get.php")
fs = _qs(r1)
self.assertEqual(set(fs['test']), set(['val1', 'val2']))
self.assertEqual(set(fs['one']), set(['two', 'three']))
self.assertEqual(fs['test2'], ['xxx'])
self.assertEqual(fs['six'], ['seven'])
def test_from_response_override_params(self):
response = _buildresponse(
"""<form action="get.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'})
fs = _qs(req)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_override_method(self):
response = _buildresponse(
'''<html><body>
<form action="/app"></form>
</body></html>''')
request = FormRequest.from_response(response)
self.assertEqual(request.method, 'GET')
request = FormRequest.from_response(response, method='POST')
self.assertEqual(request.method, 'POST')
def test_from_response_override_url(self):
response = _buildresponse(
'''<html><body>
<form action="/app"></form>
</body></html>''')
request = FormRequest.from_response(response)
self.assertEqual(request.url, 'http://example.com/app')
request = FormRequest.from_response(response, url='http://foo.bar/absolute')
self.assertEqual(request.url, 'http://foo.bar/absolute')
request = FormRequest.from_response(response, url='/relative')
self.assertEqual(request.url, 'http://example.com/relative')
def test_from_response_submit_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'})
fs = _qs(req)
self.assertEqual(fs['clickable1'], ['clicked1'])
self.assertFalse('clickable2' in fs, fs)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_submit_not_first_clickable(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
req = self.request_class.from_response(response, formdata={'two': '2'}, \
clickdata={'name': 'clickable2'})
fs = _qs(req)
self.assertEqual(fs['clickable2'], ['clicked2'])
self.assertFalse('clickable1' in fs, fs)
self.assertEqual(fs['one'], ['1'])
self.assertEqual(fs['two'], ['2'])
def test_from_response_dont_submit_image_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="image" name="i2" src="http://my.image.org/1.jpg">
<input type="submit" name="i3" value="i3v">
</form>""")
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v']})
def test_from_response_dont_submit_reset_as_input(self):
response = _buildresponse(
"""<form>
<input type="hidden" name="i1" value="i1v">
<input type="text" name="i2" value="i2v">
<input type="reset" name="resetme">
<input type="submit" name="i3" value="i3v">
</form>""")
req = self.request_class.from_response(response, dont_click=True)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v'], 'i2': ['i2v']})
def test_from_response_multiple_clickdata(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable" value="clicked1">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="one" value="clicked1">
<input type="hidden" name="two" value="clicked2">
</form>""")
req = self.request_class.from_response(response, \
clickdata={'name': 'clickable', 'value': 'clicked2'})
fs = _qs(req)
self.assertEqual(fs['clickable'], ['clicked2'])
self.assertEqual(fs['one'], ['clicked1'])
self.assertEqual(fs['two'], ['clicked2'])
def test_from_response_unicode_clickdata(self):
response = _buildresponse(
u"""<form action="get.php" method="GET">
<input type="submit" name="price in \u00a3" value="\u00a3 1000">
<input type="submit" name="price in \u20ac" value="\u20ac 2000">
<input type="hidden" name="poundsign" value="\u00a3">
<input type="hidden" name="eurosign" value="\u20ac">
</form>""")
req = self.request_class.from_response(response, \
clickdata={'name': u'price in \u00a3'})
fs = _qs(req)
self.assertTrue(fs[u'price in \u00a3'.encode('utf-8')])
def test_from_response_multiple_forms_clickdata(self):
response = _buildresponse(
"""<form name="form1">
<input type="submit" name="clickable" value="clicked1">
<input type="hidden" name="field1" value="value1">
</form>
<form name="form2">
<input type="submit" name="clickable" value="clicked2">
<input type="hidden" name="field2" value="value2">
</form>
""")
req = self.request_class.from_response(response, formname='form2', \
clickdata={'name': 'clickable'})
fs = _qs(req)
self.assertEqual(fs['clickable'], ['clicked2'])
self.assertEqual(fs['field2'], ['value2'])
self.assertFalse('field1' in fs, fs)
def test_from_response_override_clickable(self):
response = _buildresponse('''<form><input type="submit" name="clickme" value="one"> </form>''')
req = self.request_class.from_response(response, \
formdata={'clickme': 'two'}, clickdata={'name': 'clickme'})
fs = _qs(req)
self.assertEqual(fs['clickme'], ['two'])
def test_from_response_dont_click(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
r1 = self.request_class.from_response(response, dont_click=True)
fs = _qs(r1)
self.assertFalse('clickable1' in fs, fs)
self.assertFalse('clickable2' in fs, fs)
def test_from_response_ambiguous_clickdata(self):
response = _buildresponse(
"""
<form action="get.php" method="GET">
<input type="submit" name="clickable1" value="clicked1">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="3">
<input type="submit" name="clickable2" value="clicked2">
</form>""")
self.assertRaises(ValueError, self.request_class.from_response,
response, clickdata={'type': 'submit'})
def test_from_response_non_matching_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable" value="clicked">
</form>""")
self.assertRaises(ValueError, self.request_class.from_response,
response, clickdata={'nonexistent': 'notme'})
def test_from_response_nr_index_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable1" value="clicked1">
<input type="submit" name="clickable2" value="clicked2">
</form>
""")
req = self.request_class.from_response(response, clickdata={'nr': 1})
fs = _qs(req)
self.assertIn('clickable2', fs)
self.assertNotIn('clickable1', fs)
def test_from_response_invalid_nr_index_clickdata(self):
response = _buildresponse(
"""<form>
<input type="submit" name="clickable" value="clicked">
</form>
""")
self.assertRaises(ValueError, self.request_class.from_response,
response, clickdata={'nr': 1})
def test_from_response_errors_noform(self):
response = _buildresponse("""<html></html>""")
self.assertRaises(ValueError, self.request_class.from_response, response)
def test_from_response_invalid_html5(self):
response = _buildresponse("""<!DOCTYPE html><body></html><form>"""
"""<input type="text" name="foo" value="xxx">"""
"""</form></body></html>""")
req = self.request_class.from_response(response, formdata={'bar': 'buz'})
fs = _qs(req)
self.assertEqual(fs, {'foo': ['xxx'], 'bar': ['buz']})
def test_from_response_errors_formnumber(self):
response = _buildresponse(
"""<form action="get.php" method="GET">
<input type="hidden" name="test" value="val1">
<input type="hidden" name="test" value="val2">
<input type="hidden" name="test2" value="xxx">
</form>""")
self.assertRaises(IndexError, self.request_class.from_response, response, formnumber=1)
def test_from_response_noformname(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>""")
r1 = self.request_class.from_response(response, formdata={'two':'3'})
self.assertEqual(r1.method, 'POST')
self.assertEqual(r1.headers['Content-type'], 'application/x-www-form-urlencoded')
fs = _qs(r1)
self.assertEqual(fs, {'one': ['1'], 'two': ['3']})
def test_from_response_formname_exists(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>""")
r1 = self.request_class.from_response(response, formname="form2")
self.assertEqual(r1.method, 'POST')
fs = _qs(r1)
self.assertEqual(fs, {'four': ['4'], 'three': ['3']})
def test_from_response_formname_notexist(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>""")
r1 = self.request_class.from_response(response, formname="form3")
self.assertEqual(r1.method, 'POST')
fs = _qs(r1)
self.assertEqual(fs, {'one': ['1']})
def test_from_response_formname_errors_formnumber(self):
response = _buildresponse(
"""<form name="form1" action="post.php" method="POST">
<input type="hidden" name="one" value="1">
</form>
<form name="form2" action="post.php" method="POST">
<input type="hidden" name="two" value="2">
</form>""")
self.assertRaises(IndexError, self.request_class.from_response, \
response, formname="form3", formnumber=2)
def test_from_response_select(self):
res = _buildresponse(
'''<form>
<select name="i1">
<option value="i1v1">option 1</option>
<option value="i1v2" selected>option 2</option>
</select>
<select name="i2">
<option value="i2v1">option 1</option>
<option value="i2v2">option 2</option>
</select>
<select>
<option value="i3v1">option 1</option>
<option value="i3v2">option 2</option>
</select>
<select name="i4" multiple>
<option value="i4v1">option 1</option>
<option value="i4v2" selected>option 2</option>
<option value="i4v3" selected>option 3</option>
</select>
<select name="i5" multiple>
<option value="i5v1">option 1</option>
<option value="i5v2">option 2</option>
</select>
<select name="i6"></select>
<select name="i7"/>
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v2'], 'i2': ['i2v1'], 'i4': ['i4v2', 'i4v3']})
def test_from_response_radio(self):
res = _buildresponse(
'''<form>
<input type="radio" name="i1" value="i1v1">
<input type="radio" name="i1" value="iv2" checked>
<input type="radio" name="i2" checked>
<input type="radio" name="i2">
<input type="radio" name="i3" value="i3v1">
<input type="radio" name="i3">
<input type="radio" value="i4v1">
<input type="radio">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['iv2'], 'i2': ['on']})
def test_from_response_checkbox(self):
res = _buildresponse(
'''<form>
<input type="checkbox" name="i1" value="i1v1">
<input type="checkbox" name="i1" value="iv2" checked>
<input type="checkbox" name="i2" checked>
<input type="checkbox" name="i2">
<input type="checkbox" name="i3" value="i3v1">
<input type="checkbox" name="i3">
<input type="checkbox" value="i4v1">
<input type="checkbox">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['iv2'], 'i2': ['on']})
def test_from_response_input_text(self):
res = _buildresponse(
'''<form>
<input type="text" name="i1" value="i1v1">
<input type="text" name="i2">
<input type="text" value="i3v1">
<input type="text">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v1'], 'i2': ['']})
def test_from_response_input_hidden(self):
res = _buildresponse(
'''<form>
<input type="hidden" name="i1" value="i1v1">
<input type="hidden" name="i2">
<input type="hidden" value="i3v1">
<input type="hidden">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v1'], 'i2': ['']})
def test_from_response_input_textarea(self):
res = _buildresponse(
'''<form>
<textarea name="i1">i1v</textarea>
<textarea name="i2"></textarea>
<textarea name="i3"/>
<textarea>i4v</textarea>
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(fs, {'i1': ['i1v'], 'i2': [''], 'i3': ['']})
def test_from_response_descendants(self):
res = _buildresponse(
'''<form>
<div>
<fieldset>
<input type="text" name="i1">
<select name="i2">
<option value="v1" selected>
</select>
</fieldset>
<input type="radio" name="i3" value="i3v2" checked>
<input type="checkbox" name="i4" value="i4v2" checked>
<textarea name="i5"></textarea>
<input type="hidden" name="h1" value="h1v">
</div>
<input type="hidden" name="h2" value="h2v">
</form>''')
req = self.request_class.from_response(res)
fs = _qs(req)
self.assertEqual(set(fs), set(['h2', 'i2', 'i1', 'i3', 'h1', 'i5', 'i4']))
def test_from_response_xpath(self):
response = _buildresponse(
"""<form action="post.php" method="POST">
<input type="hidden" name="one" value="1">
<input type="hidden" name="two" value="2">
</form>
<form action="post2.php" method="POST">
<input type="hidden" name="three" value="3">
<input type="hidden" name="four" value="4">
</form>""")
r1 = self.request_class.from_response(response, formxpath="//form[@action='post.php']")
fs = _qs(r1)
self.assertEqual(fs['one'], ['1'])
r1 = self.request_class.from_response(response, formxpath="//form/input[@name='four']")
fs = _qs(r1)
self.assertEqual(fs['three'], ['3'])
self.assertRaises(ValueError, self.request_class.from_response,
response, formxpath="//form/input[@name='abc']")
def _buildresponse(body, **kwargs):
kwargs.setdefault('body', body)
kwargs.setdefault('url', 'http://example.com')
kwargs.setdefault('encoding', 'utf-8')
return HtmlResponse(**kwargs)
def _qs(req):
if req.method == 'POST':
qs = req.body
else:
qs = req.url.partition('?')[2]
return cgi.parse_qs(qs, True)
class XmlRpcRequestTest(RequestTest):
request_class = XmlRpcRequest
default_method = 'POST'
default_headers = {'Content-Type': ['text/xml']}
def _test_request(self, **kwargs):
r = self.request_class('http://scrapytest.org/rpc2', **kwargs)
self.assertEqual(r.headers['Content-Type'], 'text/xml')
self.assertEqual(r.body, xmlrpclib.dumps(**kwargs))
self.assertEqual(r.method, 'POST')
self.assertEqual(r.encoding, kwargs.get('encoding', 'utf-8'))
self.assertTrue(r.dont_filter, True)
def test_xmlrpc_dumps(self):
self._test_request(params=('value',))
self._test_request(params=('username', 'password'), methodname='login')
self._test_request(params=('response', ), methodresponse='login')
self._test_request(params=(u'pas\xa3',), encoding='utf-8')
self._test_request(params=(u'pas\xa3',), encoding='latin')
self._test_request(params=(None,), allow_none=1)
self.assertRaises(TypeError, self._test_request)
self.assertRaises(TypeError, self._test_request, params=(None,))
if __name__ == "__main__":
unittest.main()
| bsd-3-clause | 3,537,051,486,304,000,000 | 41.706704 | 105 | 0.566028 | false |
scottpurdy/nupic | examples/opf/simple_server/model_params.py | 10 | 9318 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "HTMPrediction",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': {'days': 0,
'fields': [('consumption', 'sum')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Include the encoders we use
'encoders': {
u'consumption': {
'fieldname': u'consumption',
'resolution': 0.88,
'seed': 1,
'name': u'consumption',
'type': 'RandomDistributedScalarEncoder',
},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 1),
'type': 'DateEncoder'},
'timestamp_weekend': { 'fieldname': u'timestamp',
'name': u'timestamp_weekend',
'type': 'DateEncoder',
'weekend': 21}
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.85,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.04,
'synPermInactiveDec': 0.005,
},
# Controls whether TM is enabled or disabled;
# TM is necessary for making temporal predictions, such as predicting
# the next inputs. Without TM, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tmEnable' : True,
'tmParams': {
# TM diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/backtracking_tm.py and backtracking_tm_cpp.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TM)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TM
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TM how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'SDRClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'verbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1,5',
'implementation': 'py',
},
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
| agpl-3.0 | -893,157,681,630,810,600 | 36.724696 | 108 | 0.539493 | false |
mrjaydee82/SinLessKernel-4.4.4 | toolchains/linaro-arm-eabi-4.10-master/share/gdb/system-gdbinit/elinos.py | 134 | 3080 | # Copyright (C) 2011-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Configure GDB using the ELinOS environment."""
import os
import glob
import gdb
def warn(msg):
print "warning: %s" % msg
def get_elinos_environment():
"""Return the ELinOS environment.
If the ELinOS environment is properly set up, return a dictionary
which contains:
* The path to the ELinOS project at key 'project';
* The path to the ELinOS CDK at key 'cdk';
* The ELinOS target name at key 'target' (Eg. 'i486-linux');
* A list of Xenomai install prefixes (which could be empty, if
the ELinOS project does not include Xenomai) at key 'xenomai'.
If one of these cannot be found, print a warning; the corresponding
value in the returned dictionary will be None.
"""
result = {}
for key in ("project", "cdk", "target"):
var = "ELINOS_" + key.upper()
if var in os.environ:
result[key] = os.environ[var]
else:
warn("%s not set" % var)
result[key] = None
if result["project"] is not None:
result["xenomai"] = glob.glob(result["project"] + "/xenomai-[0-9.]*")
else:
result["xenomai"] = []
return result
def elinos_init():
"""Initialize debugger environment for ELinOS.
Let the debugger know where to find the ELinOS libraries on host. This
assumes that an ELinOS environment is properly set up. If some environment
variables are missing, warn about which library may be missing.
"""
elinos_env = get_elinos_environment()
solib_dirs = []
# System libraries
if None in (elinos_env[key] for key in ("cdk", "target")):
warn("ELinOS system libraries will not be loaded")
else:
solib_prefix = "%s/%s" % (elinos_env["cdk"], elinos_env["target"])
solib_dirs += ["%s/%s" % (solib_prefix, "lib")]
gdb.execute("set solib-absolute-prefix %s" % solib_prefix)
# Xenomai libraries. Those are optional, so have a lighter warning
# if they cannot be located.
if elinos_env["project"] is None:
warn("Xenomai libraries may not be loaded")
else:
for dir in elinos_env['xenomai']:
solib_dirs += ["%s/%s"
% (dir, "xenomai-build/usr/realtime/lib")]
if len(solib_dirs) != 0:
gdb.execute("set solib-search-path %s" % ":".join(solib_dirs))
if __name__ == "__main__":
elinos_init()
| gpl-2.0 | -1,691,516,481,179,178,200 | 32.846154 | 78 | 0.644156 | false |
amnona/heatsequer | heatsequer/experiment/expclass.py | 1 | 28528 | #!/usr/bin/env python
"""
heatsequer experiment class
"""
# amnonscript
__version__ = "0.9"
import heatsequer as hs
import os
import copy
import numpy as np
from pdb import set_trace as XXX
import time
import collections
class Experiment:
	'''
	experiment class holds the read data and metadata about the experiment, as well
	as the command history
	'''

	# class-level counter for handing out unique experiment ids
	# (shared by all instances; incremented by getexperimentid())
	experimentid=0

	def __init__(self):
		"""Create an empty experiment; fields are filled in by the load functions."""
		# the data matrix (rows are sequences, columns are samples)
		self.data=[]
		# True if data is a sparse matrix, False if data is not sparse
		self.sparse=False
		# the sample dictionary (double hash - sampleid and then mapping file field)
		self.smap={}
		# name of all the fields in the mapping data
		self.fields=[]
		# list of sampleids ordered according to the data matrix columns
		self.samples=[]
		# the sequences in the table, ordered according to the data matrix rows
		self.seqs=[]
		# dictionary holding all the sequences and their position (for fast lookup)
		self.seqdict={}
		# taxonomy string per sequence (parallel to seqs)
		self.tax=[]
		# the hashed ids for the sequences (see hashseq())
		self.sids=[]
		# the original name for each otu (from the biom table)
		self.origotunames=[]
		# original table file name
		self.tablefilename=''
		# original mapping file name
		self.mapfilename=''
		# name of the study (or the table file name without path)
		self.studyname=''
		# number of reads for each sample in the biom table
		self.origreads=[]
		# the original scaling factor used to convert the reads. how many original reads each normalized unit is equal to
		# (i.e. if we load an experiment with 1k reads and normalize to 10k, scaling factor is 10)
		self.scalingfactor=None
		# the history of actions performed (human readable strings)
		self.filters=[]
		# and the replayable command list (see addcommand())
		self.commands=[]
		# the complete sequence database
		self.seqdb=None
		# the cool sequences (manually curated) database
		self.cdb=None
		# the list of annotations to add to plot (for addplotmetadata)
		self.plotmetadata=[]
		# list of positions for horizontal lines (for diffexp etc.)
		self.hlines=[]
		# the tree structure of the sequences (from loadexptree); False when no tree loaded
		self.tree=False
		# the experiment type ('biom' or 'meta' for metabolite)
		self.datatype=''
		# the unique experiment id (assigned via getexperimentid() when copied)
		self.uniqueid=0
		# the md5 of the original data and mapping files loaded
		# used for a unique id for the data in the manual curation database
		self.datamd5=''
		self.mapmd5=''
		# both of the following can be set via hs.getexpannotations():
		# the list of annotations per sequence (key)
		self.seqannotations=None
		# the list of sequences per annotation (key)
		self.annotationseqs=None
		hs.Debug(0,'New experiment initialized')

	# get a unique identifier and increase by 1
	def getexperimentid(self):
		"""Return a new unique experiment id (increments the class-level counter)."""
		Experiment.experimentid+=1
		return Experiment.experimentid

	def plotexp(self,**kwargs):
		"""Convenience wrapper: plot this experiment via hs.plotexp()."""
		hs.plotexp(self,**kwargs)
def copyexp(expdat,todense=False):
	"""
	copy an experiment (duplicating the important fields)
	but give it a unique identifier

	Parameters
	----------
	expdat : Experiment
		the experiment to copy
	todense : bool (optional)
		False (default) to keep the data representation as is,
		True to convert the data matrix to dense

	output:
	newexp : Experiment
		a deep copy of expdat (with a fresh uniqueid)
	"""
	# start from a shallow copy, then deep-copy the mutable fields below
	newexp=copy.copy(expdat)
	if todense:
		newexp.data=expdat.data.todense()
		newexp.sparse=False
	else:
		newexp.data=copy.deepcopy(expdat.data)
	# fields that must not be shared between the copy and the original
	# (note: tree is intentionally not deep-copied)
	deepfields=('smap','fields','samples','seqs','seqdict','tax','sids',
		'origotunames','tablefilename','mapfilename','studyname','origreads',
		'scalingfactor','filters','commands','plotmetadata','datatype',
		'hlines','seqannotations','annotationseqs')
	for cfield in deepfields:
		setattr(newexp,cfield,copy.deepcopy(getattr(expdat,cfield)))
	# get a unique identifier for this experiment
	newexp.uniqueid=newexp.getexperimentid()
	return newexp
def hashseq(seq):
	'''
	calculate the hash value for a given sequence (for an almost unique sequence identifier)
	used for the sid field in experiment

	input:
	seq : str
		the sequence to hash

	output:
	hval : int
		the hash value for the sequence (modulo 10^7)
	'''
	return hs.mlhash(seq, emod=10000000)
def addcommand(expdat,command,params=None,replaceparams=None):
	'''
	append a command string to the experiment command list from the command and the unique experiment id
	"expXXX=command" where XXX is the unique experiment id
	if params is supplied, use them as the function parameters, otherwise just use command

	input:
	expdat : experiment
		the experiment for which to prepare the command
	command : str
		the command
	params : dict or None
		if None/empty, just append the command
		if a dict, append command+"("+params+")"
	replaceparams : dict or None
		a dict of parameters whose values need to be replaced by an experimentid.
		key is parameter, value is experiment, from where the experimentid will be taken
	'''
	# use None sentinels instead of mutable default arguments
	if params is None:
		params={}
	if replaceparams is None:
		replaceparams={}
	newcommand='exp%d=hs.%s' % (expdat.uniqueid,command)
	if len(params)>0:
		paramstrs=[]
		for k,v in params.items():
			if k in replaceparams:
				# reference the other experiment by its unique id
				v='exp%d' % v.uniqueid
			else:
				v=repr(v)
			paramstrs.append('%s=%s' % (k,v))
		newcommand+='('+','.join(paramstrs)+')'
	expdat.commands.append(newcommand)
def reordersamples(exp,newpos,inplace=False):
	"""
	reorder the samples of the experiment (columns of the data matrix)

	input:
	exp - the experiment
	newpos - array - the new positions (can skip positions to delete them)
	inplace - True to modify exp itself, False to work on a copy

	output:
	newexp - the new experiment
	"""
	newexp=exp if inplace else copyexp(exp)
	newexp.data=newexp.data[:,newpos]
	newexp.samples=hs.reorder(newexp.samples,newpos)
	newexp.origreads=hs.reorder(newexp.origreads,newpos)
	if newexp.scalingfactor is not None:
		newexp.scalingfactor=newexp.scalingfactor[newpos]
	return newexp
def reorderbacteria(exp,order,inplace=False):
	"""
	reorder the bacteria in an experiment (can delete if bacteria not in new order)

	input:
	exp - the experiment
	order - the new order (row indices into exp.data)
	inplace - True to modify exp itself, False to work on a copy

	output:
	newexp - the reordered experiment
	"""
	newexp=exp if inplace else copyexp(exp)
	newexp.data=newexp.data[order,:]
	newexp.seqs=hs.reorder(newexp.seqs,order)
	# rebuild the sequence -> row position lookup
	newexp.seqdict={cseq:idx for idx,cseq in enumerate(newexp.seqs)}
	newexp.tax=hs.reorder(newexp.tax,order)
	newexp.sids=hs.reorder(newexp.sids,order)
	# keep only annotations of the sequences that survived the reorder
	if exp.seqannotations is not None:
		seqannotations={}
		annotationseqs=collections.defaultdict(list)
		for cseq in newexp.seqs:
			seqannotations[cseq]=newexp.seqannotations[cseq]
			for cinfo in seqannotations[cseq]:
				annotationseqs[cinfo].append(cseq)
		newexp.seqannotations=seqannotations
		newexp.annotationseqs=annotationseqs
	return newexp
def getfieldvals(expdat,field,ounique=False):
	"""
	get a list of the mapping file values of a field in all samples

	input:
	expdat : Experiment
	field : string
		name of the field to get the values from
	ounique : bool
		True to get unique values (order not guaranteed), False to get one value per sample

	output:
	vals : list
		the field values
	"""
	vals=[expdat.smap[cid][field] for cid in expdat.samples]
	if ounique:
		vals=list(set(vals))
	return vals
def joinfields(expdat,field1,field2,newfield):
	"""
	join 2 mapping file fields to create a new field for each sample
	(values joined with ';'); modifies expdat in place

	input:
	expdat : Experiment
	field1,field2 : string
		name of the 2 fields to join
	newfield : string
		name of new field to add

	output:
	expdat : Experiment
		the experiment with the added field
	"""
	params=locals()

	for csamp in expdat.samples:
		cmap=expdat.smap[csamp]
		cmap[newfield]='%s;%s' % (cmap[field1],cmap[field2])
	expdat.fields.append(newfield)
	expdat.filters.append("join fields %s, %s to new field %s" % (field1,field2,newfield))
	hs.addcommand(expdat,"joinfields",params=params,replaceparams={'expdat':expdat})
	return expdat
def joinexperiments(exp1,exp2,missingval='NA',origfieldname='origexp',addbefore=False):
	"""
	join 2 experiments into a new experiment. adding a new field origfieldname

	input:
	exp1,exp2 - the experiments to join
	missingval - string to put when field not in mapping file of one of the experiments
	origfieldname - name of the new field to add which contains the original experiment name
	addbefore : bool (optional)
		passed to hs.renamesamples when both experiments share sample ids;
		False (default) to add '-1'/'-2' after the sampleid,
		True presumably adds the suffix before the sampleid (TODO confirm against hs.renamesamples)
	"""
	params=locals()

	# test if same sampleid exists in both experiments. if so, add "-1" and "-2" to sampleid
	samp1=set(exp1.samples)
	samp2=set(exp2.samples)
	if len(samp1.intersection(samp2))>0:
		hs.Debug(6,'same sampleID - renaming samples')
		exp1=hs.renamesamples(exp1,'-1',addbefore=addbefore)
		exp2=hs.renamesamples(exp2,'-2',addbefore=addbefore)

	# join the sequences of both experiments
	# ASSUMING SAME SEQ LENGTH!!!!
	allseqs=list(set(exp1.seqs) | set(exp2.seqs))
	alldict={}
	alltax=[]
	allids=[]
	for idx,cseq in enumerate(allseqs):
		alldict[cseq]=idx

	# make the new joined data for each experiment:
	# dat1 holds exp1's reads for every sequence in the union (zeros where absent);
	# taxonomy/ids are taken from exp1 when the sequence is there, else from exp2
	dat1=np.zeros((len(allseqs),np.size(exp1.data,1)))
	for idx,cseq in enumerate(allseqs):
		if cseq in exp1.seqdict:
			dat1[idx,:]=exp1.data[exp1.seqdict[cseq],:]
			alltax.append(exp1.tax[exp1.seqdict[cseq]])
			allids.append(exp1.sids[exp1.seqdict[cseq]])
		else:
			alltax.append(exp2.tax[exp2.seqdict[cseq]])
			allids.append(exp2.sids[exp2.seqdict[cseq]])

	# same for exp2's reads
	dat2=np.zeros((len(allseqs),np.size(exp2.data,1)))
	for idx,cseq in enumerate(allseqs):
		if cseq in exp2.seqdict:
			dat2[idx,:]=exp2.data[exp2.seqdict[cseq],:]

	newexp=hs.copyexp(exp1)
	# concatenate the reads (samples of exp1 followed by samples of exp2)
	newexp.data=np.concatenate((dat1,dat2), axis=1)
	newexp.seqdict=alldict
	newexp.seqs=allseqs
	newexp.tax=alltax
	newexp.sids=allids
	# NOTE(review): the previous assignment is immediately overwritten here,
	# so sids end up identical to seqs and allids is unused
	newexp.sids=newexp.seqs
	newexp.samples = list(exp1.samples) + list(exp2.samples)
	newexp.origreads=exp1.origreads+exp2.origreads
	newexp.scalingfactor=np.hstack([exp1.scalingfactor,exp2.scalingfactor])
	newexp.fields=list(set(exp1.fields+exp2.fields))

	# fill missingval for exp1 samples in fields that only exp2 has
	for cfield in newexp.fields:
		if cfield in exp1.fields:
			continue
		for csamp in exp1.samples:
			newexp.smap[csamp][cfield]=missingval
	# build the mapping entries for exp2 samples (missingval for exp1-only fields)
	for csamp in exp2.samples:
		newexp.smap[csamp]={}
		for cfield in newexp.fields:
			if cfield in exp2.fields:
				newexp.smap[csamp][cfield]=exp2.smap[csamp][cfield]
			else:
				newexp.smap[csamp][cfield]=missingval

	# record the originating experiment per sample; if the field already existed
	# in the source experiment keep its value, otherwise use the study name
	for csamp in exp1.samples:
		if origfieldname in exp1.fields:
			cname=exp1.smap[csamp][origfieldname]
		else:
			cname=exp1.studyname
		newexp.smap[csamp][origfieldname]=cname
	for csamp in exp2.samples:
		if origfieldname in exp2.fields:
			cname=exp2.smap[csamp][origfieldname]
		else:
			cname=exp2.studyname
		newexp.smap[csamp][origfieldname]=cname
	if origfieldname not in newexp.fields:
		newexp.fields.append(origfieldname)

	newexp.filters.append('joined with %s' % exp2.studyname)
	hs.addcommand(newexp,"joinexperiments",params=params,replaceparams={'exp1':exp1,'exp2':exp2})
	return newexp
def clipseqs(expdat,startpos,addseq='TAC'):
	"""
	clip the first nucleotides in all sequences in experiment
	to fix offset in sequencing

	input:
	expdat : Experiment
	startpos : int
		the position to start from (0 indexed) or negative to add nucleotides
	addseq : str
		the sequence to add (just a guess) if startpos is negative

	output:
	newexp : Experiment
		new experiment with all sequences clipped and identical sequences joined
	"""
	params=locals()

	newexp=copy.deepcopy(expdat)
	newseqs=[]
	newdict={}
	keeppos=[]
	for idx,cseq in enumerate(newexp.seqs):
		if startpos>=0:
			cseq=cseq[startpos:]
		else:
			# prepend (a guess of) the missing nucleotides and re-trim to the
			# original sequence length
			cseq=addseq[:abs(startpos)]+cseq
			cseq=cseq[:len(expdat.seqs[0])]
		if cseq in newdict:
			# clipped sequence collides with an already seen one - merge the reads
			newexp.data[newdict[cseq],:] += newexp.data[idx,:]
		else:
			newdict[cseq]=idx
			newseqs.append(cseq)
			keeppos.append(idx)
	newexp=reorderbacteria(newexp,keeppos)
	newexp.seqs=newseqs
	# bugfix: rebuild seqdict with positions in the reordered table.
	# the previous code assigned newdict, which holds the pre-reorder row
	# indices, so lookups were wrong whenever clipping merged duplicates.
	newexp.seqdict={cseq:idx for idx,cseq in enumerate(newseqs)}
	hs.addcommand(newexp,"clipseqs",params=params,replaceparams={'expdat':expdat})
	newexp.filters.append("trim %d nucleotides" % startpos)
	return newexp
def findsamples(expdat,field,value,exclude=False):
	"""
	return the positions of samples in expdat matching value in field
	similar to filtersamples but returns a list of indices (for the data matrix)

	input:
	expdat : Experiment
	field : str
		name of the field to test
	value : str or list of str
		the value to look for (or a list of values)
	exclude : bool
		True to get positions without that value, False to get positions of the value

	output:
	pos : list of int
		positions matching the field/val (for use as indices in expdat.data)
	"""
	if not isinstance(value,list):
		value=[value]
	pos=[]
	for cidx,csamp in enumerate(expdat.samples):
		matched=expdat.smap[csamp][field] in value
		# keep when (matched and not exclude) or (not matched and exclude)
		if matched!=exclude:
			pos.append(cidx)
	return pos
def zerobacteria(expdat,inplace=False):
	"""
	remove all bacteria from an experiment while keeping the samples
	(bacteria can then be re-added with insertbacteria)

	input:
	expdat : Experiment
	inplace : bool
		True to modify expdat itself, False to work on a new copy

	output:
	newexp : Experiment
		with an empty (0 x num-samples) data matrix and no sequences
	"""
	newexp=expdat if inplace else hs.copyexp(expdat)
	newexp.data=np.zeros([0,len(newexp.samples)])
	newexp.seqs=[]
	newexp.tax=[]
	newexp.seqdict={}
	newexp.sids=[]
	return newexp
def insertbacteria(expdat,freqs=None,seq="unknown",tax="unknown",logit=True):
	"""
	insert a new bacteria (row) into an experiment, in place

	input:
	expdat : Experiment
		the experiment to add the bacteria to (modified in place)
	freqs : array or None
		the frequency of the bacteria in all samples of expdat,
		or None/empty to add a row of zeros
	seq : str
		the sequence of the new bacteria (a numeric suffix is appended if the
		sequence already exists in the experiment)
	tax : str
		taxonomy of the new bacteria
	logit : bool
		True to add command log/filter, False to not add (when called from another logged function)

	output:
	expdat : Experiment
		the experiment with the bacteria appended
	pos : int
		row position of the new bacteria
	"""
	params=locals()

	# None sentinel instead of a mutable default argument; empty still means zeros
	if freqs is None or len(freqs)==0:
		freqs=np.zeros([1,len(expdat.samples)])

	expdat.data=np.vstack((expdat.data,freqs))
	expdat.tax.append(tax)

	if seq in expdat.seqdict:
		hs.Debug(6,'Sequence already in experiment',seq)
		# get a unique sequence name by appending a running number
		cid=0
		while seq+str(cid) in expdat.seqdict:
			cid+=1
		seq=seq+str(cid)

	expdat.seqs.append(seq)
	expdat.seqdict[seq]=len(expdat.seqs)-1
	expdat.sids.append(seq)

	if logit:
		expdat.filters.append("insert bacteria")
		hs.addcommand(expdat,"insertbacteria",params=params,replaceparams={'expdat':expdat})
	return expdat,len(expdat.seqs)-1
def addsubtrees(expdat,tree,inplace=False):
	"""
	add otus for all subtrees with the frequency being the sum of all bacteria in the subtree

	input:
	expdat : Experiment
		the experiment
	tree : tree object
		the tree for the experiment; must supply a subsets() method where each
		subset is an iterable of sequences (presumably an skbio tree - TODO confirm)
	inplace : bool
		if True, modify the current experiment; if False, work on a copy

	output:
	newexp : Experiment
		the experiment with an added otu per multi-member subtree
	"""
	params=locals()

	# if not expdat.tree:
	# 	hs.Debug(8,"No tree loaded for experiment")
	# 	return False

	if inplace:
		newexp=expdat
	else:
		newexp=hs.copyexp(expdat)

	subtrees=tree.subsets()
	for csubtree in subtrees:
		# newname: comma-joined row indices of the members, used as the
		# pseudo-sequence id of the subtree otu
		newname=""
		newtax=""
		numuse=0
		newfreq=np.zeros([1,len(newexp.samples)])
		for cbact in csubtree:
			if cbact not in newexp.seqdict:
				hs.Debug(4,'sequence not in seqdict',cbact)
				continue
			numuse+=1
			cpos=newexp.seqdict[cbact]
			# accumulate the member's reads into the subtree otu
			newfreq+=newexp.data[cpos,:]
			newname+='%d,' % cpos
			if newtax=='':
				newtax=newexp.tax[cpos]
			else:
				# keep the shared taxonomy of all members so far
				# (hs.common_start presumably returns the common string prefix - TODO confirm)
				newtax=hs.common_start(newtax,newexp.tax[cpos])
		# add only if we have 2 bacteria or more, and skip already-added subtrees
		if numuse>1:
			if newname not in newexp.seqdict:
				newexp,newpos=insertbacteria(newexp,freqs=newfreq,seq=newname,tax=newtax,logit=False)

	newexp.filters.append("Add subtrees")
	hs.addcommand(newexp,"addsubtrees",params=params,replaceparams={'expdat':expdat})
	return(newexp)
def findseqsinexp(expdat,seqs):
	"""
	find sequences from seqs in expdat sequences and return the indices

	input:
	expdat : Experiment
	seqs : list of str
		the sequences to look up (each must be present in expdat)

	output:
	res : list of int
		index in expdat of each sequence in seqs
	"""
	return [expdat.seqdict[cseq] for cseq in seqs]
# def samplemeanpervalue(expdat,field):
# """
# BETTER TO USE filtersimilarsamples!!!!
# create a new experiment, with 1 sample per value in field, containing the mean of all samples with that value
# input:
# expdat : Experiment
# field : string
# the field to use (i.e. 'ENV_MATTER')
# output:
# newexp : Experiment
# The new experiment with 1 sample per unique value of field
# """
# params=locals()
# uvals=hs.getfieldvals(expdat,field,ounique=True)
# vals=hs.getfieldvals(expdat,field,ounique=False)
# vdict=hs.listtodict(vals)
# nsamps=[]
# for cval in uvals:
# nsamps.append(vdict[cval][0])
# newexp=hs.reordersamples(expdat,nsamps)
# for idx,cval in enumerate(uvals):
# cdat=expdat.data[:,vdict[cval]]
# mv=np.mean(cdat,axis=1)
# newexp.data[:,idx]=mv
# newexp.filters.append('samplemeanpervalue for field %s' % field)
# hs.addcommand(newexp,"samplemeanpervalue",params=params,replaceparams={'expdat':expdat})
# return(newexp)
def convertdatefield(expdat,field,newfield,timeformat='%m/%d/%y %H:%M'):
	"""
	convert a field containing date/time to a numeric (seconds since epoch) field (create a new field for that)

	input:
	expdat : Experiment
		the experiment to add the field to
	field : string
		name of the field containing the date/time format
	newfield : string
		name of the new field (with seconds since epoch)
	timeformat : string
		format of the date/time field (for time.strptime)

	output:
	newexp : Experiment
		the experiment with the added time since epoch field
		(samples that fail to parse get 0)
	"""
	params=locals()

	newexp=hs.copyexp(expdat)
	newexp.fields.append(newfield)
	numfailed=0
	for csamp in newexp.samples:
		try:
			ctime=time.mktime(time.strptime(newexp.smap[csamp][field],timeformat))
		# narrowed from a bare except: only catch the errors a bad/missing
		# value can raise, not KeyboardInterrupt/SystemExit etc.
		except (ValueError, OverflowError, TypeError, KeyError):
			ctime=0
			numfailed+=1
		newexp.smap[csamp][newfield]=str(ctime)
	hs.Debug(6,'%d conversions failed' % numfailed)
	newexp.filters.append('add time field %s (based on field %s)' % (newfield,field))
	hs.addcommand(newexp,"convertdatefield",params=params,replaceparams={'expdat':expdat})
	return(newexp)
def fieldtobact(expdat,field,bactname='',meanreads=1000,cutoff=0):
	"""
	convert values in a map file field to a new bacteria (to facilitate numeric analysis)

	input:
	expdat : Experiment
	field : string
		name of the field to convert
	bactname : string
		name of the new bacteria (empty to use the field name)
	meanreads : int or None
		the mean number of reads for the new field bacteria, or None to not rescale
	cutoff : int
		the minimal value of the field per sample (lower values are replaced with meanreads)

	output:
	newexp : Experiment
		with an added bacteria whose reads are the field values
	"""
	params=locals()

	if not bactname:
		bactname=field
	vals=np.array(hs.tofloat(hs.getfieldvals(expdat,field)))
	okpos=np.where(vals>=cutoff)[0]
	badpos=np.where(vals<cutoff)[0]
	if meanreads is not None:
		# rescale so the mean of the above-cutoff values equals meanreads,
		# and replace below-cutoff values with meanreads
		scale=np.mean(vals[okpos])
		vals[okpos]=(vals[okpos]/scale)*meanreads
		vals[badpos]=meanreads
	newexp=hs.copyexp(expdat)
	hs.insertbacteria(newexp,vals,bactname,bactname,logit=False)
	newexp.filters.append('add bacteria from map field %s' % field)
	hs.addcommand(newexp,"fieldtobact",params=params,replaceparams={'expdat':expdat})
	return(newexp)
def get_data_path(fn, subfolder='data'):
    """
    Return the full path of data file *fn*.

    The path is assembled from the heatsequer root directory
    (``hs.heatsequerdir``, set in ``__init__``), *subfolder* and *fn*.
    """
    return os.path.join(hs.heatsequerdir, subfolder, fn)
def addmapfield(expdat, fieldname, defaultval='NA', inplace=False):
    """
    add a new field to the mapping file
    input:
    expdat : Experiment
    fieldname : str
        name of the new field
    defaultval : str
        the value assigned to all samples
    inplace : bool
        True to overwrite current experiment, False (default) to copy
    output:
    newexp : Experiment
        with the new field added (unchanged if the field already exists)
    """
    if inplace:
        newexp = expdat
    else:
        newexp = hs.copyexp(expdat)
    if fieldname in newexp.fields:
        # bug fix: the format string previously had no argument, so the debug
        # message always printed a literal '%s' instead of the field name
        hs.Debug(8, 'field %s already exists' % fieldname)
        return newexp
    newexp.fields.append(fieldname)
    for csamp in newexp.samples:
        newexp.smap[csamp][fieldname] = defaultval
    return newexp
def changemapval(expdat, newfield, newval, oldfield, vals, inplace=False):
    """
    rewrite ``newfield`` in the per-sample mapping based on another field
    input:
    expdat : Experiment
    newfield : name of the field to change the values in (from addmapfield?)
    newval : the new value to put
    oldfield : the field whose current value is tested
    vals : list of values; newfield is set to newval only for samples whose
        oldfield value appears in this list
    inplace : bool
        True to overwrite current experiment, False (default) to copy
    """
    target = expdat if inplace else hs.copyexp(expdat)
    for csamp in target.samples:
        if target.smap[csamp][oldfield] in vals:
            target.smap[csamp][newfield] = newval
    return target
def getseqsamp(expdat, seq, samp, unnormalize=False):
    """
    get the number of reads for one sequence/sample combination
    input:
    expdat : ExpClass
        the experiment
    seq : str
        the sequence to look up
    samp : str
        the sample name to look up
    unnormalize : bool
        False (default) to return normalized reads, True to scale back to
        the raw (original) read counts
    output:
    reads : float
        the number of reads of sequence seq in sample samp
    """
    seq_idx = expdat.seqdict[seq]
    samp_idx = np.where(expdat.samples == samp)[0]
    reads = expdat.data[seq_idx, samp_idx]
    if unnormalize:
        # undo normalization: rescale by the sample's original total reads
        column_total = np.sum(expdat.data[:, samp_idx])
        reads = reads * expdat.origreads[samp_idx] / column_total
    return reads
def addsample(expdat, sampleid, fieldvals=None, missingval='NA', data=None):
    """
    add a sample to the experiment (modifies expdat in place)
    input:
    expdat : Experiment
        the experiment to add the sample to
    sampleid : str
        name of the new sample
    fieldvals : dict of (str: str) or None
        dict (field: value) of mapping file field values (None for empty)
    missingval : str
        value used for mapping file fields not present in fieldvals
    data : None or nparray
        the reads per bacteria, or None for an all-zero column
    output:
    expdat : Experiment
        with the added sample (NOTE: same object, mutated in place)
    """
    # bug fix: the default was previously a mutable dict ({}), which is
    # shared between calls; use None as the sentinel instead
    if fieldvals is None:
        fieldvals = {}
    hs.Debug(1, 'Add sample %s to experiment' % sampleid)
    if sampleid in expdat.samples:
        # bug fix: Debug() was previously called without its level argument,
        # passing the message where the level is expected
        hs.Debug(8, 'Sample %s already in experiment! aborting' % sampleid)
        return expdat
    # add the sample id and its mapping-file row
    expdat.samples.append(sampleid)
    expdat.smap[sampleid] = {}
    for cfield in expdat.fields:
        if cfield in fieldvals:
            expdat.smap[sampleid][cfield] = fieldvals[cfield]
        else:
            expdat.smap[sampleid][cfield] = missingval
    # default to an all-zero reads column when no data is supplied
    if data is None:
        data = np.zeros(np.shape(expdat.data)[0])
    expdat.origreads.append(np.sum(data))
    # append the reads as a new column of the data matrix
    data = np.reshape(data, [len(data), 1])
    expdat.data = np.hstack([expdat.data, data])
    return expdat
def taxtoseq(expdat, fixtax=False):
    """
    use the taxonomy strings as the experiment sequences
    input:
    expdat : Experiment
    fixtax : bool (optional)
        False (default) to copy the taxonomy as is,
        True to strip the level prefixes (k__ etc.) first
    output:
    newexp : Experiment
        with seqs replaced by the (possibly cleaned) taxonomies
    """
    newexp = hs.copyexp(expdat)
    newexp.seqs = newexp.tax
    if fixtax:
        cleaned = []
        for taxstr in newexp.tax:
            levels = taxstr.split(';')
            joined = ''
            for depth in range(7):
                if len(levels) > depth:
                    # drop the 3-character rank prefix (e.g. 'k__')
                    joined += levels[depth][3:]
                joined += ';'
            cleaned.append(joined)
        newexp.seqs = cleaned
    # rebuild seqdict, de-duplicating repeated taxonomy strings by appending
    # the row index so every key stays unique
    newexp.seqdict = {}
    unique_seqs = []
    for idx, cseq in enumerate(newexp.seqs):
        if cseq in newexp.seqdict:
            hs.Debug(8, 'found %s again' % cseq)
            cseq = cseq + '-' + str(idx)
        unique_seqs.append(cseq)
        newexp.seqdict[cseq] = idx
    newexp.seqs = unique_seqs
    return newexp
def renamesamples(expdat, addstr, addbefore=True):
    """
    rename all samples by adding addstr before or after each sample id
    input:
    expdat : Experiment
        the experiment whose sample names are changed
    addstr : str
        the string added to each sampleid
    addbefore : bool (optional)
        True (default) to prepend addstr, False to append it
    output:
    newexp : Experiment
        with the renamed samples
    """
    newexp = hs.copyexp(expdat)
    renamed_ids = []
    renamed_map = {}
    for csamp in newexp.samples:
        newid = (addstr + csamp) if addbefore else (csamp + addstr)
        renamed_ids.append(newid)
        # shallow-copy the per-sample mapping row under the new id
        renamed_map[newid] = dict(newexp.smap[csamp])
    newexp.samples = renamed_ids
    newexp.smap = renamed_map
    return newexp
def validateexp(expdat):
    """
    test the validity of an experiment:
    1. seqdict is correct
    2. smap contains all the samples
    3. smap fields are the same as fields
    4. issparse is correct
    5. taxonomy length is the same as the number of sequence
    input:
    expdat : Experiment
    output:
    isok : bool
        True if experiment is validated, False if there is a problem
    """
    # NOTE(review): validation is not implemented yet -- the function always
    # falls through and implicitly returns None, although the docstring
    # promises a bool. TODO: implement the checks or mark this as a stub.
    # test the seqdict
    # for idx,cseq in enumerate(expdat.seqs):
    #     if expdat.seqdict
def getheatsequerdir():
    """Return the heatsequer root directory (recorded at package init)."""
    return hs.heatsequerdir
def trimfieldnames(expdat, field, newfield, trimlen=6):
    """
    store a trimmed copy of a per-sample field value
    (NOTE: modifies expdat in place and returns the same object)
    input:
    expdat : Experiment
    field : str
        name of the field whose values are trimmed
    newfield : str
        name of the field that receives the trimmed values
    trimlen : int
        >0 : keep the first trimlen characters
        <0 : keep the last -trimlen characters
    output:
    newexp : Experiment
        the experiment with the trimmed field added
    """
    params = locals()
    for csamp in expdat.samples:
        value = expdat.smap[csamp][field]
        trimmed = value[:trimlen] if trimlen > 0 else value[trimlen:]
        expdat.smap[csamp][newfield] = trimmed
    expdat.fields.append(newfield)
    expdat.filters.append('Trim field names field %s trimlen %d' % (field, trimlen))
    hs.addcommand(expdat, "trimfieldnames", params=params, replaceparams={'expdat': expdat})
    return expdat
def addfield(expdat, field, values):
    """
    add a new field to the experiment and fill in its values
    (modifies expdat in place)
    input:
    expdat : experiment
    field : str
        name of the new field to add
    values : list of str or str
        the values to add. if str - the same value for every sample;
        if list - values[i] goes to the i-th sample
    output:
    expdat : experiment
        with the new field added (NOTE: inplace)
    """
    # bug/idiom fix: use isinstance instead of `type(values)==str` so str
    # subclasses are treated as scalars too; hoisted out of the loop
    scalar = isinstance(values, str)
    for idx, csamp in enumerate(expdat.samples):
        expdat.smap[csamp][field] = values if scalar else values[idx]
    expdat.fields.append(field)
def filtermapfields(expdat, fields=['#SampleID'], keep=True, inplace=False):
    """
    filter fields from the experiment mapping data
    input:
    expdat : Experiment
    fields : list of str
        the list of the fields to keep/remove
    keep : bool (optional)
        True (default) to keep only the fields specified
        False to remove the fields specified
    inplace : bool (optional)
        False (default) to create new experiment
        True to replace in current experiment
    output:
    newexp : Experiment
        with only the fields requested
    """
    # capture call arguments first, for the command history
    params = locals()
    newfields = set(expdat.fields)
    if keep:
        newfields = newfields.intersection(set(fields))
    else:
        newfields = newfields.difference(set(fields))
    # the sample id column is always retained
    newfields.add('#SampleID')
    newsmap = {}
    for csamp in expdat.samples:
        newsmap[csamp] = {}
        for cfield in newfields:
            newsmap[csamp][cfield] = expdat.smap[csamp][cfield]
    if inplace:
        newexp = expdat
    else:
        newexp = hs.copyexp(expdat)
    newexp.fields = list(newfields)
    newexp.smap = newsmap
    # bug fix: the filter entry and command history were previously appended
    # to the original expdat, so a returned copy carried no record of this
    # operation (and the original was polluted with it)
    newexp.filters.append('filter map fields %s (keep=%s)' % (fields, keep))
    hs.addcommand(newexp, "filtermapfields", params=params, replaceparams={'expdat': expdat})
    return newexp
def expfromcalour(cexp):
    '''
    convert an experiment from calour to heatsequer
    input:
    cexp : calour experiment
    '''
    newexp = Experiment()
    # calour stores data samples-by-features; heatsequer expects the transpose
    newexp.data = copy.copy(cexp.data).transpose()
    newexp.samples = list(cexp.sample_metadata.index)
    newexp.seqs = list(cexp.feature_metadata.index)
    if 'taxonomy' in cexp.feature_metadata.columns:
        # assumes each taxonomy entry is an iterable of level strings -- TODO confirm
        newexp.tax = [';'.join(x) for x in cexp.feature_metadata['taxonomy']]
    else:
        # no taxonomy column: fall back to the feature ids
        newexp.tax = list(cexp.feature_metadata.index)
    newexp.sids = list(cexp.feature_metadata.index)
    # per-sample totals, computed over the (already transposed) data matrix
    newexp.origreads = np.sum(newexp.data, 0)
    newexp.fields = list(cexp.sample_metadata.columns)
    # copy the per-sample metadata into the heatsequer sample map
    # (presumably Experiment() initializes smap as a dict -- verify)
    for csamp in newexp.samples:
        newexp.smap[csamp] = {}
        for cfield in newexp.fields:
            newexp.smap[csamp][cfield] = cexp.sample_metadata.loc[csamp][cfield]
    newexp.commands.append('From calour experiment')
    newexp.commands.append(cexp.description)
    return newexp
| bsd-3-clause | -7,964,009,669,535,403,000 | 25.439296 | 115 | 0.733805 | false |
j-carl/boto | boto/directconnect/exceptions.py | 148 | 1239 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class DirectConnectClientException(Exception):
    """Exception type for client-side errors in the Direct Connect module."""
    pass
class DirectConnectServerException(Exception):
    """Exception type for server-side errors in the Direct Connect module."""
    pass
| mit | 255,873,484,612,716,740 | 41.724138 | 77 | 0.772397 | false |
maartenq/ansible | test/units/modules/source_control/test_gitlab_deploy_key.py | 12 | 8009 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Marcus Watkins <marwatk@marcuswatkins.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from ansible.compat.tests.mock import patch
from ansible.modules.source_control import gitlab_deploy_key
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic
import pytest
import json
from units.modules.utils import set_module_args
fake_server_state = [
{
"id": 1,
"title": "Public key",
"key": 'ssh-rsa long/+base64//+string==',
"created_at": "2013-10-02T10:12:29Z",
"can_push": False
},
]
class FakeReader:
    """Minimal stand-in for the response object handed back by fetch_url.

    Serializes *object* as JSON once and serves it via read().
    """
    def __init__(self, object):
        # sort_keys=True gives a deterministic string, so the tests can
        # compare the request body against an exact literal
        self.content = json.dumps(object, sort_keys=True)

    def read(self):
        return self.content
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs):
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
@pytest.fixture
def fetch_url_mock(mocker):
return mocker.patch('ansible.module_utils.gitlab.fetch_url')
@pytest.fixture
def module_mock(mocker):
return mocker.patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
def test_access_token_output(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'absent'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
first_call = fetch_url_mock.call_args_list[0][1]
assert first_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/deploy_keys'
assert first_call['headers']['Authorization'] == 'Bearer test-access-token'
assert 'Private-Token' not in first_call['headers']
assert first_call['method'] == 'GET'
def test_private_token_output(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'private_token': 'test-private-token',
'project': 'foo/bar',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'absent'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
first_call = fetch_url_mock.call_args_list[0][1]
assert first_call['url'] == 'https://gitlab.example.com/api/v4/projects/foo%2Fbar/deploy_keys'
assert first_call['headers']['Private-Token'] == 'test-private-token'
assert 'Authorization' not in first_call['headers']
assert first_call['method'] == 'GET'
def test_bad_http_first_response(capfd, fetch_url_mock, module_mock):
fetch_url_mock.side_effect = [[FakeReader("Permission denied"), {'status': 403}], [FakeReader("Permission denied"), {'status': 403}]]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'absent'
})
with pytest.raises(AnsibleFailJson):
gitlab_deploy_key.main()
def test_bad_http_second_response(capfd, fetch_url_mock, module_mock):
fetch_url_mock.side_effect = [[FakeReader(fake_server_state), {'status': 200}], [FakeReader("Permission denied"), {'status': 403}]]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'present'
})
with pytest.raises(AnsibleFailJson):
gitlab_deploy_key.main()
def test_delete_non_existing(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'absent'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
assert result.value.args[0]['changed'] is False
def test_delete_existing(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-rsa long/+base64//+string==',
'title': 'a title',
'state': 'absent'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
second_call = fetch_url_mock.call_args_list[1][1]
assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/deploy_keys/1'
assert second_call['method'] == 'DELETE'
assert result.value.args[0]['changed'] is True
def test_add_new(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'key': 'ssh-key foobar',
'title': 'a title',
'state': 'present'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
second_call = fetch_url_mock.call_args_list[1][1]
assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/deploy_keys'
assert second_call['method'] == 'POST'
assert second_call['data'] == '{"can_push": false, "key": "ssh-key foobar", "title": "a title"}'
assert result.value.args[0]['changed'] is True
def test_update_existing(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'title': 'Public key',
'key': 'ssh-rsa long/+base64//+string==',
'can_push': 'yes',
'state': 'present'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
second_call = fetch_url_mock.call_args_list[1][1]
assert second_call['url'] == 'https://gitlab.example.com/api/v4/projects/10/deploy_keys/1'
assert second_call['method'] == 'PUT'
assert second_call['data'] == ('{"can_push": true, "key": "ssh-rsa long/+base64//+string==", "title": "Public key"}')
assert result.value.args[0]['changed'] is True
def test_unchanged_existing(capfd, fetch_url_mock, module_mock):
fetch_url_mock.return_value = [FakeReader(fake_server_state), {'status': 200}]
set_module_args({
'api_url': 'https://gitlab.example.com/api',
'access_token': 'test-access-token',
'project': '10',
'title': 'Public key',
'key': 'ssh-rsa long/+base64//+string==',
'can_push': 'no',
'state': 'present'
})
with pytest.raises(AnsibleExitJson) as result:
gitlab_deploy_key.main()
assert result.value.args[0]['changed'] is False
assert fetch_url_mock.call_count == 1
| gpl-3.0 | -9,096,318,722,550,040,000 | 33.373391 | 137 | 0.621925 | false |
chainer/chainer | chainer/testing/helper.py | 6 | 3610 | import contextlib
import sys
import unittest
import warnings
import pkg_resources
try:
import mock
_mock_error = None
except ImportError as e:
_mock_error = e
def _check_mock_available():
    """Raise RuntimeError if the optional ``mock`` package failed to import."""
    if _mock_error is None:
        return
    raise RuntimeError(
        'mock is not available: Reason: {}'.format(_mock_error))
def with_requires(*requirements):
    """Skip a test case unless all given requirements are satisfied.

    Each requirement is a pip-style specifier string such as
    ``'numpy>=1.10'``. Usable as a method or class decorator, e.g.::

        @testing.with_requires('numpy>=1.10')
        def test_for_numpy_1_10(self):
            ...

    Args:
        requirements: Strings describing the requirement conditions under
            which the decorated test case runs.
    """
    working_set = pkg_resources.WorkingSet()
    try:
        working_set.require(*requirements)
        satisfied = True
    except pkg_resources.ResolutionError:
        satisfied = False
    reason = 'requires: {}'.format(','.join(requirements))
    return unittest.skipIf(not satisfied, reason)
def without_requires(*requirements):
    """Skip a test case when all given requirements ARE satisfied.

    The mirror image of :func:`with_requires`: the decorated test runs only
    if at least one requirement (pip-style specifier string) is missing.

    Args:
        requirements: Strings describing the requirement conditions under
            which the decorated test case is skipped.
    """
    working_set = pkg_resources.WorkingSet()
    try:
        working_set.require(*requirements)
        satisfied = True
    except pkg_resources.ResolutionError:
        satisfied = False
    reason = 'requires: {}'.format(','.join(requirements))
    return unittest.skipIf(satisfied, reason)
@contextlib.contextmanager
def assert_warns(expected):
    """Context manager asserting that a warning of type *expected* is raised.

    Raises AssertionError if the wrapped block completes without emitting a
    matching warning (checked on Python 3 only).
    """
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        yield
        # Python 2 does not raise warnings multiple times from the same
        # stack frame, so the post-condition is only checked on Python 3.
        if sys.version_info < (3, 0):
            return
        if any(isinstance(entry.message, expected) for entry in caught):
            return
        try:
            exc_name = expected.__name__
        except AttributeError:
            exc_name = str(expected)
        raise AssertionError('%s not triggerred' % exc_name)
def _import_object_from_name(fullname):
comps = fullname.split('.')
obj = sys.modules.get(comps[0])
if obj is None:
raise RuntimeError('Can\'t import {}'.format(comps[0]))
for i, comp in enumerate(comps[1:]):
obj = getattr(obj, comp)
if obj is None:
raise RuntimeError(
'Can\'t find object {}'.format('.'.join(comps[:i + 1])))
return obj
def patch(target, *args, **kwargs):
    """Wrapper of ``mock.patch`` that defaults ``wraps`` to the target.

    .. note::

        Unbound methods are not supported as ``wraps`` argument.

    Args:
        target(str): Full dotted name of the object to patch.
        wraps: Wrapping object passed to ``mock.patch`` as ``wraps``;
            when omitted, the object named by ``target`` is resolved
            and used instead.
        *args: Forwarded to ``mock.patch``.
        **kwargs: Forwarded to ``mock.patch``.
    """
    _check_mock_available()
    if 'wraps' in kwargs:
        wraps = kwargs.pop('wraps')
    else:
        wraps = _import_object_from_name(target)
    return mock.patch(target, *args, wraps=wraps, **kwargs)
| mit | 5,426,637,843,646,993,000 | 26.557252 | 76 | 0.60831 | false |
liavkoren/djangoDev | django/contrib/gis/geoip/tests.py | 48 | 4728 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import unittest
from unittest import skipUnless
from django.conf import settings
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.geoip import HAS_GEOIP
from django.utils import six
if HAS_GEOIP:
from . import GeoIP, GeoIPException
if HAS_GEOS:
from ..geos import GEOSGeometry
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_DATA path should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat'.
@skipUnless(HAS_GEOIP and getattr(settings, "GEOIP_PATH", None),
"GeoIP is required along with the GEOIP_PATH setting.")
class GeoIPTest(unittest.TestCase):
def test01_init(self):
"Testing GeoIP initialization."
g1 = GeoIP() # Everything inferred from GeoIP path
path = settings.GEOIP_PATH
g2 = GeoIP(path, 0) # Passing in data path explicitly.
g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
for g in (g1, g2, g3):
self.assertEqual(True, bool(g._country))
self.assertEqual(True, bool(g._city))
# Only passing in the location of one database.
city = os.path.join(path, 'GeoLiteCity.dat')
cntry = os.path.join(path, 'GeoIP.dat')
g4 = GeoIP(city, country='')
self.assertEqual(None, g4._country)
g5 = GeoIP(cntry, city='')
self.assertEqual(None, g5._city)
# Improper parameters.
bad_params = (23, 'foo', 15.23)
for bad in bad_params:
self.assertRaises(GeoIPException, GeoIP, cache=bad)
if isinstance(bad, six.string_types):
e = GeoIPException
else:
e = TypeError
self.assertRaises(e, GeoIP, bad, 0)
def test02_bad_query(self):
"Testing GeoIP query parameter checking."
cntry_g = GeoIP(city='<foo>')
# No city database available, these calls should fail.
self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
# Non-string query should raise TypeError
self.assertRaises(TypeError, cntry_g.country_code, 17)
self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
def test03_country(self):
"Testing GeoIP country querying methods."
g = GeoIP(city='<foo>')
fqdn = 'www.google.com'
addr = '12.215.42.19'
for query in (fqdn, addr):
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
@skipUnless(HAS_GEOS, "Geos is required")
def test04_city(self):
"Testing GeoIP city querying methods."
g = GeoIP(country='<foo>')
addr = '128.249.1.1'
fqdn = 'tmc.edu'
for query in (fqdn, addr):
# Country queries should still work.
for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
self.assertEqual('US', func(query))
for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
self.assertEqual('United States', func(query))
self.assertEqual({'country_code': 'US', 'country_name': 'United States'},
g.country(query))
# City information dictionary.
d = g.city(query)
self.assertEqual('USA', d['country_code3'])
self.assertEqual('Houston', d['city'])
self.assertEqual('TX', d['region'])
self.assertEqual(713, d['area_code'])
geom = g.geos(query)
self.assertIsInstance(geom, GEOSGeometry)
lon, lat = (-95.4010, 29.7079)
lat_lon = g.lat_lon(query)
lat_lon = (lat_lon[1], lat_lon[0])
for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
self.assertAlmostEqual(lon, tup[0], 4)
self.assertAlmostEqual(lat, tup[1], 4)
def test05_unicode_response(self):
"Testing that GeoIP strings are properly encoded, see #16553."
g = GeoIP()
d = g.city("www.osnabrueck.de")
self.assertEqual('Osnabrück', d['city'])
d = g.country('200.7.49.81')
self.assertEqual('Curaçao', d['country_name'])
| bsd-3-clause | -889,161,696,582,536,300 | 37.422764 | 89 | 0.600296 | false |
kumanna/Simple-OFDM-Modem | usrp/transmit.py | 1 | 1973 | #!/usr/bin/env python
"""
test
"""
INTERP = 128
TXGAIN = 30
CONSTANT = 0.10
from gnuradio import gr, gr_unittest
import usrp_options
from optparse import OptionParser
from gnuradio.eng_option import eng_option
from pick_bitrate import pick_tx_bitrate
def main():
    # Ask GNU Radio for realtime scheduling so USRP streaming keeps up.
    gr.enable_realtime_scheduling()
    tb = gr.top_block ()
    # Complex-sample source read (and repeated) from a prerecorded file.
    src = gr.file_source(gr.sizeof_gr_complex, "transmit-data.dat", True)
    parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
    (options, args) = parser.parse_args ()
    # Hard-coded option set applied attribute-by-attribute onto the parsed
    # options object; stands in for the full CLI of the GNU Radio examples.
    d = {'verbose': True, 'discontinuous': False, 'samples_per_symbol': 2, 'usrpx': None, 'interp': INTERP, 'fusb_block_size': 0, 'megabytes': 1.0, 'rx_freq': 2.475e9, 'size': 1500, 'show_tx_gain_range': False, 'log': False, 'tx_subdev_spec': None, 'fusb_nblocks': 0, 'lo_offset': None, 'tx_gain': TXGAIN, 'which': 0, 'modulation': 'gmsk', 'excess_bw': 0.34999999999999998, 'bt': 0.34999999999999998, 'interface': 'eth0', 'freq': None, 'bitrate': 100000.0, 'from_file': None, 'tx_freq': 2475000000.0, 'mac_addr': '', 'tx_amplitude': 0.1, 'gray_code': True}
    for i, j in d.items():
        setattr(options, i, j)
    u = usrp_options.create_usrp_sink(options)
    dac_rate = u.dac_rate()
    if options.verbose:
        print 'USRP Sink:', u
    # Choose a bitrate/interpolation combination the hardware supports.
    (_bitrate, _samples_per_symbol, _interp) = \
        pick_tx_bitrate(options.bitrate, 2, \
        options.samples_per_symbol, options.interp, dac_rate, \
        u.get_interp_rates())
    u.set_interp(_interp)
    u.set_auto_tr(True)
    if not u.set_center_freq(options.tx_freq):
        # NOTE(review): `eng_notation` does not appear in this file's imports;
        # this error path would raise NameError before printing -- verify.
        print "Failed to set Rx frequency to %s" % (eng_notation.num_to_str(options.tx_freq))
        raise ValueError, eng_notation.num_to_str(options.tx_freq)
    # Scale samples down (CONSTANT) before the sink to stay in DAC range.
    m = gr.multiply_const_cc(CONSTANT)
    tb.connect(src, m, u)
    tb.run()
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print "Bye"
| gpl-3.0 | -1,772,233,772,600,407,300 | 36.226415 | 556 | 0.616827 | false |
Neamar/django | tests/deprecation/tests.py | 199 | 7253 | from __future__ import unicode_literals
import os
import unittest
import warnings
from django.test import SimpleTestCase
from django.test.utils import reset_warning_registry
from django.utils import six
from django.utils.deprecation import RenameMethodsBase
from django.utils.encoding import force_text
class RenameManagerMethods(RenameMethodsBase):
    """Metaclass fixture declaring that method `old` was renamed to `new`."""
    renamed_methods = (
        ('old', 'new', DeprecationWarning),
    )
class RenameMethodsTests(SimpleTestCase):
"""
Tests the `RenameMethodsBase` type introduced to rename `get_query_set`
to `get_queryset` across the code base following #15363.
"""
def test_class_definition_warnings(self):
"""
Ensure a warning is raised upon class definition to suggest renaming
the faulty method.
"""
reset_warning_registry()
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
class Manager(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertEqual(msg,
'`Manager.old` method should be renamed `new`.')
def test_get_new_defined(self):
"""
Ensure `old` complains and not `new` when only `new` is defined.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Manager(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
warnings.simplefilter('always')
manager = Manager()
manager.new()
self.assertEqual(len(recorded), 0)
manager.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Manager.old` is deprecated, use `new` instead.')
def test_get_old_defined(self):
"""
Ensure `old` complains when only `old` is defined.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Manager(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
warnings.simplefilter('always')
manager = Manager()
manager.new()
self.assertEqual(len(recorded), 0)
manager.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Manager.old` is deprecated, use `new` instead.')
def test_deprecated_subclass_renamed(self):
"""
Ensure the correct warnings are raised when a class that didn't rename
`old` subclass one that did.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Renamed(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
class Deprecated(Renamed):
def old(self):
super(Deprecated, self).old()
warnings.simplefilter('always')
deprecated = Deprecated()
deprecated.new()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Renamed.old` is deprecated, use `new` instead.')
recorded[:] = []
deprecated.old()
self.assertEqual(len(recorded), 2)
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
'`Deprecated.old` is deprecated, use `new` instead.',
'`Renamed.old` is deprecated, use `new` instead.',
])
def test_renamed_subclass_deprecated(self):
"""
Ensure the correct warnings are raised when a class that renamed
`old` subclass one that didn't.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Deprecated(six.with_metaclass(RenameManagerMethods)):
def old(self):
pass
class Renamed(Deprecated):
def new(self):
super(Renamed, self).new()
warnings.simplefilter('always')
renamed = Renamed()
renamed.new()
self.assertEqual(len(recorded), 0)
renamed.old()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`Renamed.old` is deprecated, use `new` instead.')
def test_deprecated_subclass_renamed_and_mixins(self):
"""
Ensure the correct warnings are raised when a subclass inherit from a
class that renamed `old` and mixins that may or may not have renamed
`new`.
"""
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('ignore')
class Renamed(six.with_metaclass(RenameManagerMethods)):
def new(self):
pass
class RenamedMixin(object):
def new(self):
super(RenamedMixin, self).new()
class DeprecatedMixin(object):
def old(self):
super(DeprecatedMixin, self).old()
class Deprecated(DeprecatedMixin, RenamedMixin, Renamed):
pass
warnings.simplefilter('always')
deprecated = Deprecated()
deprecated.new()
self.assertEqual(len(recorded), 1)
msg = str(recorded.pop().message)
self.assertEqual(msg,
'`RenamedMixin.old` is deprecated, use `new` instead.')
deprecated.old()
self.assertEqual(len(recorded), 2)
msgs = [str(warning.message) for warning in recorded]
self.assertEqual(msgs, [
'`DeprecatedMixin.old` is deprecated, use `new` instead.',
'`RenamedMixin.old` is deprecated, use `new` instead.',
])
class DeprecatingSimpleTestCaseUrls(unittest.TestCase):

    def test_deprecation(self):
        """
        Ensure the correct warning is raised when SimpleTestCase.urls is used.
        """
        # Defining `urls` on a SimpleTestCase subclass is the deprecated API
        # under test.
        class TempTestCase(SimpleTestCase):
            urls = 'tests.urls'

            def test(self):
                pass

        with warnings.catch_warnings(record=True) as recorded:
            warnings.filterwarnings('always')
            # Running the inner suite triggers the deprecation path; its
            # output is discarded by streaming the runner to os.devnull.
            suite = unittest.TestLoader().loadTestsFromTestCase(TempTestCase)
            with open(os.devnull, 'w') as devnull:
                unittest.TextTestRunner(stream=devnull, verbosity=2).run(suite)
            # The most recent recorded warning must carry the exact message.
            msg = force_text(recorded.pop().message)
            self.assertEqual(msg,
                "SimpleTestCase.urls is deprecated and will be removed in "
                "Django 1.10. Use @override_settings(ROOT_URLCONF=...) "
                "in TempTestCase instead.")
| bsd-3-clause | -3,775,015,545,703,534,600 | 35.447236 | 79 | 0.569006 | false |
twz915/django | django/db/backends/sqlite3/base.py | 4 | 17967 | """
SQLite3 backend for the sqlite3 module in the standard library.
"""
import decimal
import re
import warnings
from sqlite3 import dbapi2 as Database
import pytz
from django.core.exceptions import ImproperlyConfigured
from django.db import utils
from django.db.backends import utils as backend_utils
from django.db.backends.base.base import BaseDatabaseWrapper
from django.utils import timezone
from django.utils.dateparse import (
parse_date, parse_datetime, parse_duration, parse_time,
)
from django.utils.encoding import force_text
from .client import DatabaseClient # isort:skip
from .creation import DatabaseCreation # isort:skip
from .features import DatabaseFeatures # isort:skip
from .introspection import DatabaseIntrospection # isort:skip
from .operations import DatabaseOperations # isort:skip
from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
    """Wrap *conv_func* so it receives text rather than bytes.

    The sqlite3 module always hands registered converters raw byte
    strings; the callable returned here decodes them as UTF-8 before
    delegating to ``conv_func``.
    """
    def convert(raw_bytes):
        return conv_func(raw_bytes.decode('utf-8'))
    return convert
# Register converters so that typed SQLite columns come back as the proper
# Python objects, plus an adapter so Decimal values round-trip losslessly.
Database.register_converter("bool", decoder(lambda s: s == '1'))
Database.register_converter("time", decoder(parse_time))
Database.register_converter("date", decoder(parse_date))
Database.register_converter("datetime", decoder(parse_datetime))
Database.register_converter("timestamp", decoder(parse_datetime))
Database.register_converter("TIMESTAMP", decoder(parse_datetime))
Database.register_converter("decimal", decoder(backend_utils.typecast_decimal))
Database.register_adapter(decimal.Decimal, backend_utils.rev_typecast_decimal)
class DatabaseWrapper(BaseDatabaseWrapper):
    """SQLite implementation of Django's database backend interface."""
    vendor = 'sqlite'
    # SQLite doesn't actually support most of these types, but it "does the right
    # thing" given more verbose field definitions, so leave them as is so that
    # schema inspection is more useful.
    data_types = {
        'AutoField': 'integer',
        'BigAutoField': 'integer',
        'BinaryField': 'BLOB',
        'BooleanField': 'bool',
        'CharField': 'varchar(%(max_length)s)',
        'DateField': 'date',
        'DateTimeField': 'datetime',
        'DecimalField': 'decimal',
        'DurationField': 'bigint',
        'FileField': 'varchar(%(max_length)s)',
        'FilePathField': 'varchar(%(max_length)s)',
        'FloatField': 'real',
        'IntegerField': 'integer',
        'BigIntegerField': 'bigint',
        'IPAddressField': 'char(15)',
        'GenericIPAddressField': 'char(39)',
        'NullBooleanField': 'bool',
        'OneToOneField': 'integer',
        'PositiveIntegerField': 'integer unsigned',
        'PositiveSmallIntegerField': 'smallint unsigned',
        'SlugField': 'varchar(%(max_length)s)',
        'SmallIntegerField': 'smallint',
        'TextField': 'text',
        'TimeField': 'time',
        'UUIDField': 'char(32)',
    }
    data_types_suffix = {
        'AutoField': 'AUTOINCREMENT',
        'BigAutoField': 'AUTOINCREMENT',
    }
    # SQLite requires LIKE statements to include an ESCAPE clause if the value
    # being escaped has a percent or underscore in it.
    # See http://www.sqlite.org/lang_expr.html for an explanation.
    operators = {
        'exact': '= %s',
        'iexact': "LIKE %s ESCAPE '\\'",
        'contains': "LIKE %s ESCAPE '\\'",
        'icontains': "LIKE %s ESCAPE '\\'",
        'regex': 'REGEXP %s',
        'iregex': "REGEXP '(?i)' || %s",
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': "LIKE %s ESCAPE '\\'",
        'endswith': "LIKE %s ESCAPE '\\'",
        'istartswith': "LIKE %s ESCAPE '\\'",
        'iendswith': "LIKE %s ESCAPE '\\'",
    }
    # The patterns below are used to generate SQL pattern lookup clauses when
    # the right-hand side of the lookup isn't a raw string (it might be an expression
    # or the result of a bilateral transformation).
    # In those cases, special characters for LIKE operators (e.g. \, *, _) should be
    # escaped on database side.
    #
    # Note: we use str.format() here for readability as '%' is used as a wildcard for
    # the LIKE operator.
    pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
    pattern_ops = {
        'contains': r"LIKE '%%' || {} || '%%' ESCAPE '\'",
        'icontains': r"LIKE '%%' || UPPER({}) || '%%' ESCAPE '\'",
        'startswith': r"LIKE {} || '%%' ESCAPE '\'",
        'istartswith': r"LIKE UPPER({}) || '%%' ESCAPE '\'",
        'endswith': r"LIKE '%%' || {} ESCAPE '\'",
        'iendswith': r"LIKE '%%' || UPPER({}) ESCAPE '\'",
    }
    Database = Database
    SchemaEditorClass = DatabaseSchemaEditor
    # Classes instantiated in __init__().
    client_class = DatabaseClient
    creation_class = DatabaseCreation
    features_class = DatabaseFeatures
    introspection_class = DatabaseIntrospection
    ops_class = DatabaseOperations
    def get_connection_params(self):
        """Build the kwargs dict passed to sqlite3.connect() from settings."""
        settings_dict = self.settings_dict
        if not settings_dict['NAME']:
            raise ImproperlyConfigured(
                "settings.DATABASES is improperly configured. "
                "Please supply the NAME value.")
        kwargs = {
            'database': settings_dict['NAME'],
            'detect_types': Database.PARSE_DECLTYPES | Database.PARSE_COLNAMES,
        }
        kwargs.update(settings_dict['OPTIONS'])
        # Always allow the underlying SQLite connection to be shareable
        # between multiple threads. The safe-guarding will be handled at a
        # higher level by the `BaseDatabaseWrapper.allow_thread_sharing`
        # property. This is necessary as the shareability is disabled by
        # default in pysqlite and it cannot be changed once a connection is
        # opened.
        if 'check_same_thread' in kwargs and kwargs['check_same_thread']:
            warnings.warn(
                'The `check_same_thread` option was provided and set to '
                'True. It will be overridden with False. Use the '
                '`DatabaseWrapper.allow_thread_sharing` property instead '
                'for controlling thread shareability.',
                RuntimeWarning
            )
        kwargs.update({'check_same_thread': False})
        if self.features.can_share_in_memory_db:
            kwargs.update({'uri': True})
        return kwargs
    def get_new_connection(self, conn_params):
        """Open a connection and register the SQL functions that Django's
        generated SQL relies on (date/time extraction and truncation, REGEXP,
        datetime arithmetic, POWER)."""
        conn = Database.connect(**conn_params)
        conn.create_function("django_date_extract", 2, _sqlite_date_extract)
        conn.create_function("django_date_trunc", 2, _sqlite_date_trunc)
        conn.create_function("django_datetime_cast_date", 2, _sqlite_datetime_cast_date)
        conn.create_function("django_datetime_cast_time", 2, _sqlite_datetime_cast_time)
        conn.create_function("django_datetime_extract", 3, _sqlite_datetime_extract)
        conn.create_function("django_datetime_trunc", 3, _sqlite_datetime_trunc)
        conn.create_function("django_time_extract", 2, _sqlite_time_extract)
        conn.create_function("django_time_trunc", 2, _sqlite_time_trunc)
        conn.create_function("django_time_diff", 2, _sqlite_time_diff)
        conn.create_function("django_timestamp_diff", 2, _sqlite_timestamp_diff)
        conn.create_function("regexp", 2, _sqlite_regexp)
        conn.create_function("django_format_dtdelta", 3, _sqlite_format_dtdelta)
        conn.create_function("django_power", 2, _sqlite_power)
        return conn
    def init_connection_state(self):
        # No per-connection initialization is needed for SQLite.
        pass
    def create_cursor(self, name=None):
        """Return a cursor that rewrites "format" placeholders to "qmark"."""
        return self.connection.cursor(factory=SQLiteCursorWrapper)
    def close(self):
        """Close the connection, unless it would destroy an in-memory database."""
        self.validate_thread_sharing()
        # If database is in memory, closing the connection destroys the
        # database. To prevent accidental data loss, ignore close requests on
        # an in-memory db.
        if not self.is_in_memory_db():
            BaseDatabaseWrapper.close(self)
    def _savepoint_allowed(self):
        # Two conditions are required here:
        # - A sufficiently recent version of SQLite to support savepoints,
        # - Being in a transaction, which can only happen inside 'atomic'.
        # When 'isolation_level' is not None, sqlite3 commits before each
        # savepoint; it's a bug. When it is None, savepoints don't make sense
        # because autocommit is enabled. The only exception is inside 'atomic'
        # blocks. To work around that bug, on SQLite, 'atomic' starts a
        # transaction explicitly rather than simply disable autocommit.
        return self.features.uses_savepoints and self.in_atomic_block
    def _set_autocommit(self, autocommit):
        """Toggle autocommit by switching the sqlite3 isolation_level."""
        if autocommit:
            level = None
        else:
            # sqlite3's internal default is ''. It's different from None.
            # See Modules/_sqlite/connection.c.
            level = ''
        # 'isolation_level' is a misleading API.
        # SQLite always runs at the SERIALIZABLE isolation level.
        with self.wrap_database_errors:
            self.connection.isolation_level = level
    def check_constraints(self, table_names=None):
        """
        Checks each table name in `table_names` for rows with invalid foreign
        key references. This method is intended to be used in conjunction with
        `disable_constraint_checking()` and `enable_constraint_checking()`, to
        determine if rows with invalid references were entered while constraint
        checks were off.
        Raises an IntegrityError on the first invalid foreign key reference
        encountered (if any) and provides detailed information about the
        invalid reference in the error message.
        Backends can override this method if they can more directly apply
        constraint checking (e.g. via "SET CONSTRAINTS ALL IMMEDIATE")
        """
        cursor = self.cursor()
        if table_names is None:
            table_names = self.introspection.table_names(cursor)
        for table_name in table_names:
            primary_key_column_name = self.introspection.get_primary_key_column(cursor, table_name)
            if not primary_key_column_name:
                # Tables without a primary key can't be checked this way.
                continue
            key_columns = self.introspection.get_key_columns(cursor, table_name)
            for column_name, referenced_table_name, referenced_column_name in key_columns:
                # Find rows whose FK value has no matching row in the
                # referenced table (LEFT JOIN with NULL on the referred side).
                cursor.execute(
                    """
                    SELECT REFERRING.`%s`, REFERRING.`%s` FROM `%s` as REFERRING
                    LEFT JOIN `%s` as REFERRED
                    ON (REFERRING.`%s` = REFERRED.`%s`)
                    WHERE REFERRING.`%s` IS NOT NULL AND REFERRED.`%s` IS NULL
                    """
                    % (
                        primary_key_column_name, column_name, table_name,
                        referenced_table_name, column_name, referenced_column_name,
                        column_name, referenced_column_name,
                    )
                )
                for bad_row in cursor.fetchall():
                    raise utils.IntegrityError(
                        "The row in table '%s' with primary key '%s' has an "
                        "invalid foreign key: %s.%s contains a value '%s' that "
                        "does not have a corresponding value in %s.%s." % (
                            table_name, bad_row[0], table_name, column_name,
                            bad_row[1], referenced_table_name, referenced_column_name,
                        )
                    )
    def is_usable(self):
        # SQLite connections have no server to lose contact with.
        return True
    def _start_transaction_under_autocommit(self):
        """
        Start a transaction explicitly in autocommit mode.
        Staying in autocommit mode works around a bug of sqlite3 that breaks
        savepoints when autocommit is disabled.
        """
        self.cursor().execute("BEGIN")
    def is_in_memory_db(self):
        """Return True if the configured database lives only in memory."""
        return self.creation.is_in_memory_db(self.settings_dict['NAME'])
# Matches "%s" placeholders that are not escaped as "%%s".
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')
class SQLiteCursorWrapper(Database.Cursor):
    """
    Django uses "format" style placeholders, but pysqlite2 uses "qmark" style.
    This fixes it -- but note that if you want to use a literal "%s" in a query,
    you'll need to use "%%s".
    """
    def execute(self, query, params=None):
        # Only rewrite placeholders when params are supplied; a bare query may
        # legitimately contain literal '%' characters.
        if params is None:
            return Database.Cursor.execute(self, query)
        query = self.convert_query(query)
        return Database.Cursor.execute(self, query, params)
    def executemany(self, query, param_list):
        query = self.convert_query(query)
        return Database.Cursor.executemany(self, query, param_list)
    def convert_query(self, query):
        # Turn unescaped %s placeholders into sqlite's ?, then collapse the
        # %% escapes back to a single literal %.
        return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%')
def _sqlite_date_extract(lookup_type, dt):
    """Implementation of django_date_extract(): pull one component out of a
    date/datetime string; return None for NULL or unparsable input."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday = 1 through Saturday = 7.
        return (parsed.isoweekday() % 7) + 1
    if lookup_type == 'week':
        return parsed.isocalendar()[1]
    return getattr(parsed, lookup_type)
def _sqlite_date_trunc(lookup_type, dt):
    """Implementation of django_date_trunc(): truncate a date string to the
    given precision; return None (implicitly for unknown lookups too) on
    unparsable input."""
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'year':
        return "%i-01-01" % parsed.year
    if lookup_type == 'month':
        return "%i-%02i-01" % (parsed.year, parsed.month)
    if lookup_type == 'day':
        return "%i-%02i-%02i" % (parsed.year, parsed.month, parsed.day)
def _sqlite_time_trunc(lookup_type, dt):
    """Implementation of django_time_trunc(): truncate a time string to the
    given precision; return None on unparsable input."""
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    if lookup_type == 'hour':
        return "%02i:00:00" % parsed.hour
    if lookup_type == 'minute':
        return "%02i:%02i:00" % (parsed.hour, parsed.minute)
    if lookup_type == 'second':
        return "%02i:%02i:%02i" % (parsed.hour, parsed.minute, parsed.second)
def _sqlite_datetime_parse(dt, tzname):
    """Parse a SQLite datetime string; localize it when a timezone name is
    given. Returns None for NULL or unparsable input."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_timestamp(dt)
    except (ValueError, TypeError):
        return None
    if tzname is None:
        return parsed
    return timezone.localtime(parsed, pytz.timezone(tzname))
def _sqlite_datetime_cast_date(dt, tzname):
    """Implementation of django_datetime_cast_date(): ISO date part of a
    datetime string, evaluated in the tzname timezone."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.date().isoformat()
def _sqlite_datetime_cast_time(dt, tzname):
    """Implementation of django_datetime_cast_time(): ISO time part of a
    datetime string, evaluated in the tzname timezone."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    return None if parsed is None else parsed.time().isoformat()
def _sqlite_datetime_extract(lookup_type, dt, tzname):
    """Implementation of django_datetime_extract(): pull one component out of
    a datetime string, interpreted in the tzname timezone."""
    parsed = _sqlite_datetime_parse(dt, tzname)
    if parsed is None:
        return None
    if lookup_type == 'week_day':
        # SQL convention: Sunday = 1 through Saturday = 7.
        return (parsed.isoweekday() % 7) + 1
    if lookup_type == 'week':
        return parsed.isocalendar()[1]
    return getattr(parsed, lookup_type)
def _sqlite_datetime_trunc(lookup_type, dt, tzname):
    """Implementation of django_datetime_trunc(): truncate a datetime string
    (interpreted in tzname) to the given precision. Returns None for NULL or
    unparsable input, and implicitly for unknown lookup types."""
    dt = _sqlite_datetime_parse(dt, tzname)
    if dt is None:
        return None
    if lookup_type == 'year':
        return "%i-01-01 00:00:00" % dt.year
    elif lookup_type == 'month':
        return "%i-%02i-01 00:00:00" % (dt.year, dt.month)
    elif lookup_type == 'day':
        return "%i-%02i-%02i 00:00:00" % (dt.year, dt.month, dt.day)
    elif lookup_type == 'hour':
        return "%i-%02i-%02i %02i:00:00" % (dt.year, dt.month, dt.day, dt.hour)
    elif lookup_type == 'minute':
        return "%i-%02i-%02i %02i:%02i:00" % (dt.year, dt.month, dt.day, dt.hour, dt.minute)
    elif lookup_type == 'second':
        return "%i-%02i-%02i %02i:%02i:%02i" % (dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second)
def _sqlite_time_extract(lookup_type, dt):
    """Implementation of django_time_extract(): pull hour/minute/second (etc.)
    out of a time string; return None for NULL or unparsable input."""
    if dt is None:
        return None
    try:
        parsed = backend_utils.typecast_time(dt)
    except (ValueError, TypeError):
        return None
    return getattr(parsed, lookup_type)
def _sqlite_format_dtdelta(conn, lhs, rhs):
    """
    LHS and RHS can be either:
    - An integer number of microseconds
    - A string representing a timedelta object
    - A string representing a datetime

    ``conn`` is the arithmetic connector: '+' adds the operands, anything
    else subtracts RHS from LHS (only '-' is expected from the generated SQL
    -- TODO confirm against the callers).
    Returns the string form of the result, or None if an operand can't be
    interpreted.
    """
    try:
        # Integers are microsecond counts; convert to a "seconds" decimal
        # string so parse_duration can handle them.
        if isinstance(lhs, int):
            lhs = str(decimal.Decimal(lhs) / decimal.Decimal(1000000))
        real_lhs = parse_duration(lhs)
        if real_lhs is None:
            # Not a duration string, so it must be a datetime string.
            real_lhs = backend_utils.typecast_timestamp(lhs)
        if isinstance(rhs, int):
            rhs = str(decimal.Decimal(rhs) / decimal.Decimal(1000000))
        real_rhs = parse_duration(rhs)
        if real_rhs is None:
            real_rhs = backend_utils.typecast_timestamp(rhs)
        if conn.strip() == '+':
            out = real_lhs + real_rhs
        else:
            out = real_lhs - real_rhs
    except (ValueError, TypeError):
        return None
    # typecast_timestamp returns a date or a datetime without timezone.
    # It will be formatted as "%Y-%m-%d" or "%Y-%m-%d %H:%M:%S[.%f]"
    return str(out)
def _sqlite_time_diff(lhs, rhs):
    """Implementation of django_time_diff(): difference between two time
    strings, in microseconds."""
    def as_microseconds(value):
        t = backend_utils.typecast_time(value)
        return ((t.hour * 3600 + t.minute * 60 + t.second) * 1000000 +
                t.microsecond)
    return as_microseconds(lhs) - as_microseconds(rhs)
def _sqlite_timestamp_diff(lhs, rhs):
    """Implementation of django_timestamp_diff(): difference between two
    timestamp strings, in microseconds."""
    delta = (backend_utils.typecast_timestamp(lhs) -
             backend_utils.typecast_timestamp(rhs))
    return delta.total_seconds() * 1000000
def _sqlite_regexp(re_pattern, re_string):
    """Implementation of the REGEXP operator; NULL values never match."""
    if re_string is None:
        return False
    return re.search(re_pattern, force_text(re_string)) is not None
def _sqlite_power(x, y):
return x ** y
| bsd-3-clause | -5,072,088,559,254,659,000 | 37.721983 | 105 | 0.610953 | false |
laborautonomo/bitmask_client | src/leap/bitmask/util/credentials.py | 6 | 2785 | # -*- coding: utf-8 -*-
# credentials.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Credentials utilities
"""
from PySide import QtCore, QtGui
# Trivially guessable passwords that password_checks() rejects outright.
WEAK_PASSWORDS = ("123456", "qweasd", "qwerty", "password")
# Usernames: start with a lowercase letter, end with a letter/digit; the
# middle may also contain underscores, hyphens and dots.
USERNAME_REGEX = r"^[a-z][a-z\d_\-\.]+[a-z\d]$"
USERNAME_VALIDATOR = QtGui.QRegExpValidator(QtCore.QRegExp(USERNAME_REGEX))
def username_checks(username):
    """Validate a prospective username.

    :param username: username provided at the registration form
    :type username: str
    :returns: (True, None) when valid, (False, message) otherwise
    :rtype: tuple(bool, str)
    """
    # translation helper
    _tr = QtCore.QObject().tr
    message = None
    if len(username) < 2:
        message = _tr("Username must have at least 2 characters")
    state = USERNAME_VALIDATOR.validate(username, 0)[0]
    if message is None and state != QtGui.QValidator.State.Acceptable:
        message = _tr("That username is not allowed. Try another.")
    return message is None, message
def password_checks(username, password, password2):
    """
    Performs basic password checks to avoid really easy passwords.
    :param username: username provided at the registrarion form
    :type username: str
    :param password: password from the registration form
    :type password: str
    :param password2: second password from the registration form
    :type password: str
    :returns: (True, None, None) if all the checks pass,
              (False, message, field name) otherwise
    :rtype: tuple(bool, str, str)
    """
    # translation helper
    _tr = QtCore.QObject().tr
    # Each rule: (failed?, untranslated message, offending form field).
    # The first failing rule wins, matching the original check order.
    rules = (
        (password != password2,
         "Passwords don't match", 'new_password_confirmation'),
        (not password,
         "Password is empty", 'new_password'),
        (len(password) < 8,
         "Password is too short", 'new_password'),
        (password in WEAK_PASSWORDS,
         "Password is too easy", 'new_password'),
        (username == password,
         "Password can't be the same as username", 'new_password'),
    )
    for failed, text, field in rules:
        if failed:
            return False, _tr(text), field
    return True, None, None
| gpl-3.0 | 446,410,588,612,420,500 | 31.011494 | 75 | 0.680431 | false |
JamisHoo/Cloud-Image-Migration-Tool | usr/lib/requests/packages/chardet/langhungarianmodel.py | 2763 | 12536 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Language model for Hungarian text encoded as ISO-8859-2 (Latin-2).
Latin2HungarianModel = {
  'charToOrderMap': Latin2_HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "ISO-8859-2"
}
# Language model for Hungarian text encoded as windows-1250.
Win1250HungarianModel = {
  'charToOrderMap': win1250HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "windows-1250"
}
# flake8: noqa
| mit | -7,997,055,318,188,062,000 | 54.715556 | 70 | 0.555361 | false |
pico12/trading-with-python | sandbox/spreadCalculations.py | 78 | 1496 | '''
Created on 28 okt 2011
@author: jev
'''
from tradingWithPython import estimateBeta, Spread, returns, Portfolio, readBiggerScreener
from tradingWithPython.lib import yahooFinance
from pandas import DataFrame, Series
import numpy as np
import matplotlib.pyplot as plt
import os
symbols = ['SPY','IWM']
y = yahooFinance.HistData('temp.csv')
y.startDate = (2007,1,1)
df = y.loadSymbols(symbols,forceDownload=False)
#df = y.downloadData(symbols)
res = readBiggerScreener('CointPairs.csv')
#---check with spread scanner
#sp = DataFrame(index=symbols)
#
#sp['last'] = df.ix[-1,:]
#sp['targetCapital'] = Series({'SPY':100,'IWM':-100})
#sp['targetShares'] = sp['targetCapital']/sp['last']
#print sp
#The dollar-neutral ratio is about 1 * IWM - 1.7 * IWM. You will get the spread = zero (or probably very near zero)
#s = Spread(symbols, histClose = df)
#print s
#s.value.plot()
#print 'beta (returns)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='returns')
#print 'beta (log)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='log')
#print 'beta (standard)', estimateBeta(df[symbols[0]],df[symbols[1]],algo='standard')
#p = Portfolio(df)
#p.setShares([1, -1.7])
#p.value.plot()
quote = yahooFinance.getQuote(symbols)
print quote
s = Spread(symbols,histClose=df, estimateBeta = False)
s.setLast(quote['last'])
s.setShares(Series({'SPY':1,'IWM':-1.7}))
print s
#s.value.plot()
#s.plot()
fig = figure(2)
s.plot()
| bsd-3-clause | 2,500,766,741,004,289,500 | 21.375 | 115 | 0.670455 | false |
chenjun0210/tensorflow | tensorflow/python/tools/strip_unused_lib.py | 37 | 4314 | # pylint: disable=g-bad-file-header
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to remove unneeded nodes from a GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from google.protobuf import text_format
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
def strip_unused(input_graph_def, input_node_names, output_node_names,
                 placeholder_type_enum):
  """Removes unused nodes from a GraphDef.

  Args:
    input_graph_def: A graph with nodes we want to prune.
    input_node_names: A list of the nodes we use as inputs.
    output_node_names: A list of the output nodes.
    placeholder_type_enum: The AttrValue enum for the placeholder data type, or
        a list that specifies one value per input node name.

  Returns:
    A GraphDef with all unnecessary ops removed.
  """
  # Swap each designated input node for a Placeholder of the requested dtype.
  # Once the inputs are placeholders, extract_sub_graph() will automatically
  # drop every node that only fed into them.
  pruned_graph_def = graph_pb2.GraphDef()
  for node in input_graph_def.node:
    if node.name not in input_node_names:
      # Not an input boundary - keep an unmodified copy of the node.
      pruned_graph_def.node.extend([copy.deepcopy(node)])
      continue
    placeholder = node_def_pb2.NodeDef()
    placeholder.op = "Placeholder"
    placeholder.name = node.name
    # A list of enums is positional: entry i applies to input_node_names[i].
    if isinstance(placeholder_type_enum, list):
      dtype_enum = placeholder_type_enum[input_node_names.index(node.name)]
    else:
      dtype_enum = placeholder_type_enum
    placeholder.attr["dtype"].CopyFrom(
        attr_value_pb2.AttrValue(type=dtype_enum))
    # Preserve shape metadata when the original node carried it, so shape
    # inference downstream still works on the pruned graph.
    if "_output_shapes" in node.attr:
      placeholder.attr["_output_shapes"].CopyFrom(node.attr["_output_shapes"])
    pruned_graph_def.node.extend([placeholder])
  return graph_util.extract_sub_graph(pruned_graph_def, output_node_names)
def strip_unused_from_files(input_graph, input_binary, output_graph,
                            output_binary, input_node_names, output_node_names,
                            placeholder_type_enum):
  """Removes unused nodes from a graph file."""
  # Validate arguments up front so we never write a partial output file.
  if not gfile.Exists(input_graph):
    print("Input graph file '" + input_graph + "' does not exist!")
    return -1
  if not output_node_names:
    print("You need to supply the name of a node to --output_node_names.")
    return -1
  # Load the graph, parsing either the binary or the text proto serialization.
  input_graph_def = graph_pb2.GraphDef()
  read_mode = "rb" if input_binary else "r"
  with gfile.FastGFile(input_graph, read_mode) as graph_file:
    serialized = graph_file.read()
    if input_binary:
      input_graph_def.ParseFromString(serialized)
    else:
      text_format.Merge(serialized, input_graph_def)
  # Delegate the actual pruning; node-name arguments arrive comma-separated.
  output_graph_def = strip_unused(input_graph_def,
                                  input_node_names.split(","),
                                  output_node_names.split(","),
                                  placeholder_type_enum)
  # Write the pruned graph back out in the requested serialization.
  if output_binary:
    with gfile.GFile(output_graph, "wb") as out_file:
      out_file.write(output_graph_def.SerializeToString())
  else:
    with gfile.GFile(output_graph, "w") as out_file:
      out_file.write(text_format.MessageToString(output_graph_def))
  print("%d ops in the final graph." % len(output_graph_def.node))
| apache-2.0 | 4,960,497,711,000,664,000 | 39.317757 | 80 | 0.665276 | false |
monikasulik/django-oscar | sites/demo/apps/checkout/views.py | 35 | 5404 | from django.contrib import messages
from django import http
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from datacash.facade import Facade
from oscar.apps.checkout import views, exceptions
from oscar.apps.payment.forms import BankcardForm
from oscar.apps.payment.models import SourceType
from oscar.apps.order.models import BillingAddress
from .forms import BillingAddressForm
# Customise the core PaymentDetailsView to integrate Datacash
class PaymentDetailsView(views.PaymentDetailsView):
    """Checkout payment-details view integrated with the Datacash gateway.

    Extends oscar's core PaymentDetailsView to collect bankcard and billing
    address details, render them back on a preview page (rather than storing
    sensitive card data server-side), and pre-authorise the payment with
    Datacash when the order is placed.
    """
    def check_payment_data_is_captured(self, request):
        # Payment data is only ever submitted via POST; a GET here means the
        # customer skipped the payment form, so send them back to it.
        if request.method != "POST":
            raise exceptions.FailedPreCondition(
                url=reverse('checkout:payment-details'),
                message=_("Please enter your payment details"))
    def get_context_data(self, **kwargs):
        """Add bankcard and billing address forms to the template context."""
        ctx = super(PaymentDetailsView, self).get_context_data(**kwargs)
        # Ensure newly instantiated instances of the bankcard and billing
        # address forms are passed to the template context (when they aren't
        # already specified).
        if 'bankcard_form' not in kwargs:
            ctx['bankcard_form'] = BankcardForm()
        if 'billing_address_form' not in kwargs:
            ctx['billing_address_form'] = self.get_billing_address_form(
                ctx['shipping_address']
            )
        elif kwargs['billing_address_form'].is_valid():
            # On the preview view, we extract the billing address into the
            # template context so we can show it to the customer.
            ctx['billing_address'] = kwargs[
                'billing_address_form'].save(commit=False)
        return ctx
    def get_billing_address_form(self, shipping_address):
        """
        Return an instantiated billing address form
        """
        # Pre-populate from the customer's default billing address when one
        # exists; otherwise return a blank form.
        addr = self.get_default_billing_address()
        if not addr:
            return BillingAddressForm(shipping_address=shipping_address)
        billing_addr = BillingAddress()
        addr.populate_alternative_model(billing_addr)
        return BillingAddressForm(shipping_address=shipping_address,
                                  instance=billing_addr)
    def handle_payment_details_submission(self, request):
        """Validate payment forms and, if valid, show the preview page."""
        # Validate the submitted forms
        bankcard_form = BankcardForm(request.POST)
        shipping_address = self.get_shipping_address(
            self.request.basket)
        address_form = BillingAddressForm(shipping_address, request.POST)
        if address_form.is_valid() and bankcard_form.is_valid():
            # If both forms are valid, we render the preview view with the
            # forms hidden within the page. This seems odd but means we don't
            # have to store sensitive details on the server.
            return self.render_preview(
                request, bankcard_form=bankcard_form,
                billing_address_form=address_form)
        # Forms are invalid - show them to the customer along with the
        # validation errors.
        return self.render_payment_details(
            request, bankcard_form=bankcard_form,
            billing_address_form=address_form)
    def handle_place_order_submission(self, request):
        """Re-validate the hidden forms from the preview page and submit."""
        bankcard_form = BankcardForm(request.POST)
        shipping_address = self.get_shipping_address(
            self.request.basket)
        address_form = BillingAddressForm(shipping_address, request.POST)
        if address_form.is_valid() and bankcard_form.is_valid():
            # Forms still valid, let's submit an order
            submission = self.build_submission(
                order_kwargs={
                    'billing_address': address_form.save(commit=False),
                },
                payment_kwargs={
                    'bankcard_form': bankcard_form,
                    'billing_address_form': address_form
                }
            )
            return self.submit(**submission)
        # Must be DOM tampering as these forms were valid and were rendered in
        # a hidden element. Hence, we don't need to be that friendly with our
        # error message.
        messages.error(request, _("Invalid submission"))
        return http.HttpResponseRedirect(
            reverse('checkout:payment-details'))
    def handle_payment(self, order_number, total, **kwargs):
        """Pre-authorise the bankcard with Datacash and record the source.

        Raises on gateway failure; the parent view turns that into a
        user-facing error.
        """
        # Make request to DataCash - if there any problems (eg bankcard
        # not valid / request refused by bank) then an exception would be
        # raised and handled by the parent PaymentDetail view)
        facade = Facade()
        bankcard = kwargs['bankcard_form'].bankcard
        datacash_ref = facade.pre_authorise(
            order_number, total.incl_tax, bankcard)
        # Request was successful - record the "payment source". As this
        # request was a 'pre-auth', we set the 'amount_allocated' - if we had
        # performed an 'auth' request, then we would set 'amount_debited'.
        # NOTE(review): the throwaway '_' below shadows the ugettext_lazy
        # alias imported at module level for the rest of this method -
        # harmless here since '_' is not used again, but worth renaming.
        source_type, _ = SourceType.objects.get_or_create(name='Datacash')
        source = source_type.sources.model(
            source_type=source_type,
            currency=total.currency,
            amount_allocated=total.incl_tax,
            reference=datacash_ref)
        self.add_payment_source(source)
        # Also record payment event
        self.add_payment_event(
            'pre-auth', total.incl_tax, reference=datacash_ref)
| bsd-3-clause | 6,843,957,011,346,356,000 | 43.295082 | 78 | 0.640822 | false |
maxamillion/ansible | lib/ansible/executor/discovery/python_target.py | 84 | 1234 | # Copyright: (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# FUTURE: this could be swapped out for our bundled version of distro to move more complete platform
# logic to the targets, so long as we maintain Py2.6 compat and don't need to do any kind of script assembly
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import platform
import io
import os
def read_utf8_file(path, encoding='utf-8'):
    """Return the decoded text contents of *path*, or None if unreadable.

    :param path: filesystem path of the file to read
    :param encoding: text encoding used to decode the contents (UTF-8)
    :returns: the file contents as text, or None when the file is missing
        or cannot be opened for reading
    """
    # EAFP: opening the file directly avoids the check-then-open race of an
    # os.access() pre-test, and gives the right answer when the real and
    # effective UIDs differ (os.access checks against the real UID).
    # IOError is caught alongside OSError for Python 2.6 compatibility.
    try:
        with io.open(path, 'r', encoding=encoding) as fd:
            return fd.read()
    except (IOError, OSError):
        return None
def get_platform_info():
    """Gather distribution facts for interpreter discovery.

    Returns a dict with 'platform_dist_result' (output of platform.dist()
    when available) and 'osrelease_content' (text of the os-release file,
    or None when neither location is readable).
    """
    info = dict(platform_dist_result=[])
    # platform.dist() is not present on all Python versions, so probe for it.
    dist_func = getattr(platform, 'dist', None)
    if dist_func is not None:
        info['platform_dist_result'] = dist_func()
    # Prefer /etc/os-release, falling back to /usr/lib/os-release.
    osrelease_content = read_utf8_file('/etc/os-release')
    if not osrelease_content:
        osrelease_content = read_utf8_file('/usr/lib/os-release')
    info['osrelease_content'] = osrelease_content
    return info
def main():
    """Print the gathered platform facts as a JSON document on stdout."""
    print(json.dumps(get_platform_info()))
# Entry point when this file is pushed to a target host and executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 | 6,264,226,885,292,910,000 | 24.708333 | 108 | 0.675041 | false |
wyc/django | tests/forms_tests/widget_tests/test_selectdatewidget.py | 144 | 20641 | from datetime import date
from django.forms import DateField, Form, SelectDateWidget
from django.test import override_settings
from django.utils import translation
from django.utils.dates import MONTHS_AP
from .base import WidgetTest
class SelectDateWidgetTest(WidgetTest):
    """Tests for SelectDateWidget's rendered HTML, validation and l10n."""
    # Show full diffs for the long HTML comparisons below.
    maxDiff = None
    widget = SelectDateWidget(
        years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'),
    )
    # An empty value renders three selects (month/day/year) with nothing chosen.
    def test_render_empty(self):
        self.check_html(self.widget, 'mydate', '', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
            <option value="0">---</option>
            <option value="1">January</option>
            <option value="2">February</option>
            <option value="3">March</option>
            <option value="4">April</option>
            <option value="5">May</option>
            <option value="6">June</option>
            <option value="7">July</option>
            <option value="8">August</option>
            <option value="9">September</option>
            <option value="10">October</option>
            <option value="11">November</option>
            <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
            <option value="0">---</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
            <option value="0">---</option>
            <option value="2007">2007</option>
            <option value="2008">2008</option>
            <option value="2009">2009</option>
            <option value="2010">2010</option>
            <option value="2011">2011</option>
            <option value="2012">2012</option>
            <option value="2013">2013</option>
            <option value="2014">2014</option>
            <option value="2015">2015</option>
            <option value="2016">2016</option>
            </select>
            """
        ))
    def test_render_none(self):
        """
        Rendering the None or '' values should yield the same output.
        """
        self.assertHTMLEqual(
            self.widget.render('mydate', None),
            self.widget.render('mydate', ''),
        )
    # A valid ISO date string selects the matching month/day/year options.
    def test_render_string(self):
        self.check_html(self.widget, 'mydate', '2010-04-15', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
            <option value="0">---</option>
            <option value="1">January</option>
            <option value="2">February</option>
            <option value="3">March</option>
            <option value="4" selected="selected">April</option>
            <option value="5">May</option>
            <option value="6">June</option>
            <option value="7">July</option>
            <option value="8">August</option>
            <option value="9">September</option>
            <option value="10">October</option>
            <option value="11">November</option>
            <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
            <option value="0">---</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15" selected="selected">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
            <option value="0">---</option>
            <option value="2007">2007</option>
            <option value="2008">2008</option>
            <option value="2009">2009</option>
            <option value="2010" selected="selected">2010</option>
            <option value="2011">2011</option>
            <option value="2012">2012</option>
            <option value="2013">2013</option>
            <option value="2014">2014</option>
            <option value="2015">2015</option>
            <option value="2016">2016</option>
            </select>
            """
        ))
    # A datetime.date value renders identically to its ISO string form.
    def test_render_datetime(self):
        self.assertHTMLEqual(
            self.widget.render('mydate', date(2010, 4, 15)),
            self.widget.render('mydate', '2010-04-15'),
        )
    def test_render_invalid_date(self):
        """
        Invalid dates should still render the failed date.
        """
        self.check_html(self.widget, 'mydate', '2010-02-31', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
            <option value="0">---</option>
            <option value="1">January</option>
            <option value="2" selected="selected">February</option>
            <option value="3">March</option>
            <option value="4">April</option>
            <option value="5">May</option>
            <option value="6">June</option>
            <option value="7">July</option>
            <option value="8">August</option>
            <option value="9">September</option>
            <option value="10">October</option>
            <option value="11">November</option>
            <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
            <option value="0">---</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31" selected="selected">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
            <option value="0">---</option>
            <option value="2007">2007</option>
            <option value="2008">2008</option>
            <option value="2009">2009</option>
            <option value="2010" selected="selected">2010</option>
            <option value="2011">2011</option>
            <option value="2012">2012</option>
            <option value="2013">2013</option>
            <option value="2014">2014</option>
            <option value="2015">2015</option>
            <option value="2016">2016</option>
            </select>
            """
        ))
    # The months argument overrides the displayed month labels (AP style here).
    def test_custom_months(self):
        widget = SelectDateWidget(months=MONTHS_AP, years=('2013',))
        self.check_html(widget, 'mydate', '', html=(
            """
            <select name="mydate_month" id="id_mydate_month">
            <option value="0">---</option>
            <option value="1">Jan.</option>
            <option value="2">Feb.</option>
            <option value="3">March</option>
            <option value="4">April</option>
            <option value="5">May</option>
            <option value="6">June</option>
            <option value="7">July</option>
            <option value="8">Aug.</option>
            <option value="9">Sept.</option>
            <option value="10">Oct.</option>
            <option value="11">Nov.</option>
            <option value="12">Dec.</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
            <option value="0">---</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
            <option value="0">---</option>
            <option value="2013">2013</option>
            </select>
            """
        ))
    # The widget's is_required flag should follow the field's required flag.
    def test_selectdate_required(self):
        class GetNotRequiredDate(Form):
            mydate = DateField(widget=SelectDateWidget, required=False)
        class GetRequiredDate(Form):
            mydate = DateField(widget=SelectDateWidget, required=True)
        self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
        self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
    # empty_label may be a single string (used for all three selects) or a
    # (year, month, day) tuple; any other tuple length raises ValueError.
    def test_selectdate_empty_label(self):
        w = SelectDateWidget(years=('2014',), empty_label='empty_label')
        # Rendering the default state with empty_label set as string.
        self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
        w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
        # Rendering the default state with empty_label tuple.
        self.assertHTMLEqual(
            w.render('mydate', ''),
            """
            <select name="mydate_month" id="id_mydate_month">
            <option value="0">empty_month</option>
            <option value="1">January</option>
            <option value="2">February</option>
            <option value="3">March</option>
            <option value="4">April</option>
            <option value="5">May</option>
            <option value="6">June</option>
            <option value="7">July</option>
            <option value="8">August</option>
            <option value="9">September</option>
            <option value="10">October</option>
            <option value="11">November</option>
            <option value="12">December</option>
            </select>
            <select name="mydate_day" id="id_mydate_day">
            <option value="0">empty_day</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
            </select>
            <select name="mydate_year" id="id_mydate_year">
            <option value="0">empty_year</option>
            <option value="2014">2014</option>
            </select>
            """,
        )
        self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
                                 SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
    # Localization: Dutch uses day-first ordering and localized month names.
    @override_settings(USE_L10N=True)
    @translation.override('nl')
    def test_l10n(self):
        w = SelectDateWidget(
            years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016')
        )
        self.assertEqual(
            w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
            '13-08-2010',
        )
        self.assertHTMLEqual(
            w.render('date', '13-08-2010'),
            """
            <select name="date_day" id="id_date_day">
            <option value="0">---</option>
            <option value="1">1</option>
            <option value="2">2</option>
            <option value="3">3</option>
            <option value="4">4</option>
            <option value="5">5</option>
            <option value="6">6</option>
            <option value="7">7</option>
            <option value="8">8</option>
            <option value="9">9</option>
            <option value="10">10</option>
            <option value="11">11</option>
            <option value="12">12</option>
            <option value="13" selected="selected">13</option>
            <option value="14">14</option>
            <option value="15">15</option>
            <option value="16">16</option>
            <option value="17">17</option>
            <option value="18">18</option>
            <option value="19">19</option>
            <option value="20">20</option>
            <option value="21">21</option>
            <option value="22">22</option>
            <option value="23">23</option>
            <option value="24">24</option>
            <option value="25">25</option>
            <option value="26">26</option>
            <option value="27">27</option>
            <option value="28">28</option>
            <option value="29">29</option>
            <option value="30">30</option>
            <option value="31">31</option>
            </select>
            <select name="date_month" id="id_date_month">
            <option value="0">---</option>
            <option value="1">januari</option>
            <option value="2">februari</option>
            <option value="3">maart</option>
            <option value="4">april</option>
            <option value="5">mei</option>
            <option value="6">juni</option>
            <option value="7">juli</option>
            <option value="8" selected="selected">augustus</option>
            <option value="9">september</option>
            <option value="10">oktober</option>
            <option value="11">november</option>
            <option value="12">december</option>
            </select>
            <select name="date_year" id="id_date_year">
            <option value="0">---</option>
            <option value="2007">2007</option>
            <option value="2008">2008</option>
            <option value="2009">2009</option>
            <option value="2010" selected="selected">2010</option>
            <option value="2011">2011</option>
            <option value="2012">2012</option>
            <option value="2013">2013</option>
            <option value="2014">2014</option>
            <option value="2015">2015</option>
            <option value="2016">2016</option>
            </select>
            """,
        )
        # Even with an invalid date, the widget should reflect the entered value (#17401).
        self.assertEqual(w.render('mydate', '2010-02-30').count('selected="selected"'), 3)
        # Years before 1900 should work.
        w = SelectDateWidget(years=('1899',))
        self.assertEqual(
            w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
            '13-08-1899',
        )
| bsd-3-clause | 1,726,808,776,881,684,700 | 42.091858 | 106 | 0.479337 | false |
mhnatiuk/phd_sociology_of_religion | scrapper/lib/python2.7/site-packages/scrapy/spider.py | 15 | 2307 | """
Base class for Scrapy spiders
See documentation in docs/topics/spiders.rst
"""
from scrapy import log
from scrapy.http import Request
from scrapy.utils.trackref import object_ref
from scrapy.utils.url import url_is_from_spider
from scrapy.utils.deprecate import create_deprecated_class
class Spider(object_ref):
    """Base class for scrapy spiders. All spiders must inherit from this
    class.
    """
    # Unique identifier for the spider; must be set by subclasses or passed
    # to __init__, otherwise construction fails.
    name = None
    def __init__(self, name=None, **kwargs):
        # A name given at construction time overrides the class attribute;
        # a spider without any name at all is rejected.
        if name is not None:
            self.name = name
        elif not getattr(self, 'name', None):
            raise ValueError("%s must have a name" % type(self).__name__)
        # Arbitrary keyword arguments become instance attributes (this is how
        # command-line spider arguments are delivered).
        self.__dict__.update(kwargs)
        if not hasattr(self, 'start_urls'):
            self.start_urls = []
    def log(self, message, level=log.DEBUG, **kw):
        """Log the given messages at the given log level. Always use this
        method to send log messages from your spider
        """
        log.msg(message, spider=self, level=level, **kw)
    def set_crawler(self, crawler):
        # A spider may be bound to exactly one crawler for its lifetime.
        assert not hasattr(self, '_crawler'), "Spider already bounded to %s" % crawler
        self._crawler = crawler
    @property
    def crawler(self):
        # Read-only access to the bound crawler; fails loudly when unbound.
        assert hasattr(self, '_crawler'), "Spider not bounded to any crawler"
        return self._crawler
    @property
    def settings(self):
        # Convenience shortcut for the bound crawler's settings object.
        return self.crawler.settings
    def start_requests(self):
        # Default entry point: one request per URL in start_urls.
        for url in self.start_urls:
            yield self.make_requests_from_url(url)
    def make_requests_from_url(self, url):
        # dont_filter=True so start URLs bypass the duplicate-request filter.
        return Request(url, dont_filter=True)
    def parse(self, response):
        # Default callback for downloaded responses; subclasses must override.
        raise NotImplementedError
    @classmethod
    def handles_request(cls, request):
        # True when the request URL belongs to one of this spider's domains.
        return url_is_from_spider(request.url, cls)
    def __str__(self):
        return "<%s %r at 0x%0x>" % (type(self).__name__, self.name, id(self))
    __repr__ = __str__
# Backwards-compatible alias for the pre-rename class name; presumably warns
# on use - see scrapy.utils.deprecate.create_deprecated_class.
BaseSpider = create_deprecated_class('BaseSpider', Spider)
class ObsoleteClass(object):
    """Placeholder that raises AttributeError with a fixed message on any
    attribute access, used to give a helpful error for removed APIs.
    """
    def __init__(self, message):
        # Store straight into __dict__; instance attributes found there are
        # returned normally, so `message` itself stays readable.
        self.__dict__['message'] = message
    def __getattr__(self, attr_name):
        # Only called for attributes *not* found in __dict__.
        raise AttributeError(self.__dict__['message'])
spiders = ObsoleteClass("""
"from scrapy.spider import spiders" no longer works - use "from scrapy.project import crawler" and then access crawler.spiders attribute"
""")
| gpl-2.0 | 6,059,015,674,953,269,000 | 27.481481 | 137 | 0.642826 | false |
DavidIngraham/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32F103xB.py | 10 | 2607 | #!/usr/bin/env python
'''
these tables are generated from the STM32 datasheets for the
STM32F103x8
'''
# additional build information for ChibiOS
build = {
"CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32f1xx.mk",
"CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32F1xx/platform.mk",
"CHPRINTF_USE_FLOAT" : 'no',
"USE_FPU" : 'no'
}
pincount = {
'A': 16,
'B': 16,
'C': 16,
'D': 16,
'E': 16
}
# MCU parameters
mcu = {
# location of MCU serial number
'UDID_START' : 0x1FFFF7E8,
'RAM_MAP' : [
(0x20000000, 20, 1), # main memory, DMA safe
],
'EXPECTED_CLOCK' : 72000000
}
ADC1_map = {
# format is PIN : ADC1_CHAN
"PA0" : 0,
"PA1" : 1,
"PA2" : 2,
"PA3" : 3,
"PA4" : 4,
"PA5" : 5,
"PA6" : 6,
"PA7" : 7,
"PB0" : 8,
"PB1" : 9,
"PC0" : 10,
"PC1" : 11,
"PC2" : 12,
"PC3" : 13,
"PC4" : 14,
"PC5" : 15,
}
DMA_Map = {
# format is (DMA_TABLE, StreamNum, Channel)
"ADC1" : [(1,1,0)],
"TIM1_CH1" : [(1,2,0)],
"TIM1_CH3" : [(1,6,0)],
"TIM1_CH4" : [(1,4,0)],
"TIM1_UP" : [(1,5,0)],
"TIM2_CH1" : [(1,5,0)],
"TIM2_CH2" : [(1,7,0)],
"TIM2_CH3" : [(1,1,0)],
"TIM2_CH4" : [(1,7,0)],
"TIM2_UP" : [(1,2,0)],
"TIM3_CH1" : [(1,6,0)],
"TIM3_CH3" : [(1,2,0)],
"TIM3_CH4" : [(1,3,0)],
"TIM3_UP" : [(1,3,0)],
"TIM4_CH1" : [(1,1,0)],
"TIM4_CH2" : [(1,4,0)],
"TIM4_CH3" : [(1,5,0)],
"TIM4_UP" : [(1,7,0)],
"TIM5_CH1" : [(2,5,0)],
"TIM5_CH2" : [(2,4,0)],
"TIM5_CH3" : [(2,2,0)],
"TIM5_CH4" : [(2,1,0)],
"TIM5_UP" : [(2,2,0)],
"TIM8_CH1" : [(2,3,0)],
"TIM8_CH2" : [(2,5,0)],
"TIM8_CH3" : [(2,1,0)],
"TIM8_CH4" : [(2,2,0)],
"TIM8_UP" : [(2,1,0)],
"TIM6_UP" : [(2,3,0)],
"TIM7_UP" : [(2,4,0)],
"I2C1_RX" : [(1,7,0)],
"I2C1_TX" : [(1,6,0)],
"I2C2_RX" : [(1,5,0)],
"I2C2_TX" : [(1,4,0)],
"SPI1_RX" : [(1,2,0)],
"SPI1_TX" : [(1,3,0)],
"SPI2_RX" : [(1,4,0)],
"SPI2_TX" : [(1,5,0)],
"SPI3_RX" : [(2,1,0)],
"SPI3_TX" : [(2,2,0)],
"UART4_RX" : [(2,3,0)],
"UART4_TX" : [(2,5,0)],
"USART1_RX" : [(1,5,0)],
"USART1_TX" : [(1,4,0)],
"USART2_RX" : [(1,6,0)],
"USART2_TX" : [(1,7,0)],
"USART3_RX" : [(1,3,0)],
"USART3_TX" : [(1,2,0)],
}
| gpl-3.0 | -1,778,594,524,064,420,900 | 23.364486 | 93 | 0.384733 | false |
pshen/ansible | lib/ansible/module_utils/pure.py | 71 | 3161 | # -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Simon Dodsley <simon@purestorage.com>,2017
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Flag consumed by modules to emit a friendly error when the purestorage
# SDK is not importable on the target host.
HAS_PURESTORAGE = True
try:
    from purestorage import purestorage
except ImportError:
    HAS_PURESTORAGE = False
# NOTE(review): 'wraps' and 'path' appear unused in this part of the file -
# confirm against the full module before removing.
from functools import wraps
from os import environ
from os import path
import platform
# Version and product name reported in the HTTP User-Agent string sent to
# the FlashArray REST API (see get_system below).
VERSION = 1.0
USER_AGENT_BASE = 'Ansible'
def get_system(module):
    """Return System Object or Fail

    Builds a purestorage.FlashArray client from the module's fa_url and
    api_token parameters, falling back to the PUREFA_URL / PUREFA_API
    environment variables, and verifies the credentials with a test call.
    """
    user_agent = '%(base)s %(class)s/%(version)s (%(platform)s)' % {
        'base': USER_AGENT_BASE,
        'class': __name__,
        'version': VERSION,
        'platform': platform.platform()
    }
    array_name = module.params['fa_url']
    api = module.params['api_token']
    # Module parameters win; only when either is missing do we consult the
    # environment (both values are then taken from the environment).
    if not (array_name and api):
        array_name = environ.get('PUREFA_URL')
        api = environ.get('PUREFA_API')
    if array_name and api:
        system = purestorage.FlashArray(array_name, api_token=api,
                                        user_agent=user_agent)
    else:
        module.fail_json(msg="You must set PUREFA_URL and PUREFA_API environment variables or the fa_url and api_token module arguments")
    # Probe the array once so bad credentials fail fast with a clear message.
    try:
        system.get()
    except Exception:
        module.fail_json(msg="Pure Storage FlashArray authentication failed. Check your credentials")
    return system
def purefa_argument_spec():
    """Return standard base dictionary used for the argument_spec argument in AnsibleModule"""
    # api_token is marked no_log so credentials never appear in task output.
    return {
        'fa_url': dict(),
        'api_token': dict(no_log=True),
    }
| gpl-3.0 | -7,795,782,129,839,667,000 | 40.051948 | 137 | 0.726036 | false |
CompassionCH/bank-payment | account_payment_partner/models/account_invoice.py | 1 | 5160 | # -*- coding: utf-8 -*-
# Copyright 2014-16 Akretion - Alexis de Lattre <alexis.delattre@akretion.com>
# Copyright 2014 Serv. Tecnol. Avanzados - Pedro M. Baeza
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import models, fields, api
class AccountInvoice(models.Model):
    """Extend invoices with a payment mode and related bank-account fields."""
    _inherit = 'account.invoice'
    # Payment mode used to pay (vendor bills) or collect (customer invoices)
    # this invoice; locked once the invoice leaves the draft state.
    payment_mode_id = fields.Many2one(
        comodel_name='account.payment.mode', string="Payment Mode",
        ondelete='restrict',
        readonly=True, states={'draft': [('readonly', False)]})
    # Mirrors the payment method's flag; used by views/onchanges to decide
    # whether a partner bank account must be selected.
    bank_account_required = fields.Boolean(
        related='payment_mode_id.payment_method_id.bank_account_required',
        readonly=True)
    # Tighten the core field: forbid deleting a bank account still referenced
    # by invoices.
    partner_bank_id = fields.Many2one(ondelete='restrict')
    @api.onchange('partner_id', 'company_id')
    def _onchange_partner_id(self):
        """Propagate the partner's default payment mode onto the invoice.

        Vendor bills additionally pre-select the partner's first bank
        account when the chosen (outbound) payment method requires one.
        """
        res = super(AccountInvoice, self)._onchange_partner_id()
        if self.partner_id:
            if self.type == 'in_invoice':
                pay_mode = self.partner_id.supplier_payment_mode_id
                self.payment_mode_id = pay_mode
                if (
                        pay_mode and
                        pay_mode.payment_type == 'outbound' and
                        pay_mode.payment_method_id.bank_account_required and
                        self.commercial_partner_id.bank_ids):
                    self.partner_bank_id =\
                        self.commercial_partner_id.bank_ids[0]
            elif self.type == 'out_invoice':
                # No bank account assignation is done here as this is only
                # needed for printing purposes and it can conflict with
                # SEPA direct debit payments. Current report prints it.
                self.payment_mode_id = self.partner_id.customer_payment_mode_id
        else:
            # No partner selected: clear the derived values.
            self.payment_mode_id = False
            if self.type == 'in_invoice':
                self.partner_bank_id = False
        return res
@api.model
def create(self, vals):
"""Fill the payment_mode_id from the partner if none is provided on
creation, using same method as upstream."""
onchanges = {
'_onchange_partner_id': ['payment_mode_id'],
}
for onchange_method, changed_fields in onchanges.items():
if any(f not in vals for f in changed_fields):
invoice = self.new(vals)
getattr(invoice, onchange_method)()
for field in changed_fields:
if field not in vals and invoice[field]:
vals[field] = invoice._fields[field].convert_to_write(
invoice[field], invoice,
)
return super(AccountInvoice, self).create(vals)
@api.onchange('payment_mode_id')
def payment_mode_id_change(self):
if (
self.payment_mode_id and
self.payment_mode_id.payment_type == 'outbound' and
not self.payment_mode_id.payment_method_id.
bank_account_required):
self.partner_bank_id = False
elif not self.payment_mode_id:
self.partner_bank_id = False
@api.model
def line_get_convert(self, line, part):
"""Copy payment mode from invoice to account move line"""
res = super(AccountInvoice, self).line_get_convert(line, part)
if line.get('type') == 'dest' and line.get('invoice_id'):
invoice = self.browse(line['invoice_id'])
res['payment_mode_id'] = invoice.payment_mode_id.id or False
return res
# I think copying payment mode from invoice to refund by default
# is a good idea because the most common way of "paying" a refund is to
# deduct it on the payment of the next invoice (and OCA/bank-payment
# allows to have negative payment lines since March 2016)
@api.model
def _prepare_refund(
self, invoice, date_invoice=None, date=None, description=None,
journal_id=None):
vals = super(AccountInvoice, self)._prepare_refund(
invoice, date_invoice=date_invoice, date=date,
description=description, journal_id=journal_id)
vals['payment_mode_id'] = invoice.payment_mode_id.id
if invoice.type == 'in_invoice':
vals['partner_bank_id'] = invoice.partner_bank_id.id
return vals
def partner_banks_to_show(self):
self.ensure_one()
if self.partner_bank_id:
return self.partner_bank_id
if self.payment_mode_id.show_bank_account_from_journal:
if self.payment_mode_id.bank_account_link == 'fixed':
return self.payment_mode_id.fixed_journal_id.bank_account_id
else:
return self.payment_mode_id.variable_journal_ids.mapped(
'bank_account_id')
if self.payment_mode_id.payment_method_id.code == \
'sepa_direct_debit': # pragma: no cover
return (self.mandate_id.partner_bank_id or
self.partner_id.valid_mandate_id.partner_bank_id)
# Return this as empty recordset
return self.partner_bank_id
| agpl-3.0 | 2,527,082,670,949,137,000 | 43.869565 | 79 | 0.590116 | false |
dharmabumstead/ansible | test/units/plugins/action/test_raw.py | 44 | 3774 | # (c) 2016, Saran Ahluwalia <ahlusar.ahluwalia@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleActionFail
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock, Mock
from ansible.plugins.action.raw import ActionModule
from ansible.playbook.task import Task
class TestCopyResultExclude(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
# The current behavior of the raw aciton in regards to executable is currently in question;
# the test_raw_executable_is_not_empty_string verifies the current behavior (whether it is desireed or not.
# Please refer to the following for context:
# Issue: https://github.com/ansible/ansible/issues/16054
# PR: https://github.com/ansible/ansible/pull/16085
def test_raw_executable_is_not_empty_string(self):
play_context = Mock()
task = MagicMock(Task)
task.async_val = False
connection = Mock()
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am.run()
self.mock_am._low_level_execute_command.assert_called_with('Args1', executable=False)
def test_raw_check_mode_is_True(self):
play_context = Mock()
task = MagicMock(Task)
task.async_val = False
connection = Mock()
task.args = {'_raw_params': 'Args1'}
play_context.check_mode = True
try:
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
except AnsibleActionFail:
pass
def test_raw_test_environment_is_None(self):
play_context = Mock()
task = MagicMock(Task)
task.async_val = False
connection = Mock()
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.assertEqual(task.environment, None)
def test_raw_task_vars_is_not_None(self):
play_context = Mock()
task = MagicMock(Task)
task.async_val = False
connection = Mock()
task.args = {'_raw_params': 'Args1'}
task.environment = None
play_context.check_mode = False
self.mock_am = ActionModule(task, connection, play_context, loader=None, templar=None, shared_loader_obj=None)
self.mock_am._low_level_execute_command = Mock(return_value={})
self.mock_am.display = Mock()
self.mock_am.run(task_vars={'a': 'b'})
self.assertEqual(task.environment, None)
| gpl-3.0 | -3,491,157,236,456,923,000 | 34.271028 | 122 | 0.670111 | false |
stevec7/ratking | ratking/engine.py | 2 | 3052 | import ast
import ConfigParser
import glob
import grp
import importlib
import multiprocessing
import os
import sys
from drop_privileges import drop_privileges
from jobhandler import JobCtl
from pwd import getpwnam
class SchedCtl(object):
def __init__(self, sched, config, logging):
self.sched = sched
self.config = config
self.logging = logging
# create an object to link to the job control class. only really used by this class to import
# jobs in the $RATKINGROOT/etc/jobs.d directory
self.job_control_instance = JobCtl(self.sched, self.config, self.logging)
def check_sched(self):
"""Checks to see if scheduler is running"""
if self.sched.running is True:
return True, "Scheduler is running."
else:
return False, "Scheduler is stopped."
def import_jobs(self):
"""read jobs from persistent directory, specified in the config file, under option job_dir"""
for infile in glob.glob( os.path.join(self.config.get('main', 'job_dir'), '*.conf') ):
self.logging.info("Trying to import jobfile: %s", infile)
try:
self.job_control_instance.add_job(infile, 'initial_startup', 'initial_startup')
except RatkingException as error:
print "RatkingException: Error adding job, jobfile: %s. " % infile
pass
except ConfigParser.ParsingError as error:
self.logging.error("ConfigParser.ParsingError: %s. ", error)
pass
def initialize(self):
"""Starts the scheduler for the first time. Only to be used in ratkingd daemon"""
self.sched.start()
return True
def start_sched(self, user):
"""Start the AP Scheduler. Return 'True' if success."""
if user != 'root':
return False, "Only root can stop/start scheduling."
if self.sched.running is True:
return False, "Scheduler already running."
else:
try:
self.sched.start()
except exceptions.AttributeError as e:
raise RatkingException("Error starting scheduling: %s" % e)
return True, "Scheduler started."
def stop_sched(self, user):
"""Stop the AP Scheduler. Return 'True' if success."""
if user != 'root':
return False, "Only root can stop/start scheduling."
if self.sched.running is False:
return False, "Scheduler is not running."
else:
try:
self.sched.shutdown()
except exceptions.AttributeError as e:
raise RatkingException("Error stopping scheduling: %s" % e)
self.sched.shutdown()
return True, "Ratkingd job scheduling has been stopped."
class RatkingException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| mit | -9,168,645,899,484,377,000 | 26.745455 | 101 | 0.598296 | false |
franek/weboob | modules/ing/pages/transfer.py | 1 | 9640 | # -*- coding: utf-8 -*-
# Copyright(C) 2009-2011 Romain Bignon, Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from decimal import Decimal
from weboob.tools.capabilities.bank.transactions import FrenchTransaction
from weboob.tools.captcha.virtkeyboard import VirtKeyboardError
from weboob.capabilities.bank import Recipient, AccountNotFound, Transfer
from weboob.tools.browser import BasePage, BrokenPageError
from weboob.tools.mech import ClientForm
from .login import INGVirtKeyboard
from logging import error
__all__ = ['TransferPage']
class TransferPage(BasePage):
def on_loaded(self):
pass
def get_recipients(self):
# First, internals recipients
table = self.document.xpath('//table[@id="transfer_form:receiptAccount"]')
for tr in table[0].xpath('tbody/tr'):
tds = tr.xpath('td')
id = tds[0].xpath('input')[0].attrib['value']
name = tds[0].xpath('label')[0].text
name += u" " + tds[1].xpath('label')[0].text.replace('\n', '')
name += u" " + tds[2].xpath('label')[0].text.replace('\n', '')
recipient = Recipient()
recipient.id = id
recipient.label = name
recipient._type = "int"
yield recipient
# Second, externals recipients
select = self.document.xpath('//select[@id="transfer_form:externalAccounts"]')
if len(select) > 0:
recipients = select[0].xpath('option')
recipients.pop(0)
for option in recipients:
recipient = Recipient()
recipient.id = option.attrib['value']
recipient.label = option.text
recipient._type = "ext"
yield recipient
def ischecked(self, account):
id = account.id
# remove prefix (CC-, LA-, ...)
if "-" in id:
id = id.split('-')[1]
option = self.document.xpath('//input[@value="%s"]' % id)
if len(option) == 0:
raise AccountNotFound()
else:
option = option[0]
try:
if option.attrib["checked"] == "checked":
return True
else:
return False
except:
return False
def transfer(self, recipient, amount, reason):
self.browser.select_form("transfer_form")
self.browser.set_all_readonly(False)
for a in self.browser.controls[:]:
#for label in a.get_labels():
if "transfer_form:_link_hidden_" in str(a) or "transfer_form:j_idcl" in str(a):
self.browser.controls.remove(a)
if "transfer_form:valide" in str(a):
self.browser.controls.remove(a)
self.browser.controls.append(ClientForm.TextControl('text',
'AJAXREQUEST', {'value': "_viewRoot"}))
self.browser.controls.append(ClientForm.TextControl('text',
'AJAX:EVENTS_COUNT', {'value': "1"}))
self.browser['transfer_form:transferMotive'] = reason
self.browser.controls.append(ClientForm.TextControl('text', 'transfer_form:valide', {'value': "transfer_form:valide"}))
self.browser['transfer_form:validateDoTransfer'] = "needed"
self.browser['transfer_form:transferAmount'] = str(amount)
if recipient._type == "int":
self.browser['transfer_recipient_radio'] = [recipient.id]
else:
self.browser['transfer_form:externalAccounts'] = [recipient.id]
self.browser.submit()
def buildonclick(self, recipient, account):
javax = self.document.xpath('//input[@id="javax.faces.ViewState"]')[0].attrib['value']
if recipient._type == "ext":
select = self.document.xpath('//select[@id="transfer_form:externalAccounts"]')[0]
onclick = select.attrib['onchange']
params = onclick.split(',')[3].split('{')[1]
idparam = params.split("'")[1]
param = params.split("'")[3]
request = self.browser.buildurl('', ("AJAXREQUEST", "transfer_form:transfer_radios_form"),
("transfer_form:generalMessages", ""),
("transfer_issuer_radio", account.id[3:]),
("transfer_form:externalAccounts", recipient.id),
("transfer_date", 0),
("transfer_form:transferAmount", ""),
("transfer_form:transferMotive", ""),
("transfer_form:validateDoTransfer", "needed"),
("transfer_form", "transfer_form"),
("autoScrol", ""),
("javax.faces.ViewState", javax),
(idparam, param))
request = request[1:] # remove the "?"
return request
elif recipient._type == "int":
for input in self.document.xpath('//input[@value=%s]' % recipient.id):
if input.attrib['name'] == "transfer_recipient_radio":
onclick = input.attrib['onclick']
break
# Get something like transfer_form:issueAccount:0:click
params = onclick.split(',')[3].split('{')[1]
idparam = params.split("'")[1]
param = params.split("'")[3]
request = self.browser.buildurl('', ("AJAXREQUEST", "transfer_form:transfer_radios_form"),
('transfer_issuer_radio', account.id[3:]),
("transfer_recipient_radio", recipient.id),
("transfer_form:externalAccounts", "na"),
("transfer_date", 0),
("transfer_form:transferAmount", ""),
("transfer_form:transferMotive", ""),
("transfer_form:validateDoTransfer", "needed"),
("transfer_form", "transfer_form"),
("autoScroll", ""),
("javax.faces.ViewState", javax),
(idparam, param))
request = request[1:]
return request
class TransferConfirmPage(BasePage):
def on_loaded(self):
pass
def confirm(self, password):
try:
vk = INGVirtKeyboard(self)
except VirtKeyboardError, err:
error("Error: %s" % err)
return
realpasswd = ""
span = self.document.find('//span[@id="digitpadtransfer"]')
i = 0
for font in span.getiterator('font'):
if font.attrib.get('class') == "vide":
realpasswd += password[i]
i += 1
confirmform = None
for form in self.document.xpath('//form'):
try:
if form.attrib['name'][0:4] == "j_id":
confirmform = form
break
except:
continue
if confirmform is None:
raise BrokenPageError('Unable to find confirm form')
formname = confirmform.attrib['name']
self.browser.logger.debug('We are looking for : ' + realpasswd)
self.browser.select_form(formname)
self.browser.set_all_readonly(False)
for a in self.browser.controls[:]:
if "_link_hidden_" in str(a) or "j_idcl" in str(a):
self.browser.controls.remove(a)
coordinates = vk.get_string_code(realpasswd)
self.browser.logger.debug("Coordonates: " + coordinates)
self.browser.controls.append(ClientForm.TextControl('text',
'AJAXREQUEST', {'value': '_viewRoot'}))
self.browser.controls.append(ClientForm.TextControl(
'text', '%s:mrgtransfer' % formname,
{'value': '%s:mrgtransfer' % formname}))
self.browser['%s:mrltransfer' % formname] = coordinates
self.browser.submit(nologin=True)
def recap(self):
if len(self.document.xpath('//p[@class="alert alert-success"]')) == 0:
raise BrokenPageError('Unable to find confirmation')
div = self.document.find(
'//div[@class="encadre transfert-validation"]')
transfer = Transfer(0)
transfer.amount = Decimal(FrenchTransaction.clean_amount(
div.xpath('.//label[@id="confirmtransferAmount"]')[0].text))
transfer.origin = div.xpath(
'.//span[@id="confirmfromAccount"]')[0].text
transfer.recipient = div.xpath(
'.//span[@id="confirmtoAccount"]')[0].text
transfer.reason = unicode(
div.xpath('.//span[@id="confirmtransferMotive"]')[0].text)
return transfer
| agpl-3.0 | 6,485,394,124,487,427,000 | 44.687204 | 127 | 0.540664 | false |
FlintHill/SUAS-Competition | env/lib/python2.7/site-packages/setuptools/ssl_support.py | 64 | 8492 | import os
import socket
import atexit
import re
import functools
from setuptools.extern.six.moves import urllib, http_client, map, filter
from pkg_resources import ResolutionError, ExtractionError
try:
import ssl
except ImportError:
ssl = None
__all__ = [
'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
'opener_for'
]
cert_paths = """
/etc/pki/tls/certs/ca-bundle.crt
/etc/ssl/certs/ca-certificates.crt
/usr/share/ssl/certs/ca-bundle.crt
/usr/local/share/certs/ca-root.crt
/etc/ssl/cert.pem
/System/Library/OpenSSL/certs/cert.pem
/usr/local/share/certs/ca-root-nss.crt
/etc/ssl/ca-bundle.pem
""".strip().split()
try:
HTTPSHandler = urllib.request.HTTPSHandler
HTTPSConnection = http_client.HTTPSConnection
except AttributeError:
HTTPSHandler = HTTPSConnection = object
is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
try:
from ssl import CertificateError, match_hostname
except ImportError:
try:
from backports.ssl_match_hostname import CertificateError
from backports.ssl_match_hostname import match_hostname
except ImportError:
CertificateError = None
match_hostname = None
if not CertificateError:
class CertificateError(ValueError):
pass
if not match_hostname:
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
class VerifyingHTTPSHandler(HTTPSHandler):
"""Simple verifying handler: no auth, subclasses, timeouts, etc."""
def __init__(self, ca_bundle):
self.ca_bundle = ca_bundle
HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(
lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
)
class VerifyingHTTPSConn(HTTPSConnection):
"""Simple verifying connection: no auth, subclasses, timeouts, etc."""
def __init__(self, host, ca_bundle, **kw):
HTTPSConnection.__init__(self, host, **kw)
self.ca_bundle = ca_bundle
def connect(self):
sock = socket.create_connection(
(self.host, self.port), getattr(self, 'source_address', None)
)
# Handle the socket if a (proxy) tunnel is present
if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
# change self.host to mean the proxy server host when tunneling is
# being used. Adapt, since we are interested in the destination
# host for the match_hostname() comparison.
actual_host = self._tunnel_host
else:
actual_host = self.host
if hasattr(ssl, 'create_default_context'):
ctx = ssl.create_default_context(cafile=self.ca_bundle)
self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
else:
# This is for python < 2.7.9 and < 3.4?
self.sock = ssl.wrap_socket(
sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
)
try:
match_hostname(self.sock.getpeercert(), actual_host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
def opener_for(ca_bundle=None):
"""Get a urlopen() replacement that uses ca_bundle for verification"""
return urllib.request.build_opener(
VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
).open
# from jaraco.functools
def once(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if not hasattr(func, 'always_returns'):
func.always_returns = func(*args, **kwargs)
return func.always_returns
return wrapper
@once
def get_win_certfile():
try:
import wincertstore
except ImportError:
return None
class CertFile(wincertstore.CertFile):
def __init__(self):
super(CertFile, self).__init__()
atexit.register(self.close)
def close(self):
try:
super(CertFile, self).close()
except OSError:
pass
_wincerts = CertFile()
_wincerts.addstore('CA')
_wincerts.addstore('ROOT')
return _wincerts.name
def find_ca_bundle():
"""Return an existing CA bundle path, or None"""
extant_cert_paths = filter(os.path.isfile, cert_paths)
return (
get_win_certfile()
or next(extant_cert_paths, None)
or _certifi_where()
)
def _certifi_where():
try:
return __import__('certifi').where()
except (ImportError, ResolutionError, ExtractionError):
pass
| mit | -5,096,789,671,977,791,000 | 31.661538 | 82 | 0.596326 | false |
daonb/Open-Knesset | auxiliary/migrations/0013_auto__add_tagkeyphrase.py | 14 | 7851 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'TagKeyphrase'
db.create_table(u'auxiliary_tagkeyphrase', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['tagging.Tag'])),
('phrase', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'auxiliary', ['TagKeyphrase'])
def backwards(self, orm):
# Deleting model 'TagKeyphrase'
db.delete_table(u'auxiliary_tagkeyphrase')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'auxiliary.feedback': {
'Meta': {'object_name': 'Feedback'},
'content': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'suggested_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'suggested_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feedback'", 'null': 'True', 'to': u"orm['auth.User']"}),
'url': ('django.db.models.fields.TextField', [], {}),
'user_agent': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'auxiliary.tagkeyphrase': {
'Meta': {'object_name': 'TagKeyphrase'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phrase': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tagging.Tag']"})
},
u'auxiliary.tagsuggestion': {
'Meta': {'object_name': 'TagSuggestion'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'unique': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'suggested_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tagsuggestion'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'auxiliary.tagsynonym': {
'Meta': {'object_name': 'TagSynonym'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'synonym_tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'synonym_synonym_tag'", 'unique': 'True', 'to': u"orm['tagging.Tag']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'synonym_proper_tag'", 'to': u"orm['tagging.Tag']"})
},
u'auxiliary.tidbit': {
'Meta': {'object_name': 'Tidbit'},
'button_link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'button_text': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content': ('tinymce.models.HTMLField', [], {}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '20', 'db_index': 'True'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'suggested_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tidbits'", 'null': 'True', 'to': u"orm['auth.User']"}),
'title': ('django.db.models.fields.CharField', [], {'default': "u'Did you know ?'", 'max_length': '40'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'tagging.tag': {
'Meta': {'ordering': "('name',)", 'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
}
}
complete_apps = ['auxiliary'] | bsd-3-clause | 6,347,406,213,089,847,000 | 69.107143 | 195 | 0.552286 | false |
larsbutler/coveragepy | coverage/test_helpers.py | 1 | 9946 | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Mixin classes to help make good tests."""
import atexit
import collections
import os
import random
import shutil
import sys
import tempfile
import textwrap
from coverage.backunittest import TestCase
from coverage.backward import StringIO, to_bytes
class Tee(object):
    """A file-like that writes to all the file-likes it has."""

    def __init__(self, *files):
        """Make a Tee that writes to all the files in `files.`"""
        self._files = files
        # Mirror the encoding of the first file, if there is one and it has
        # an encoding, so code inspecting e.g. `sys.stdout.encoding` keeps
        # working after stdout is replaced by a Tee.  Guarding on `files`
        # also makes an empty Tee() valid (it just discards writes).
        if files and hasattr(files[0], "encoding"):
            self.encoding = files[0].encoding

    def write(self, data):
        """Write `data` to all the files."""
        for f in self._files:
            f.write(data)

    def flush(self):
        """Flush the data on all the files."""
        for f in self._files:
            f.flush()

    if 0:
        # Use this if you need to use a debugger, though it makes some tests
        # fail, I'm not sure why...
        def __getattr__(self, name):
            return getattr(self._files[0], name)
class ModuleAwareMixin(TestCase):
    """A test case mixin that isolates changes to sys.modules."""

    def setUp(self):
        super(ModuleAwareMixin, self).setUp()
        # Snapshot sys.modules now; cleanup_modules restores this state.
        self.old_modules = dict(sys.modules)
        self.addCleanup(self.cleanup_modules)

    def cleanup_modules(self):
        """Remove any new modules imported during the test run.

        This lets us import the same source files for more than one test.
        """
        added = set(sys.modules) - set(self.old_modules)
        for mod_name in added:
            del sys.modules[mod_name]
class SysPathAwareMixin(TestCase):
    """A test case mixin that isolates changes to sys.path."""
    def setUp(self):
        super(SysPathAwareMixin, self).setUp()
        # Keep a copy of sys.path; cleanup_syspath restores it verbatim.
        self.old_syspath = list(sys.path)
        self.addCleanup(self.cleanup_syspath)
    def cleanup_syspath(self):
        """Restore the original sys.path."""
        sys.path = self.old_syspath
class EnvironmentAwareMixin(TestCase):
    """A test case mixin that isolates changes to the environment."""
    def setUp(self):
        super(EnvironmentAwareMixin, self).setUp()
        # Maps variable name -> value before we first touched it (None if it
        # was unset), so cleanup_environ can put everything back.
        self.environ_undos = {}
        self.addCleanup(self.cleanup_environ)
    def set_environ(self, name, value):
        """Set an environment variable `name` to be `value`.
        The environment variable is set, and record is kept that it was set,
        so that `cleanup_environ` can restore its original value.
        """
        # Only the value seen on the *first* set is recorded.
        self.environ_undos.setdefault(name, os.environ.get(name))
        os.environ[name] = value
    def cleanup_environ(self):
        """Undo all the changes made by `set_environ`."""
        for name, old_value in self.environ_undos.items():
            if old_value is None:
                del os.environ[name]
            else:
                os.environ[name] = old_value
class StdStreamCapturingMixin(TestCase):
    """A test case mixin that captures stdout and stderr."""
    def setUp(self):
        super(StdStreamCapturingMixin, self).setUp()
        # Capture stdout and stderr so we can examine them in tests.
        # nose keeps stdout from littering the screen, so we can safely Tee it,
        # but it doesn't capture stderr, so we don't want to Tee stderr to the
        # real stderr, since it will interfere with our nice field of dots.
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        self.captured_stdout = StringIO()
        self.captured_stderr = StringIO()
        sys.stdout = Tee(sys.stdout, self.captured_stdout)
        sys.stderr = self.captured_stderr
        self.addCleanup(self.cleanup_std_streams)
    def cleanup_std_streams(self):
        """Restore stdout and stderr."""
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
    def stdout(self):
        """Return the data written to stdout during the test."""
        return self.captured_stdout.getvalue()
    def stderr(self):
        """Return the data written to stderr during the test."""
        return self.captured_stderr.getvalue()
class TempDirMixin(SysPathAwareMixin, ModuleAwareMixin, TestCase):
    """A test case mixin that creates a temp directory and files in it.
    Includes SysPathAwareMixin and ModuleAwareMixin, because making and using
    temp dirs like this will also need that kind of isolation.
    """
    # Our own setting: most of these tests run in their own temp directory.
    # Set this to False in your subclass if you don't want a temp directory
    # created.
    run_in_temp_dir = True
    # Set this if you aren't creating any files with make_file, but still want
    # the temp directory. This will stop the test behavior checker from
    # complaining.
    no_files_in_temp_dir = False
    def setUp(self):
        super(TempDirMixin, self).setUp()
        if self.run_in_temp_dir:
            # Create a temporary directory.
            self.temp_dir = self.make_temp_dir("test_cover")
            self.chdir(self.temp_dir)
            # Modules should be importable from this temp directory. We don't
            # use '' because we make lots of different temp directories and
            # nose's caching importer can get confused. The full path prevents
            # problems.
            sys.path.insert(0, os.getcwd())
        # Tally this test run so report_on_class_behavior can audit the class
        # at process exit.
        class_behavior = self.class_behavior()
        class_behavior.tests += 1
        class_behavior.temp_dir = self.run_in_temp_dir
        class_behavior.no_files_ok = self.no_files_in_temp_dir
        self.addCleanup(self.check_behavior)
    def make_temp_dir(self, slug="test_cover"):
        """Make a temp directory that is cleaned up when the test is done."""
        # The random suffix keeps repeated/concurrent runs from colliding;
        # it is not meant to be cryptographically unpredictable.
        name = "%s_%08d" % (slug, random.randint(0, 99999999))
        temp_dir = os.path.join(tempfile.gettempdir(), name)
        os.makedirs(temp_dir)
        self.addCleanup(shutil.rmtree, temp_dir)
        return temp_dir
    def chdir(self, new_dir):
        """Change directory, and change back when the test is done."""
        old_dir = os.getcwd()
        os.chdir(new_dir)
        self.addCleanup(os.chdir, old_dir)
    def check_behavior(self):
        """Check that we did the right things."""
        # Runs once per test (as a cleanup); the per-class report itself is
        # printed later by report_on_class_behavior.
        class_behavior = self.class_behavior()
        if class_behavior.test_method_made_any_files:
            class_behavior.tests_making_files += 1
    def make_file(self, filename, text="", newline=None):
        """Create a file for testing.
        `filename` is the relative path to the file, including directories if
        desired, which will be created if need be.
        `text` is the content to create in the file, a native string (bytes in
        Python 2, unicode in Python 3).
        If `newline` is provided, it is a string that will be used as the line
        endings in the created file, otherwise the line endings are as provided
        in `text`.
        Returns `filename`.
        """
        # Tests that call `make_file` should be run in a temp environment.
        assert self.run_in_temp_dir
        self.class_behavior().test_method_made_any_files = True
        # Dedent so callers can use indented triple-quoted literals.
        text = textwrap.dedent(text)
        if newline:
            text = text.replace("\n", newline)
        # Make sure the directories are available.
        dirs, _ = os.path.split(filename)
        if dirs and not os.path.exists(dirs):
            os.makedirs(dirs)
        # Create the file. Written in binary mode so the chosen `newline`
        # characters are preserved exactly.
        with open(filename, 'wb') as f:
            f.write(to_bytes(text))
        return filename
    # We run some tests in temporary directories, because they may need to make
    # files for the tests. But this is expensive, so we can change per-class
    # whether a temp dir is used or not. It's easy to forget to set that
    # option properly, so we track information about what the tests did, and
    # then report at the end of the process on test classes that were set
    # wrong.
    class ClassBehavior(object):
        """A value object to store per-class."""
        def __init__(self):
            self.tests = 0                  # number of tests run in the class
            self.skipped = 0                # how many of those were skipped
            self.temp_dir = True            # did the class use a temp dir?
            self.no_files_ok = False        # is making no files acceptable?
            self.tests_making_files = 0     # tests that actually made files
            self.test_method_made_any_files = False  # per-test scratch flag
    # Map from class to info about how it ran.
    class_behaviors = collections.defaultdict(ClassBehavior)
    @classmethod
    def report_on_class_behavior(cls):
        """Called at process exit to report on class behavior."""
        for test_class, behavior in cls.class_behaviors.items():
            bad = ""
            if behavior.tests <= behavior.skipped:
                # Everything was skipped, so there is nothing to judge.
                bad = ""
            elif behavior.temp_dir and behavior.tests_making_files == 0:
                if not behavior.no_files_ok:
                    bad = "Inefficient"
            elif not behavior.temp_dir and behavior.tests_making_files > 0:
                bad = "Unsafe"
            if bad:
                if behavior.temp_dir:
                    where = "in a temp directory"
                else:
                    where = "without a temp directory"
                print(
                    "%s: %s ran %d tests, %d made files %s" % (
                        bad,
                        test_class.__name__,
                        behavior.tests,
                        behavior.tests_making_files,
                        where,
                    )
                )
    def class_behavior(self):
        """Get the ClassBehavior instance for this test."""
        return self.class_behaviors[self.__class__]
# When the process ends, find out about bad classes.
atexit.register(TempDirMixin.report_on_class_behavior)
| apache-2.0 | -7,562,683,615,410,326,000 | 33.061644 | 79 | 0.612709 | false |
jiaphuan/models | research/object_detection/models/faster_rcnn_inception_resnet_v2_feature_extractor.py | 1 | 9271 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inception Resnet v2 Faster R-CNN implementation.
See "Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning" by Szegedy et al. (https://arxiv.org/abs/1602.07261)
as well as
"Speed/accuracy trade-offs for modern convolutional object detectors" by
Huang et al. (https://arxiv.org/abs/1611.10012)
"""
import tensorflow as tf
from object_detection.meta_architectures import faster_rcnn_meta_arch
from nets import inception_resnet_v2
slim = tf.contrib.slim
class FasterRCNNInceptionResnetV2FeatureExtractor(
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor):
  """Faster R-CNN with Inception Resnet v2 feature extractor implementation."""
  def __init__(self,
               is_training,
               first_stage_features_stride,
               batch_norm_trainable=False,
               reuse_weights=None,
               weight_decay=0.0):
    """Constructor.
    Args:
      is_training: See base class.
      first_stage_features_stride: See base class.
      batch_norm_trainable: See base class.
      reuse_weights: See base class.
      weight_decay: See base class.
    Raises:
      ValueError: If `first_stage_features_stride` is not 8 or 16.
    """
    # Only strides 8 and 16 are supported by the aligned Inception ResNet
    # base below.
    if first_stage_features_stride != 8 and first_stage_features_stride != 16:
      raise ValueError('`first_stage_features_stride` must be 8 or 16.')
    super(FasterRCNNInceptionResnetV2FeatureExtractor, self).__init__(
        is_training, first_stage_features_stride, batch_norm_trainable,
        reuse_weights, weight_decay)
  def preprocess(self, resized_inputs):
    """Faster R-CNN with Inception Resnet v2 preprocessing.
    Maps pixel values to the range [-1, 1].
    Args:
      resized_inputs: A [batch, height_in, width_in, channels] float32 tensor
        representing a batch of images with values between 0 and 255.0.
    Returns:
      preprocessed_inputs: A [batch, height_out, width_out, channels] float32
        tensor representing a batch of images.
    """
    # Linear map: 0 -> -1.0 and 255 -> +1.0.
    return (2.0 / 255.0) * resized_inputs - 1.0
  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.
    Extracts features using the first half of the Inception Resnet v2 network.
    We construct the network in `align_feature_maps=True` mode, which means
    that all VALID paddings in the network are changed to SAME padding so that
    the feature maps are aligned.
    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.
    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """
    if len(preprocessed_inputs.get_shape().as_list()) != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())
    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
        weight_decay=self._weight_decay)):
      # Batch norm statistics are updated only when self._train_batch_norm
      # is True; otherwise batch norm runs in inference mode.
      with slim.arg_scope([slim.batch_norm],
                          is_training=self._train_batch_norm):
        with tf.variable_scope('InceptionResnetV2',
                               reuse=self._reuse_weights) as scope:
          # Run the network only up to 'PreAuxLogits'; the remainder is
          # rebuilt in _extract_box_classifier_features.
          rpn_feature_map, _ = (
              inception_resnet_v2.inception_resnet_v2_base(
                  preprocessed_inputs, final_endpoint='PreAuxLogits',
                  scope=scope, output_stride=self._first_stage_features_stride,
                  align_feature_maps=True))
    return rpn_feature_map
  def _extract_box_classifier_features(self, proposal_feature_maps, scope):
    """Extracts second stage box classifier features.
    This function reconstructs the "second half" of the Inception ResNet v2
    network after the part defined in `_extract_proposal_features`.
    Args:
      proposal_feature_maps: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, crop_height, crop_width, depth]
        representing the feature map cropped to each proposal.
      scope: A scope name.
    Returns:
      proposal_classifier_features: A 4-D float tensor with shape
        [batch_size * self.max_num_proposals, height, width, depth]
        representing box classifier features for each proposal.
    """
    with tf.variable_scope('InceptionResnetV2', reuse=self._reuse_weights):
      with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope(
          weight_decay=self._weight_decay)):
        # Batch norm statistics are updated only when self._train_batch_norm
        # is True; otherwise batch norm runs in inference mode.
        with slim.arg_scope([slim.batch_norm],
                            is_training=self._train_batch_norm):
          with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                              stride=1, padding='SAME'):
            # Hand-built 'Mixed_7a' reduction block: four parallel branches
            # concatenated along the channel axis.
            with tf.variable_scope('Mixed_7a'):
              with tf.variable_scope('Branch_0'):
                tower_conv = slim.conv2d(proposal_feature_maps,
                                         256, 1, scope='Conv2d_0a_1x1')
                tower_conv_1 = slim.conv2d(
                    tower_conv, 384, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_1'):
                tower_conv1 = slim.conv2d(
                    proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
                tower_conv1_1 = slim.conv2d(
                    tower_conv1, 288, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_2'):
                tower_conv2 = slim.conv2d(
                    proposal_feature_maps, 256, 1, scope='Conv2d_0a_1x1')
                tower_conv2_1 = slim.conv2d(tower_conv2, 288, 3,
                                            scope='Conv2d_0b_3x3')
                tower_conv2_2 = slim.conv2d(
                    tower_conv2_1, 320, 3, stride=2,
                    padding='VALID', scope='Conv2d_1a_3x3')
              with tf.variable_scope('Branch_3'):
                tower_pool = slim.max_pool2d(
                    proposal_feature_maps, 3, stride=2, padding='VALID',
                    scope='MaxPool_1a_3x3')
              net = tf.concat(
                  [tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool], 3)
            # Trailing block8 stack and final 1x1 projection of the network.
            net = slim.repeat(net, 9, inception_resnet_v2.block8, scale=0.20)
            net = inception_resnet_v2.block8(net, activation_fn=None)
            proposal_classifier_features = slim.conv2d(
                net, 1536, 1, scope='Conv2d_7b_1x1')
        return proposal_classifier_features
  def restore_from_classification_checkpoint_fn(
      self,
      first_stage_feature_extractor_scope,
      second_stage_feature_extractor_scope):
    """Returns a map of variables to load from a foreign checkpoint.
    Note that this overrides the default implementation in
    faster_rcnn_meta_arch.FasterRCNNFeatureExtractor which does not work for
    InceptionResnetV2 checkpoints.
    TODO(jonathanhuang,rathodv): revisit whether it's possible to force the
    `Repeat` namescope as created in `_extract_box_classifier_features` to
    start counting at 2 (e.g. `Repeat_2`) so that the default restore_fn can
    be used.
    Args:
      first_stage_feature_extractor_scope: A scope name for the first stage
        feature extractor.
      second_stage_feature_extractor_scope: A scope name for the second stage
        feature extractor.
    Returns:
      A dict mapping variable names (to load from a checkpoint) to variables in
      the model graph.
    """
    variables_to_restore = {}
    for variable in tf.global_variables():
      # First stage: strip the detector's scope prefix to recover the
      # original checkpoint variable name.
      if variable.op.name.startswith(
          first_stage_feature_extractor_scope):
        var_name = variable.op.name.replace(
            first_stage_feature_extractor_scope + '/', '')
        variables_to_restore[var_name] = variable
      # Second stage: additionally remap 'Repeat' -> 'Repeat_2' to match
      # the checkpoint's naming of the rebuilt tail (see TODO above).
      if variable.op.name.startswith(
          second_stage_feature_extractor_scope):
        var_name = variable.op.name.replace(
            second_stage_feature_extractor_scope
            + '/InceptionResnetV2/Repeat', 'InceptionResnetV2/Repeat_2')
        var_name = var_name.replace(
            second_stage_feature_extractor_scope + '/', '')
        variables_to_restore[var_name] = variable
    return variables_to_restore
| apache-2.0 | -2,195,494,220,945,714,000 | 42.12093 | 80 | 0.637795 | false |
pfi/maf | maf_template.py | 1 | 4169 | #!/usr/bin/env python
# coding: ISO8859-1
#
# Copyright (c) 2013, Preferred Infrastructure, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
maf - a waf extension for automation of parameterized computational experiments
"""
# NOTE: coding ISO8859-1 is necessary for attaching maflib at the end of this
# file.
import os
import os.path
import shutil
import subprocess
import sys
import tarfile
import waflib.Context
import waflib.Logs
TAR_NAME = 'maflib.tar'
NEW_LINE = '#XXX'.encode()
CARRIAGE_RETURN = '#YYY'.encode()
ARCHIVE_BEGIN = '#==>\n'.encode()
ARCHIVE_END = '#<==\n'.encode()
class _Cleaner:
def __init__(self, directory):
self._cwd = os.getcwd()
self._directory = directory
def __enter__(self):
self.clean()
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self._cwd)
if exc_type:
self.clean()
return False
def clean(self):
try:
path = os.path.join(self._directory, 'maflib')
shutil.rmtree(path)
except OSError:
pass
def _read_archive(filename):
    """Return the maflib archive embedded in `filename` as raw bytes.
    The payload sits on the single line between the ARCHIVE_BEGIN and
    ARCHIVE_END marker lines, with real newlines and carriage returns
    escaped as NEW_LINE / CARRIAGE_RETURN; this reverses that escaping.
    """
    if filename.endswith('.pyc'):
        filename = filename[:-1]
    with open(filename, 'rb') as stream:
        while True:
            line = stream.readline()
            if line == ARCHIVE_BEGIN:
                content = stream.readline()
                if not content or stream.readline() != ARCHIVE_END:
                    raise Exception('corrupt archive')
                break
            if not line:
                raise Exception('archive not found')
    # Drop the leading '#' and trailing newline, then undo the escaping.
    payload = content[1:-1]
    payload = payload.replace(NEW_LINE, '\n'.encode())
    return payload.replace(CARRIAGE_RETURN, '\r'.encode())
def unpack_maflib(directory):
    """Extract the maflib package embedded in this script into `directory`.
    Writes the embedded bz2-compressed tar into `directory`, extracts it
    (creating `directory`/maflib), removes the temporary archive files, and
    returns the absolute path of `directory`.
    """
    # _Cleaner wipes any stale maflib dir first, restores the cwd afterwards,
    # and re-cleans if extraction fails. (Its __enter__ returns None, so the
    # bound name `c` is unused.)
    with _Cleaner(directory) as c:
        content = _read_archive(__file__)
        os.makedirs(os.path.join(directory, 'maflib'))
        os.chdir(directory)
        bz2_name = TAR_NAME + '.bz2'
        with open(bz2_name, 'wb') as f:
            f.write(content)
        try:
            t = tarfile.open(bz2_name)
        except:
            # Per the error message below: this path handles Pythons without
            # the bz2 module by shelling out to the bunzip2 command instead.
            try:
                os.system('bunzip2 ' + bz2_name)
                t = tarfile.open(TAR_NAME)
            except:
                raise Exception('Cannot extract maflib. Check that python bz2 module or bunzip2 command is available.')
        try:
            t.extractall()
        finally:
            t.close()
        # Best-effort removal of the temporary archive files.
        try:
            os.remove(bz2_name)
            os.remove(TAR_NAME)
        except:
            pass
        maflib_path = os.path.abspath(os.getcwd())
        return maflib_path
def test_maflib(directory):
try:
os.stat(os.path.join(directory, 'maflib'))
return os.path.abspath(directory)
except OSError:
return None
def find_maflib():
    """Ensure maflib is available under waf's library directory.
    Unpacks the embedded archive into waflib.Context.waf_dir on first use,
    and returns that directory.
    """
    path = waflib.Context.waf_dir
    if not test_maflib(path):
        unpack_maflib(path)
    return path
find_maflib()
import maflib.core
| bsd-2-clause | 8,330,315,998,417,407,000 | 29.210145 | 119 | 0.633725 | false |
roninek/python101 | games_str/pong_str/pong_str4.py | 4 | 3983 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import pygame, sys
from pygame.locals import *
# Przygotowanie zmiennych opisujących okno gry oraz obiekty gry i ich właściwości (paletki, piłeczka)
# Inicjacja modułu i obiektów Pygame'a
# inicjacja modułu pygame
pygame.init()
# liczba klatek na sekundę
FPS = 30
# obiekt zegara, który pozwala śledzić czas
fpsClock = pygame.time.Clock()
# szerokość i wysokość okna gry
OKNOGRY_SZER = 800
OKNOGRY_WYS = 400
# przygotowanie powierzchni do rysowania, czyli inicjacja okna gry
OKNOGRY = pygame.display.set_mode((OKNOGRY_SZER, OKNOGRY_WYS), 0, 32)
# tytuł okna gry
pygame.display.set_caption('Prosty Pong')
# kolory wykorzystywane w grze, których składowe RGB zapisane są w tuplach
LT_BLUE = (230, 255, 255)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
# szerokość, wysokość i pozycja paletek
PALETKA_SZER = 100
PALETKA_WYS = 20
# Inicjacja PALETEK:
# utworzenie powierzchni dla obrazka, wypełnienie jej kolorem,
# pobranie prostokątnego obszaru obrazka i ustawienie go na wstępnej pozycji
PALETKA_1_POZ = (350, 360) # początkowa pozycja paletki gracza
paletka1_obr = pygame.Surface([PALETKA_SZER, PALETKA_WYS])
paletka1_obr.fill(BLUE)
paletka1_prost = paletka1_obr.get_rect()
paletka1_prost.x = PALETKA_1_POZ[0]
paletka1_prost.y = PALETKA_1_POZ[1]
PALETKA_2_POZ = (350, 20) # początkowa pozycja paletki komputera
paletka2_obr = pygame.Surface([PALETKA_SZER, PALETKA_WYS])
paletka2_obr.fill(RED)
paletka2_prost = paletka2_obr.get_rect()
paletka2_prost.x = PALETKA_2_POZ[0]
paletka2_prost.y = PALETKA_2_POZ[1]
# szybkość paletki 1 (AI - ang. artificial inteligence, sztuczna inteligencja), czyli komputera
AI_PREDKOSC = 3
# Inicjacja PIŁKI
# szerokość, wysokość, prędkość pozioma (x) i pionowa (y) PIŁKI
# utworzenie powierzchni dla piłki, narysowanie na niej koła, ustawienie pozycji początkowej
PILKA_SZER = 20
PILKA_WYS = 20
PILKA_PREDKOSC_X = 6
PILKA_PREDKOSC_Y = 6
pilka_obr = pygame.Surface([PILKA_SZER, PILKA_WYS], pygame.SRCALPHA, 32).convert_alpha()
pygame.draw.ellipse(pilka_obr, GREEN, [0, 0, PILKA_SZER, PILKA_WYS])
pilka_prost = pilka_obr.get_rect()
pilka_prost.x = OKNOGRY_SZER/2
pilka_prost.y = OKNOGRY_WYS/2
# Rysowanie komunikatów tekstowych
# ustawienie początkowych wartości liczników punktów
# utworzenie obiektu czcionki z podanego pliku o podanym rozmiarze
GRACZ_1_PKT = '0'
GRACZ_2_PKT = '0'
fontObj = pygame.font.Font('freesansbold.ttf', 64)
# funkcje wyświetlające punkty gracza
# tworzą nowy obrazek z tekstem, pobierają prostokątny obszar obrazka,
# pozycjonują go i rysują w oknie gry
def drukuj_punkty_p1():
    """Draw player 1's score centred in the lower half of the window."""
    obraz = fontObj.render(GRACZ_1_PKT, True, (0, 0, 0))
    prostokat = obraz.get_rect()
    prostokat.center = (OKNOGRY_SZER / 2, OKNOGRY_WYS * 0.75)
    OKNOGRY.blit(obraz, prostokat)
def drukuj_punkty_p2():
    """Draw player 2's score centred in the upper half of the window."""
    obraz = fontObj.render(GRACZ_2_PKT, True, (0, 0, 0))
    prostokat = obraz.get_rect()
    prostokat.center = (OKNOGRY_SZER / 2, OKNOGRY_WYS / 4)
    OKNOGRY.blit(obraz, prostokat)
# main loop of the program
while True:
    # handle the events generated by the player
    for event in pygame.event.get():
        # catch the window being closed
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        # catch mouse movement
        if event.type == MOUSEMOTION:
            # get the x, y coordinates of the mouse cursor
            myszaX, myszaY = event.pos
            # horizontal offset of the player's paddle (centred on cursor)
            przesuniecie = myszaX-(PALETKA_SZER/2)
            # if we would go beyond the game window to the right
            if przesuniecie > OKNOGRY_SZER-PALETKA_SZER:
                przesuniecie = OKNOGRY_SZER-PALETKA_SZER
            # if we would go beyond the game window to the left
            if przesuniecie < 0:
                przesuniecie = 0
            paletka1_prost.x = przesuniecie
| mit | -6,045,156,583,652,450,000 | 32.452991 | 101 | 0.703117 | false |
Inspq/ansible | test/units/executor/test_task_result.py | 104 | 5583 | # (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.executor.task_result import TaskResult
class TestTaskResult(unittest.TestCase):
    """Unit tests for TaskResult construction and its is_* status helpers."""
    def test_task_result_basic(self):
        """TaskResult accepts both a dict and a JSON string as the result."""
        mock_host = MagicMock()
        mock_task = MagicMock()
        # test loading a result with a dict
        tr = TaskResult(mock_host, mock_task, dict())
        # test loading a result with a JSON string
        with patch('ansible.parsing.dataloader.DataLoader.load') as p:
            tr = TaskResult(mock_host, mock_task, '{}')
    def test_task_result_is_changed(self):
        """is_changed is True when the result, or any loop item, changed."""
        mock_host = MagicMock()
        mock_task = MagicMock()
        # test with no changed in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_changed())
        # test with changed in the result
        tr = TaskResult(mock_host, mock_task, dict(changed=True))
        self.assertTrue(tr.is_changed())
        # test with multiple results but none changed
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_changed())
        # test with multiple results and one changed
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(changed=False), dict(changed=True), dict(some_key=False)]))
        self.assertTrue(tr.is_changed())
    def test_task_result_is_skipped(self):
        """is_skipped is True only when the result (or ALL loop items) were
        skipped - a single skipped loop item is not enough."""
        mock_host = MagicMock()
        mock_task = MagicMock()
        # test with no skipped in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_skipped())
        # test with skipped in the result
        tr = TaskResult(mock_host, mock_task, dict(skipped=True))
        self.assertTrue(tr.is_skipped())
        # test with multiple results but none skipped
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_skipped())
        # test with multiple results and one skipped
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=False), dict(skipped=True), dict(some_key=False)]))
        self.assertFalse(tr.is_skipped())
        # test with multiple results and all skipped
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(skipped=True), dict(skipped=True), dict(skipped=True)]))
        self.assertTrue(tr.is_skipped())
        # test with multiple squashed results (list of strings)
        # first with the main result having skipped=False
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=False))
        self.assertFalse(tr.is_skipped())
        # then with the main result having skipped=True
        tr = TaskResult(mock_host, mock_task, dict(results=["a", "b", "c"], skipped=True))
        self.assertTrue(tr.is_skipped())
    def test_task_result_is_unreachable(self):
        """is_unreachable is True when the result, or any loop item, was
        unreachable."""
        mock_host = MagicMock()
        mock_task = MagicMock()
        # test with no unreachable in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_unreachable())
        # test with unreachable in the result
        tr = TaskResult(mock_host, mock_task, dict(unreachable=True))
        self.assertTrue(tr.is_unreachable())
        # test with multiple results but none unreachable
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(foo='bar'), dict(bam='baz'), True]))
        self.assertFalse(tr.is_unreachable())
        # test with multiple results and one unreachable
        mock_task.loop = 'foo'
        tr = TaskResult(mock_host, mock_task, dict(results=[dict(unreachable=False), dict(unreachable=True), dict(some_key=False)]))
        self.assertTrue(tr.is_unreachable())
    def test_task_result_is_failed(self):
        """is_failed honours `rc`, `failed` and `failed_when_result` keys."""
        mock_host = MagicMock()
        mock_task = MagicMock()
        # test with no failed in result
        tr = TaskResult(mock_host, mock_task, dict())
        self.assertFalse(tr.is_failed())
        # test failed result with rc values
        tr = TaskResult(mock_host, mock_task, dict(rc=0))
        self.assertFalse(tr.is_failed())
        tr = TaskResult(mock_host, mock_task, dict(rc=1))
        self.assertTrue(tr.is_failed())
        # test with failed in result
        tr = TaskResult(mock_host, mock_task, dict(failed=True))
        self.assertTrue(tr.is_failed())
        # test with failed_when in result
        tr = TaskResult(mock_host, mock_task, dict(failed_when_result=True))
        self.assertTrue(tr.is_failed())
| gpl-3.0 | -8,862,854,582,317,598,000 | 39.165468 | 132 | 0.649113 | false |
olsaki/ansible-modules-core | system/authorized_key.py | 55 | 15877 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to add authorized_keys for ssh logins.
(c) 2012, Brad Olson <brado@movedbylight.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
DOCUMENTATION = '''
---
module: authorized_key
short_description: Adds or removes an SSH authorized key
description:
- Adds or removes an SSH authorized key for a user from a remote host.
version_added: "0.5"
options:
user:
description:
- The username on the remote host whose authorized_keys file will be modified
required: true
default: null
key:
description:
- The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys)
required: true
default: null
path:
description:
- Alternate path to the authorized_keys file
required: false
default: "(homedir)+/.ssh/authorized_keys"
version_added: "1.2"
manage_dir:
description:
- Whether this module should manage the directory of the authorized key file. If
set, the module will create the directory, as well as set the owner and permissions
of an existing directory. Be sure to
set C(manage_dir=no) if you are using an alternate directory for
authorized_keys, as set with C(path), since you could lock yourself out of
SSH access. See the example below.
required: false
choices: [ "yes", "no" ]
default: "yes"
version_added: "1.2"
state:
description:
- Whether the given key (with the given key_options) should or should not be in the file
required: false
choices: [ "present", "absent" ]
default: "present"
key_options:
description:
- A string of ssh key options to be prepended to the key in the authorized_keys file
required: false
default: null
version_added: "1.4"
exclusive:
description:
- Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys
can be specified in a single C(key) string value by separating them by newlines.
- This option is not loop aware, so if you use C(with_) , it will be exclusive per iteration
of the loop, if you want multiple keys in the file you need to pass them all to C(key) in a
single batch as mentioned above.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.9"
description:
- "Adds or removes authorized keys for particular user accounts"
author: "Brad Olson (@bradobro)"
'''
EXAMPLES = '''
# Example using key data from a local file on the management machine
- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
# Using github url as key source
- authorized_key: user=charlie key=https://github.com/charlie.keys
# Using alternate directory locations:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
path='/etc/ssh/authorized_keys/charlie'
manage_dir=no
# Using with_file
- name: Set up authorized_keys for the deploy user
authorized_key: user=deploy
key="{{ item }}"
with_file:
- public_keys/doe-jane
- public_keys/doe-john
# Using key_options:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options='no-port-forwarding,from="10.0.1.1"'
# Set up authorized_keys exclusively with one key
- authorized_key: user=root key="{{ item }}" state=present
exclusive=yes
with_file:
- public_keys/doe-jane
'''
# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys.
#
# Arguments
# =========
# user = username
# key = line to add to authorized_keys for user
# path = path to the user's authorized_keys file (default: ~/.ssh/authorized_keys)
# manage_dir = whether to create, and control ownership of the directory (default: true)
# state = absent|present (default: present)
#
# see example in examples/playbooks
import sys
import os
import pwd
import os.path
import tempfile
import re
import shlex
class keydict(dict):
    """A dictionary that maintains the order in which keys were first added.
    `keys()`, `values()`, iteration and `itervalues()` all follow insertion
    order. (collections.OrderedDict is the modern equivalent; this class is
    kept for compatibility with the module's existing callers.)
    """
    # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class
    def __init__(self, *args, **kw):
        super(keydict, self).__init__(*args, **kw)
        # Materialize a real list: on Python 3 dict.keys() is a view with no
        # append(); on Python 2 this is just a copy of the key list.
        self.itemlist = list(super(keydict, self).keys())
    def __setitem__(self, key, value):
        # Record the key only on first assignment; the previous code appended
        # unconditionally, so re-assigning a key duplicated it in
        # keys()/values().
        if key not in self:
            self.itemlist.append(key)
        super(keydict, self).__setitem__(key, value)
    def __delitem__(self, key):
        # Keep the ordering list in sync; previously deletions left a stale
        # entry behind, making keys()/values() raise or lie.
        super(keydict, self).__delitem__(key)
        self.itemlist.remove(key)
    def __iter__(self):
        return iter(self.itemlist)
    def keys(self):
        return self.itemlist
    def values(self):
        return [self[key] for key in self]
    def itervalues(self):
        return (self[key] for key in self)
def keyfile(module, user, write=False, path=None, manage_dir=True):
"""
Calculate name of authorized keys file, optionally creating the
directories and file, properly setting permissions.
:param str user: name of user in passwd file
:param bool write: if True, write changes to authorized_keys file (creating directories if needed)
:param str path: if not None, use provided path rather than default of '~user/.ssh/authorized_keys'
:param bool manage_dir: if True, create and set ownership of the parent dir of the authorized_keys file
:return: full path string to authorized_keys for user
"""
if module.check_mode and path is not None:
keysfile = path
return keysfile
try:
user_entry = pwd.getpwnam(user)
except KeyError, e:
if module.check_mode and path is None:
module.fail_json(msg="Either user must exist or you must provide full path to key file in check mode")
module.fail_json(msg="Failed to lookup user %s: %s" % (user, str(e)))
if path is None:
homedir = user_entry.pw_dir
sshdir = os.path.join(homedir, ".ssh")
keysfile = os.path.join(sshdir, "authorized_keys")
else:
sshdir = os.path.dirname(path)
keysfile = path
if not write:
return keysfile
uid = user_entry.pw_uid
gid = user_entry.pw_gid
if manage_dir:
if not os.path.exists(sshdir):
os.mkdir(sshdir, 0700)
if module.selinux_enabled():
module.set_default_selinux_context(sshdir, False)
os.chown(sshdir, uid, gid)
os.chmod(sshdir, 0700)
if not os.path.exists(keysfile):
basedir = os.path.dirname(keysfile)
if not os.path.exists(basedir):
os.makedirs(basedir)
try:
f = open(keysfile, "w") #touches file so we can set ownership and perms
finally:
f.close()
if module.selinux_enabled():
module.set_default_selinux_context(keysfile, False)
try:
os.chown(keysfile, uid, gid)
os.chmod(keysfile, 0600)
except OSError:
pass
return keysfile
def parseoptions(module, options):
'''
reads a string containing ssh-key options
and returns a dictionary of those options
'''
options_dict = keydict() #ordered dict
if options:
try:
# the following regex will split on commas while
# ignoring those commas that fall within quotes
regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
parts = regex.split(options)[1:-1]
for part in parts:
if "=" in part:
(key, value) = part.split("=", 1)
options_dict[key] = value
elif part != ",":
options_dict[part] = None
except:
module.fail_json(msg="invalid option string: %s" % options)
return options_dict
def parsekey(module, raw_key):
'''
parses a key, which may or may not contain a list
of ssh-key options at the beginning
'''
VALID_SSH2_KEY_TYPES = [
'ssh-ed25519',
'ecdsa-sha2-nistp256',
'ecdsa-sha2-nistp384',
'ecdsa-sha2-nistp521',
'ssh-dss',
'ssh-rsa',
]
options = None # connection options
key = None # encrypted key string
key_type = None # type of ssh key
type_index = None # index of keytype in key string|list
# remove comment yaml escapes
raw_key = raw_key.replace('\#', '#')
# split key safely
lex = shlex.shlex(raw_key)
lex.quotes = []
lex.commenters = '' #keep comment hashes
lex.whitespace_split = True
key_parts = list(lex)
for i in range(0, len(key_parts)):
if key_parts[i] in VALID_SSH2_KEY_TYPES:
type_index = i
key_type = key_parts[i]
break
# check for options
if type_index is None:
return None
elif type_index > 0:
options = " ".join(key_parts[:type_index])
# parse the options (if any)
options = parseoptions(module, options)
# get key after the type index
key = key_parts[(type_index + 1)]
# set comment to everything after the key
if len(key_parts) > (type_index + 1):
comment = " ".join(key_parts[(type_index + 2):])
return (key, key_type, options, comment)
def readkeys(module, filename):
if not os.path.isfile(filename):
return {}
keys = {}
f = open(filename)
for line in f.readlines():
key_data = parsekey(module, line)
if key_data:
# use key as identifier
keys[key_data[0]] = key_data
else:
# for an invalid line, just append the line
# to the array so it will be re-output later
keys[line] = line
f.close()
return keys
def writekeys(module, filename, keys):
fd, tmp_path = tempfile.mkstemp('', 'tmp', os.path.dirname(filename))
f = open(tmp_path,"w")
try:
for index, key in keys.items():
try:
(keyhash,type,options,comment) = key
option_str = ""
if options:
option_strings = []
for option_key in options.keys():
if options[option_key]:
option_strings.append("%s=%s" % (option_key, options[option_key]))
else:
option_strings.append("%s" % option_key)
option_str = ",".join(option_strings)
option_str += " "
key_line = "%s%s %s %s\n" % (option_str, type, keyhash, comment)
except:
key_line = key
f.writelines(key_line)
except IOError, e:
module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e)))
f.close()
module.atomic_move(tmp_path, filename)
def enforce_state(module, params):
"""
Add or remove key.
"""
user = params["user"]
key = params["key"]
path = params.get("path", None)
manage_dir = params.get("manage_dir", True)
state = params.get("state", "present")
key_options = params.get("key_options", None)
exclusive = params.get("exclusive", False)
error_msg = "Error getting key from: %s"
# if the key is a url, request it and use it as key source
if key.startswith("http"):
try:
resp, info = fetch_url(module, key)
if info['status'] != 200:
module.fail_json(msg=error_msg % key)
else:
key = resp.read()
except Exception:
module.fail_json(msg=error_msg % key)
# extract individual keys into an array, skipping blank lines and comments
key = [s for s in key.splitlines() if s and not s.startswith('#')]
# check current state -- just get the filename, don't create file
do_write = False
params["keyfile"] = keyfile(module, user, do_write, path, manage_dir)
existing_keys = readkeys(module, params["keyfile"])
# Add a place holder for keys that should exist in the state=present and
# exclusive=true case
keys_to_exist = []
# Check our new keys, if any of them exist we'll continue.
for new_key in key:
parsed_new_key = parsekey(module, new_key)
if not parsed_new_key:
module.fail_json(msg="invalid key specified: %s" % new_key)
if key_options is not None:
parsed_options = parseoptions(module, key_options)
parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3])
present = False
matched = False
non_matching_keys = []
if parsed_new_key[0] in existing_keys:
present = True
# Then we check if everything matches, including
# the key type and options. If not, we append this
# existing key to the non-matching list
# We only want it to match everything when the state
# is present
if parsed_new_key != existing_keys[parsed_new_key[0]] and state == "present":
non_matching_keys.append(existing_keys[parsed_new_key[0]])
else:
matched = True
# handle idempotent state=present
if state=="present":
keys_to_exist.append(parsed_new_key[0])
if len(non_matching_keys) > 0:
for non_matching_key in non_matching_keys:
if non_matching_key[0] in existing_keys:
del existing_keys[non_matching_key[0]]
do_write = True
if not matched:
existing_keys[parsed_new_key[0]] = parsed_new_key
do_write = True
elif state=="absent":
if not matched:
continue
del existing_keys[parsed_new_key[0]]
do_write = True
# remove all other keys to honor exclusive
if state == "present" and exclusive:
to_remove = frozenset(existing_keys).difference(keys_to_exist)
for key in to_remove:
del existing_keys[key]
do_write = True
if do_write:
if module.check_mode:
module.exit_json(changed=True)
writekeys(module, keyfile(module, user, do_write, path, manage_dir), existing_keys)
params['changed'] = True
else:
if module.check_mode:
module.exit_json(changed=False)
return params
def main():
module = AnsibleModule(
argument_spec = dict(
user = dict(required=True, type='str'),
key = dict(required=True, type='str'),
path = dict(required=False, type='str'),
manage_dir = dict(required=False, type='bool', default=True),
state = dict(default='present', choices=['absent','present']),
key_options = dict(required=False, type='str'),
unique = dict(default=False, type='bool'),
exclusive = dict(default=False, type='bool'),
),
supports_check_mode=True
)
results = enforce_state(module, module.params)
module.exit_json(**results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| gpl-3.0 | -2,530,987,097,980,303,400 | 32.566596 | 114 | 0.601877 | false |
DuCorey/bokeh | tests/integration/annotations/test_title.py | 6 | 1584 | from __future__ import absolute_import
from bokeh.io import save
from bokeh.models import Plot, Range1d, LinearAxis, Circle, Column, ColumnDataSource
import pytest
pytestmark = pytest.mark.integration
HEIGHT = 600
WIDTH = 600
@pytest.mark.screenshot
def test_the_default_titles_settings_and_ensure_outside_any_axes(output_file_url, selenium, screenshot):
# Testing title rendering of background and border is covered in the
# label test. The title added to plot as the primary title
# should always be outside axes and other side renderers.
source = ColumnDataSource(data=dict(x=[1, 2], y=[1, 2]))
def make_plot(location, title_align, two_axes=True):
plot = Plot(
plot_width=400, plot_height=200,
x_range=Range1d(0, 2), y_range=Range1d(0, 2),
toolbar_location=None,
title_location=location,
)
plot.title.text = "Title %s - %s" % (location, title_align)
plot.title.align = title_align
plot.add_glyph(source, Circle(x='x', y='y', radius=0.4))
plot.add_layout(LinearAxis(), location)
if two_axes:
plot.add_layout(LinearAxis(), location)
return plot
layout = Column(
make_plot('above', 'left', two_axes=False), # This is a workaround top doesn't like two axes
make_plot('right', 'right'),
make_plot('below', 'center'),
make_plot('left', 'left')
)
# Save the plot and start the test
save(layout)
selenium.get(output_file_url)
# Take screenshot
screenshot.assert_is_valid()
| bsd-3-clause | 2,372,366,653,181,085,000 | 32 | 104 | 0.643308 | false |
mzdaniel/oh-mainline | vendor/packages/twisted/twisted/test/test_modules.py | 18 | 15268 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for twisted.python.modules, abstract access to imported or importable
objects.
"""
import sys
import itertools
import zipfile
import compileall
import twisted
from twisted.trial.unittest import TestCase
from twisted.python import modules
from twisted.python.filepath import FilePath
from twisted.python.reflect import namedAny
from twisted.test.test_paths import zipit
class PySpaceTestCase(TestCase):
def findByIteration(self, modname, where=modules, importPackages=False):
"""
You don't ever actually want to do this, so it's not in the public API, but
sometimes we want to compare the result of an iterative call with a
lookup call and make sure they're the same for test purposes.
"""
for modinfo in where.walkModules(importPackages=importPackages):
if modinfo.name == modname:
return modinfo
self.fail("Unable to find module %r through iteration." % (modname,))
def replaceSysPath(self, sysPath):
"""
Replace sys.path, for the duration of the test, with the given value.
"""
originalSysPath = sys.path[:]
def cleanUpSysPath():
sys.path[:] = originalSysPath
self.addCleanup(cleanUpSysPath)
sys.path[:] = sysPath
def replaceSysModules(self, sysModules):
"""
Replace sys.modules, for the duration of the test, with the given value.
"""
originalSysModules = sys.modules.copy()
def cleanUpSysModules():
sys.modules.clear()
sys.modules.update(originalSysModules)
self.addCleanup(cleanUpSysModules)
sys.modules.clear()
sys.modules.update(sysModules)
def pathEntryWithOnePackage(self, pkgname="test_package"):
"""
Generate a L{FilePath} with one package, named C{pkgname}, on it, and
return the L{FilePath} of the path entry.
"""
entry = FilePath(self.mktemp())
pkg = entry.child("test_package")
pkg.makedirs()
pkg.child("__init__.py").setContent("")
return entry
class BasicTests(PySpaceTestCase):
def test_unimportablePackageGetItem(self):
"""
If a package has been explicitly forbidden from importing by setting a
C{None} key in sys.modules under its name,
L{modules.PythonPath.__getitem__} should still be able to retrieve an
unloaded L{modules.PythonModule} for that package.
"""
shouldNotLoad = []
path = modules.PythonPath(sysPath=[self.pathEntryWithOnePackage().path],
moduleLoader=shouldNotLoad.append,
importerCache={},
sysPathHooks={},
moduleDict={'test_package': None})
self.assertEquals(shouldNotLoad, [])
self.assertEquals(path['test_package'].isLoaded(), False)
def test_unimportablePackageWalkModules(self):
"""
If a package has been explicitly forbidden from importing by setting a
C{None} key in sys.modules under its name, L{modules.walkModules} should
still be able to retrieve an unloaded L{modules.PythonModule} for that
package.
"""
existentPath = self.pathEntryWithOnePackage()
self.replaceSysPath([existentPath.path])
self.replaceSysModules({"test_package": None})
walked = list(modules.walkModules())
self.assertEquals([m.name for m in walked],
["test_package"])
self.assertEquals(walked[0].isLoaded(), False)
def test_nonexistentPaths(self):
"""
Verify that L{modules.walkModules} ignores entries in sys.path which
do not exist in the filesystem.
"""
existentPath = self.pathEntryWithOnePackage()
nonexistentPath = FilePath(self.mktemp())
self.failIf(nonexistentPath.exists())
self.replaceSysPath([existentPath.path])
expected = [modules.getModule("test_package")]
beforeModules = list(modules.walkModules())
sys.path.append(nonexistentPath.path)
afterModules = list(modules.walkModules())
self.assertEquals(beforeModules, expected)
self.assertEquals(afterModules, expected)
def test_nonDirectoryPaths(self):
"""
Verify that L{modules.walkModules} ignores entries in sys.path which
refer to regular files in the filesystem.
"""
existentPath = self.pathEntryWithOnePackage()
nonDirectoryPath = FilePath(self.mktemp())
self.failIf(nonDirectoryPath.exists())
nonDirectoryPath.setContent("zip file or whatever\n")
self.replaceSysPath([existentPath.path])
beforeModules = list(modules.walkModules())
sys.path.append(nonDirectoryPath.path)
afterModules = list(modules.walkModules())
self.assertEquals(beforeModules, afterModules)
def test_twistedShowsUp(self):
"""
Scrounge around in the top-level module namespace and make sure that
Twisted shows up, and that the module thusly obtained is the same as
the module that we find when we look for it explicitly by name.
"""
self.assertEquals(modules.getModule('twisted'),
self.findByIteration("twisted"))
def test_dottedNames(self):
"""
Verify that the walkModules APIs will give us back subpackages, not just
subpackages.
"""
self.assertEquals(
modules.getModule('twisted.python'),
self.findByIteration("twisted.python",
where=modules.getModule('twisted')))
def test_onlyTopModules(self):
"""
Verify that the iterModules API will only return top-level modules and
packages, not submodules or subpackages.
"""
for module in modules.iterModules():
self.failIf(
'.' in module.name,
"no nested modules should be returned from iterModules: %r"
% (module.filePath))
def test_loadPackagesAndModules(self):
"""
Verify that we can locate and load packages, modules, submodules, and
subpackages.
"""
for n in ['os',
'twisted',
'twisted.python',
'twisted.python.reflect']:
m = namedAny(n)
self.failUnlessIdentical(
modules.getModule(n).load(),
m)
self.failUnlessIdentical(
self.findByIteration(n).load(),
m)
def test_pathEntriesOnPath(self):
"""
Verify that path entries discovered via module loading are, in fact, on
sys.path somewhere.
"""
for n in ['os',
'twisted',
'twisted.python',
'twisted.python.reflect']:
self.failUnlessIn(
modules.getModule(n).pathEntry.filePath.path,
sys.path)
def test_alwaysPreferPy(self):
"""
Verify that .py files will always be preferred to .pyc files, regardless of
directory listing order.
"""
mypath = FilePath(self.mktemp())
mypath.createDirectory()
pp = modules.PythonPath(sysPath=[mypath.path])
originalSmartPath = pp._smartPath
def _evilSmartPath(pathName):
o = originalSmartPath(pathName)
originalChildren = o.children
def evilChildren():
# normally this order is random; let's make sure it always
# comes up .pyc-first.
x = originalChildren()
x.sort()
x.reverse()
return x
o.children = evilChildren
return o
mypath.child("abcd.py").setContent('\n')
compileall.compile_dir(mypath.path, quiet=True)
# sanity check
self.assertEquals(len(mypath.children()), 2)
pp._smartPath = _evilSmartPath
self.assertEquals(pp['abcd'].filePath,
mypath.child('abcd.py'))
def test_packageMissingPath(self):
"""
A package can delete its __path__ for some reasons,
C{modules.PythonPath} should be able to deal with it.
"""
mypath = FilePath(self.mktemp())
mypath.createDirectory()
pp = modules.PythonPath(sysPath=[mypath.path])
subpath = mypath.child("abcd")
subpath.createDirectory()
subpath.child("__init__.py").setContent('del __path__\n')
sys.path.append(mypath.path)
import abcd
try:
l = list(pp.walkModules())
self.assertEquals(len(l), 1)
self.assertEquals(l[0].name, 'abcd')
finally:
del abcd
del sys.modules['abcd']
sys.path.remove(mypath.path)
class PathModificationTest(PySpaceTestCase):
"""
These tests share setup/cleanup behavior of creating a dummy package and
stuffing some code in it.
"""
_serialnum = itertools.count().next # used to generate serial numbers for
# package names.
def setUp(self):
self.pathExtensionName = self.mktemp()
self.pathExtension = FilePath(self.pathExtensionName)
self.pathExtension.createDirectory()
self.packageName = "pyspacetests%d" % (self._serialnum(),)
self.packagePath = self.pathExtension.child(self.packageName)
self.packagePath.createDirectory()
self.packagePath.child("__init__.py").setContent("")
self.packagePath.child("a.py").setContent("")
self.packagePath.child("b.py").setContent("")
self.packagePath.child("c__init__.py").setContent("")
self.pathSetUp = False
def _setupSysPath(self):
assert not self.pathSetUp
self.pathSetUp = True
sys.path.append(self.pathExtensionName)
def _underUnderPathTest(self, doImport=True):
moddir2 = self.mktemp()
fpmd = FilePath(moddir2)
fpmd.createDirectory()
fpmd.child("foozle.py").setContent("x = 123\n")
self.packagePath.child("__init__.py").setContent(
"__path__.append(%r)\n" % (moddir2,))
# Cut here
self._setupSysPath()
modinfo = modules.getModule(self.packageName)
self.assertEquals(
self.findByIteration(self.packageName+".foozle", modinfo,
importPackages=doImport),
modinfo['foozle'])
self.assertEquals(modinfo['foozle'].load().x, 123)
def test_underUnderPathAlreadyImported(self):
"""
Verify that iterModules will honor the __path__ of already-loaded packages.
"""
self._underUnderPathTest()
def test_underUnderPathNotAlreadyImported(self):
"""
Verify that iterModules will honor the __path__ of already-loaded packages.
"""
self._underUnderPathTest(False)
test_underUnderPathNotAlreadyImported.todo = (
"This may be impossible but it sure would be nice.")
def _listModules(self):
pkginfo = modules.getModule(self.packageName)
nfni = [modinfo.name.split(".")[-1] for modinfo in
pkginfo.iterModules()]
nfni.sort()
self.failUnlessEqual(nfni, ['a', 'b', 'c__init__'])
def test_listingModules(self):
"""
Make sure the module list comes back as we expect from iterModules on a
package, whether zipped or not.
"""
self._setupSysPath()
self._listModules()
def test_listingModulesAlreadyImported(self):
"""
Make sure the module list comes back as we expect from iterModules on a
package, whether zipped or not, even if the package has already been
imported.
"""
self._setupSysPath()
namedAny(self.packageName)
self._listModules()
def tearDown(self):
# Intentionally using 'assert' here, this is not a test assertion, this
# is just an "oh fuck what is going ON" assertion. -glyph
if self.pathSetUp:
HORK = "path cleanup failed: don't be surprised if other tests break"
assert sys.path.pop() is self.pathExtensionName, HORK+", 1"
assert self.pathExtensionName not in sys.path, HORK+", 2"
class RebindingTest(PathModificationTest):
"""
These tests verify that the default path interrogation API works properly
even when sys.path has been rebound to a different object.
"""
def _setupSysPath(self):
assert not self.pathSetUp
self.pathSetUp = True
self.savedSysPath = sys.path
sys.path = sys.path[:]
sys.path.append(self.pathExtensionName)
def tearDown(self):
"""
Clean up sys.path by re-binding our original object.
"""
if self.pathSetUp:
sys.path = self.savedSysPath
class ZipPathModificationTest(PathModificationTest):
def _setupSysPath(self):
assert not self.pathSetUp
zipit(self.pathExtensionName, self.pathExtensionName+'.zip')
self.pathExtensionName += '.zip'
assert zipfile.is_zipfile(self.pathExtensionName)
PathModificationTest._setupSysPath(self)
class PythonPathTestCase(TestCase):
"""
Tests for the class which provides the implementation for all of the
public API of L{twisted.python.modules}, L{PythonPath}.
"""
def test_unhandledImporter(self):
"""
Make sure that the behavior when encountering an unknown importer
type is not catastrophic failure.
"""
class SecretImporter(object):
pass
def hook(name):
return SecretImporter()
syspath = ['example/path']
sysmodules = {}
syshooks = [hook]
syscache = {}
def sysloader(name):
return None
space = modules.PythonPath(
syspath, sysmodules, syshooks, syscache, sysloader)
entries = list(space.iterEntries())
self.assertEquals(len(entries), 1)
self.assertRaises(KeyError, lambda: entries[0]['module'])
def test_inconsistentImporterCache(self):
"""
If the path a module loaded with L{PythonPath.__getitem__} is not
present in the path importer cache, a warning is emitted, but the
L{PythonModule} is returned as usual.
"""
space = modules.PythonPath([], sys.modules, [], {})
thisModule = space[__name__]
warnings = self.flushWarnings([self.test_inconsistentImporterCache])
self.assertEquals(warnings[0]['category'], UserWarning)
self.assertEquals(
warnings[0]['message'],
FilePath(twisted.__file__).parent().dirname() +
" (for module " + __name__ + ") not in path importer cache "
"(PEP 302 violation - check your local configuration).")
self.assertEquals(len(warnings), 1)
self.assertEquals(thisModule.name, __name__)
| agpl-3.0 | 3,595,598,122,497,762,300 | 32.853659 | 83 | 0.606694 | false |
sjotterman/python_koans | python2/koans/about_tuples.py | 73 | 2259 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(__, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
self.assertMatch(__, ex[0])
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three.append("boom")
except Exception as ex:
self.assertEqual(AttributeError, type(ex))
# Note, assertMatch() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertMatch(__, ex[0])
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual(__, count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(__, (1).__class__)
self.assertEqual(__, (1,).__class__)
self.assertEqual(__, ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(__, tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(__, ())
self.assertEqual(__, tuple()) # Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(__, place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append(
("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
)
self.assertEqual(__, locations[2][0])
self.assertEqual(__, locations[0][1][2])
| mit | -9,187,506,441,115,051,000 | 31.271429 | 75 | 0.556884 | false |
paolinux79/FTCryPTUploader | FTCryPTUploader/FtpCoord.py | 1 | 1473 | import threading
class FtpCoord:
shutdown = None
lock = None
stats = {}
def __init__(self):
self.shutdown = False
self.lock = threading.Lock()
def kill(self):
print("raising shutdown")
self.shutdown = True
def need_to_stop(self):
return self.shutdown
def update_stats(self, filepath, size, status, elapsed):
with self.lock:
self.stats[filepath] = {'size':size, 'status' : status, 'elapsed' :elapsed}
def show_stats(self):
xferred = 0
resumed = 0
failed = 0
already = 0
elapsed = 0
size = 0
with self.lock:
for k, v in self.stats.items():
if v['status'] == 'xferred':
xferred += 1
elif v['status'] == 'resumed':
resumed += 1
elif v['status'] == 'failed':
print(k)
failed += 1
elif v['status'] == 'already':
already += 1
elapsed += v['elapsed']
size += v['size']
print("xferred: " + str(xferred))
print("resumed: " + str(resumed))
print("failed: " + str(failed))
print("already: " + str(already))
print("elapsed: " + str(elapsed))
print("size: " + str(size))
if size > 0 and elapsed > 0:
print("bandwith: " + str((size/elapsed)/1024) + " KiB/s") | bsd-2-clause | 5,517,717,484,509,087,000 | 28.48 | 87 | 0.468432 | false |
fgesora/odoo | openerp/addons/base/tests/test_mail_examples.py | 302 | 57129 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Messy, hand-written HTML fragment mixing deprecated presentational markup
# (<font> tags, heavy inline styles), text formatting (<b>, <i>, <u>, <strike>),
# nested <ul>/<ol> lists, nested <blockquote>s, a plain http link and a
# "javascript:" link carrying fake malicious code.
# NOTE(review): presumably a fixture for HTML parsing/sanitization tests (the
# javascript: href looks like the case a sanitizer must strip) — confirm
# against the tests that import this constant.
# Do not reformat: tests may match these exact byte sequences.
MISC_HTML_SOURCE = """
<font size="2" style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">test1</font>
<div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; font-style: normal; ">
<b>test2</b></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<i>test3</i></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<u>test4</u></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; font-size: 12px; ">
<strike>test5</strike></div><div style="color: rgb(31, 31, 31); font-family: monospace; font-variant: normal; line-height: normal; ">
<font size="5">test6</font></div><div><ul><li><font color="#1f1f1f" face="monospace" size="2">test7</font></li><li>
<font color="#1f1f1f" face="monospace" size="2">test8</font></li></ul><div><ol><li><font color="#1f1f1f" face="monospace" size="2">test9</font>
</li><li><font color="#1f1f1f" face="monospace" size="2">test10</font></li></ol></div></div>
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><div><div><font color="#1f1f1f" face="monospace" size="2">
test11</font></div></div></div></blockquote><blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;">
<blockquote style="margin: 0 0 0 40px; border: none; padding: 0px;"><div><font color="#1f1f1f" face="monospace" size="2">
test12</font></div><div><font color="#1f1f1f" face="monospace" size="2"><br></font></div></blockquote></blockquote>
<font color="#1f1f1f" face="monospace" size="2"><a href="http://google.com">google</a></font>
<a href="javascript:alert('malicious code')">test link</a>
"""
EDI_LIKE_HTML_SOURCE = """<div style="font-family: 'Lucica Grande', Ubuntu, Arial, Verdana, sans-serif; font-size: 12px; color: rgb(34, 34, 34); background-color: #FFF; ">
<p>Hello ${object.partner_id.name},</p>
<p>A new invoice is available for you: </p>
<p style="border-left: 1px solid #8e0000; margin-left: 30px;">
<strong>REFERENCES</strong><br />
Invoice number: <strong>${object.number}</strong><br />
Invoice total: <strong>${object.amount_total} ${object.currency_id.name}</strong><br />
Invoice date: ${object.date_invoice}<br />
Order reference: ${object.origin}<br />
Your contact: <a href="mailto:${object.user_id.email or ''}?subject=Invoice%20${object.number}">${object.user_id.name}</a>
</p>
<br/>
<p>It is also possible to directly pay with Paypal:</p>
<a style="margin-left: 120px;" href="${object.paypal_url}">
<img class="oe_edi_paypal_button" src="https://www.paypal.com/en_US/i/btn/btn_paynowCC_LG.gif"/>
</a>
<br/>
<p>If you have any question, do not hesitate to contact us.</p>
<p>Thank you for choosing ${object.company_id.name or 'us'}!</p>
<br/>
<br/>
<div style="width: 375px; margin: 0px; padding: 0px; background-color: #8E0000; border-top-left-radius: 5px 5px; border-top-right-radius: 5px 5px; background-repeat: repeat no-repeat;">
<h3 style="margin: 0px; padding: 2px 14px; font-size: 12px; color: #DDD;">
<strong style="text-transform:uppercase;">${object.company_id.name}</strong></h3>
</div>
<div style="width: 347px; margin: 0px; padding: 5px 14px; line-height: 16px; background-color: #F2F2F2;">
<span style="color: #222; margin-bottom: 5px; display: block; ">
${object.company_id.street}<br/>
${object.company_id.street2}<br/>
${object.company_id.zip} ${object.company_id.city}<br/>
${object.company_id.state_id and ('%s, ' % object.company_id.state_id.name) or ''} ${object.company_id.country_id.name or ''}<br/>
</span>
<div style="margin-top: 0px; margin-right: 0px; margin-bottom: 0px; margin-left: 0px; padding-top: 0px; padding-right: 0px; padding-bottom: 0px; padding-left: 0px; ">
Phone: ${object.company_id.phone}
</div>
<div>
Web : <a href="${object.company_id.website}">${object.company_id.website}</a>
</div>
</div>
</div></body></html>"""
OERP_WEBSITE_HTML_1 = """
<div>
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb16" data-snippet-id="colmd">
<h2>OpenERP HR Features</h2>
<h3 class="text-muted">Manage your company most important asset: People</h3>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg">
<h4 class="mt16">Streamline Recruitments</h4>
<p>Post job offers and keep track of each application received. Follow applicants in your recruitment process with the smart kanban view.</p>
<p>Save time by automating some communications with email templates. Resumes are indexed automatically, allowing you to easily find for specific profiles.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/desert_thumb.jpg">
<h4 class="mt16">Enterprise Social Network</h4>
<p>Break down information silos. Share knowledge and best practices amongst all employees. Follow specific people or documents and join groups of interests to share expertise and documents.</p>
<p>Interact with your collegues in real time with live chat.</p>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg">
<h4 class="mt16">Leaves Management</h4>
<p>Keep track of the vacation days accrued by each employee. Employees enter their requests (paid holidays, sick leave, etc), for managers to approve and validate. It's all done in just a few clicks. The agenda of each employee is updated accordingly.</p>
</div>
</div>
</div>
</div>"""
# Substrings of OERP_WEBSITE_HTML_1 (the "Manage your company..." heading and
# the first column's image tag).
# NOTE(review): the _IN suffix suggests these are expected to be present in the
# output of whatever processing the tests apply — confirm against the
# importing tests; this module only defines the fixture data.
OERP_WEBSITE_HTML_1_IN = [
    'Manage your company most important asset: People',
    'img class="img-rounded img-responsive" src="/website/static/src/img/china_thumb.jpg"',
]
# Further substrings of OERP_WEBSITE_HTML_1 (text from the second and third
# columns and the third column's image tag).
# NOTE(review): the _OUT suffix suggests these are expected to be absent from
# the processed output (e.g. truncated or stripped away) — confirm against the
# importing tests; this module only defines the fixture data.
OERP_WEBSITE_HTML_1_OUT = [
    'Break down information silos.',
    'Keep track of the vacation days accrued by each employee',
    'img class="img-rounded img-responsive" src="/website/static/src/img/deers_thumb.jpg',
]
OERP_WEBSITE_HTML_2 = """
<div class="mt16 cke_widget_editable cke_widget_element oe_editable oe_dirty" data-oe-model="blog.post" data-oe-id="6" data-oe-field="content" data-oe-type="html" data-oe-translate="0" data-oe-expression="blog_post.content" data-cke-widget-data="{}" data-cke-widget-keep-attr="0" data-widget="oeref" contenteditable="true" data-cke-widget-editable="text">
<section class="mt16 mb16" data-snippet-id="text-block">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>
OpenERP Project Management
</h2>
<h3 class="text-muted">Infinitely flexible. Incredibly easy to use.</h3>
</div>
<div class="col-md-12 mb16 mt16" data-snippet-id="colmd">
<p>
OpenERP's <b>collaborative and realtime</b> project
management helps your team get work done. Keep
track of everything, from the big picture to the
minute details, from the customer contract to the
billing.
</p><p>
Organize projects around <b>your own processes</b>. Work
on tasks and issues using the kanban view, schedule
tasks using the gantt chart and control deadlines
in the calendar view. Every project may have it's
own stages allowing teams to optimize their job.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="image-text">
<div class="container">
<div class="row">
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/image_text.jpg">
</div>
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Manage Your Shops</h3>
<p>
OpenERP's Point of Sale introduces a super clean
interface with no installation required that runs
online and offline on modern hardwares.
</p><p>
It's full integration with the company inventory
and accounting, gives you real time statistics and
consolidations amongst all shops without the hassle
of integrating several applications.
</p>
</div>
</div>
</div>
</section>
<section class="" data-snippet-id="text-image">
<div class="container">
<div class="row">
<div class="col-md-6 mt32" data-snippet-id="colmd">
<h3>Enterprise Social Network</h3>
<p>
Make every employee feel more connected and engaged
with twitter-like features for your own company. Follow
people, share best practices, 'like' top ideas, etc.
</p><p>
Connect with experts, follow what interests you, share
documents and promote best practices with OpenERP
Social application. Get work done with effective
collaboration across departments, geographies
and business applications.
</p>
</div>
<div class="col-md-6 mt16 mb16" data-snippet-id="colmd">
<img class="img-responsive shadow" src="/website/static/src/img/text_image.png">
</div>
</div>
</div>
</section><section class="" data-snippet-id="portfolio">
<div class="container">
<div class="row">
<div class="col-md-12 text-center mt16 mb32" data-snippet-id="colmd">
<h2>Our Porfolio</h2>
<h4 class="text-muted">More than 500 successful projects</h4>
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/deers.jpg">
</div>
<div class="col-md-4" data-snippet-id="colmd">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/landscape.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/china.jpg">
<img class="img-thumbnail img-responsive" src="/website/static/src/img/desert.jpg">
</div>
</div>
</div>
</section>
</div>
"""
OERP_WEBSITE_HTML_2_IN = [
'management helps your team get work done',
]
OERP_WEBSITE_HTML_2_OUT = [
'Make every employee feel more connected',
'img class="img-responsive shadow" src="/website/static/src/img/text_image.png',
]
# Plaintext e-mail fixtures: a simple body with a "--" signature (TEXT_1) and
# a French reply with nested ">"-quoted history (TEXT_2).  *_IN / *_OUT hold
# substrings expected kept / stripped by the cleaner (inferred from the
# fixture names — confirm against the consuming tests).
TEXT_1 = """I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature"""
TEXT_1_IN = ["""I contact you about our meeting tomorrow. Here is the schedule I propose:
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
TEXT_1_OUT = ["""--
MySignature"""]
TEXT_2 = """Salut Raoul!
Le 28 oct. 2012 à 00:02, Raoul Grosbedon a écrit :
> I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)
Of course. This seems viable.
> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature
Bert TARTOPOILS
bert.tartopoils@miam.miam
"""
TEXT_2_IN = ["Salut Raoul!", "Of course. This seems viable."]
TEXT_2_OUT = ["I contact you about our meeting tomorrow. Here is the schedule I propose: (quote)",
"""> 2012/10/27 Bert Tartopoils :
>> blahblahblah (quote)?
>>
>> blahblahblah (quote)
>>
>> Bert TARTOPOILS
>> bert.tartopoils@miam.miam
>>
>
>
> --
> RaoulSignature"""]
# Minimal HTML e-mail fixtures: signature after "--" (HTML_1), a pure-content
# mail with no signature/quote (HTML_2), and a "----- Mail original -----"
# forwarded block inside <pre> (HTML_3).  *_IN = substrings expected kept,
# *_OUT = substrings expected stripped (convention inferred from names).
HTML_1 = """<p>I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?
--
MySignature</p>"""
HTML_1_IN = ["""I contact you about our meeting for tomorrow. Here is the schedule I propose: (keep)
9 AM: brainstorming about our new amazing business app
9.45 AM: summary
10 AM: meeting with Ignasse to present our app
Is everything ok for you ?"""]
HTML_1_OUT = ["""--
MySignature"""]
HTML_2 = """<div>
<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>
</div>
<div>
<ul>
<li><span>9 AM: brainstorming about our new amazing business app</span></li>
<li><span>9.45 AM: summary</span></li>
<li><span>10 AM: meeting with Fabien to present our app</span></li>
</ul>
</div>
<div>
<font><span>Is everything ok for you ?</span></font>
</div>"""
HTML_2_IN = ["<font><span>I contact you about our meeting for tomorrow. Here is the schedule I propose:</span></font>",
"<li><span>9 AM: brainstorming about our new amazing business app</span></li>",
"<li><span>9.45 AM: summary</span></li>",
"<li><span>10 AM: meeting with Fabien to present our app</span></li>",
"<font><span>Is everything ok for you ?</span></font>"]
# Nothing should be stripped from HTML_2, hence the empty OUT list.
HTML_2_OUT = []
HTML_3 = """<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>
<pre>Hi,
My CRM-related question.
Regards,
XXXX</pre></div>"""
HTML_3_IN = ["""<div><pre>This is an answer.
Regards,
XXXXXX
----- Mail original -----</pre>"""]
HTML_3_OUT = ["Hi,", "My CRM-related question.",
"Regards,"]
# Larger real-world replies used without *_IN/*_OUT expectation lists:
# HTML_4 is an Outlook-for-Mac style reply (quoted original inside
# <span id="OLK_SRC_BODY_SECTION">), HTML_5 is a Yahoo-webmail style reply
# with an "________________________________" separator inside <pre>.
HTML_4 = """
<div>
<div>Hi Nicholas,</div>
<br>
<div>I'm free now. 00447710085916.</div>
<br>
<div>Regards,</div>
<div>Nicholas</div>
<br>
<span id="OLK_SRC_BODY_SECTION">
<div style="font-family:Calibri; font-size:11pt; text-align:left; color:black; BORDER-BOTTOM: medium none; BORDER-LEFT: medium none; PADDING-BOTTOM: 0in; PADDING-LEFT: 0in; PADDING-RIGHT: 0in; BORDER-TOP: #b5c4df 1pt solid; BORDER-RIGHT: medium none; PADDING-TOP: 3pt">
<span style="font-weight:bold">From: </span>OpenERP Enterprise <<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Reply-To: </span><<a href="mailto:sales@openerp.com">sales@openerp.com</a>><br><span style="font-weight:bold">Date: </span>Wed, 17 Apr 2013 13:30:47 +0000<br><span style="font-weight:bold">To: </span>Microsoft Office User <<a href="mailto:n.saxlund@babydino.com">n.saxlund@babydino.com</a>><br><span style="font-weight:bold">Subject: </span>Re: your OpenERP.com registration<br>
</div>
<br>
<div>
<p>Hello Nicholas Saxlund, </p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ?
</p>
<p>Best regards, </p>
<pre><a href="http://openerp.com">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</span>
</div>"""
HTML_5 = """<div><pre>Hi,
I have downloaded OpenERP installer 7.0 and successfully installed the postgresql server and the OpenERP.
I created a database and started to install module by log in as administrator.
However, I was not able to install any module due to "OpenERP Server Error" as shown in the attachement.
Could you please let me know how could I fix this problem?
Regards,
Goh Sin Yih
________________________________
From: OpenERP Enterprise <sales@openerp.com>
To: sinyih_goh@yahoo.com
Sent: Friday, February 8, 2013 12:46 AM
Subject: Feedback From Your OpenERP Trial
Hello Goh Sin Yih,
Thank you for having tested OpenERP Online.
I noticed you started a trial of OpenERP Online (gsy) but you did not decide to keep using it.
So, I just wanted to get in touch with you to get your feedback. Can you tell me what kind of application you were you looking for and why you didn't decide to continue with OpenERP?
Thanks in advance for providing your feedback,
Do not hesitate to contact me if you have any questions,
Thanks,
</pre>"""
# Client-specific reply fixtures: Gmail top-posted reply quoting via
# <blockquote> (GMAIL_1) and a Thunderbird bottom-posted reply with a
# "--" signature in <pre> (THUNDERBIRD_1).  *_IN = kept, *_OUT = stripped
# (convention inferred from names).  Note the captured HTML is intentionally
# malformed (stray </span></li></li>) — real-world data, do not fix.
GMAIL_1 = """Hello,<div><br></div><div>Ok for me. I am replying directly in gmail, without signature.</div><div><br></div><div>Kind regards,</div><div><br></div><div>Demo.<br><br><div>On Thu, Nov 8, 2012 at 5:29 PM, <span><<a href="mailto:dummy@example.com">dummy@example.com</a>></span> wrote:<br><blockquote><div>I contact you about our meeting for tomorrow. Here is the schedule I propose:</div><div><ul><li>9 AM: brainstorming about our new amazing business app</span></li></li>
<li>9.45 AM: summary</li><li>10 AM: meeting with Fabien to present our app</li></ul></div><div>Is everything ok for you ?</div>
<div><p>--<br>Administrator</p></div>
<div><p>Log in our portal at: <a href="http://localhost:8069#action=login&db=mail_1&login=demo">http://localhost:8069#action=login&db=mail_1&login=demo</a></p></div>
</blockquote></div><br></div>"""
GMAIL_1_IN = ['Ok for me. I am replying directly in gmail, without signature.']
GMAIL_1_OUT = ['Administrator', 'Log in our portal at:']
THUNDERBIRD_1 = """<div>On 11/08/2012 05:29 PM,
<a href="mailto:dummy@example.com">dummy@example.com</a> wrote:<br></div>
<blockquote>
<div>I contact you about our meeting for tomorrow. Here is the
schedule I propose:</div>
<div>
<ul><li>9 AM: brainstorming about our new amazing business
app</span></li></li>
<li>9.45 AM: summary</li>
<li>10 AM: meeting with Fabien to present our app</li>
</ul></div>
<div>Is everything ok for you ?</div>
<div>
<p>--<br>
Administrator</p>
</div>
<div>
<p>Log in our portal at:
<a href="http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH">http://localhost:8069#action=login&db=mail_1&token=rHdWcUART5PhEnJRaXjH</a></p>
</div>
</blockquote>
Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.<br><br>
Did you receive my email about my new laptop, by the way ?<br><br>
Raoul.<br><pre>--
Raoul Grosbedonnée
</pre>"""
THUNDERBIRD_1_IN = ['Ok for me. I am replying directly below your mail, using Thunderbird, with a signature.']
# 'Raoul Grosbedon' is a prefix of the actual signature 'Raoul Grosbedonnée';
# a substring check still matches.
THUNDERBIRD_1_OUT = ['I contact you about our meeting for tomorrow.', 'Raoul Grosbedon']
# Hotmail/Outlook.com reply fixture: quoted original follows the
# <hr id="stopSpelling"> separator and "Subject:/From:/To:/Date:" header
# block.  *_IN = kept, *_OUT = stripped (convention inferred from names).
# NOTE(review): the leading space in the second OUT entry appears to be
# intentional (matches the source line's rendering) — confirm.
HOTMAIL_1 = """<div>
<div dir="ltr"><br>
I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly.
<br> <br>Kindest regards,<br>xxx<br>
<div>
<div id="SkyDrivePlaceholder">
</div>
<hr id="stopSpelling">
Subject: Re: your OpenERP.com registration<br>From: xxx@xxx.xxx<br>To: xxx@xxx.xxx<br>Date: Wed, 27 Mar 2013 17:12:12 +0000
<br><br>
Hello xxx,
<br>
I noticed you recently created an OpenERP.com account to access OpenERP Apps.
<br>
You indicated that you wish to use OpenERP in your own company.
We would like to know more about your your business needs and requirements, and see how
we can help you. When would you be available to discuss your project ?<br>
Best regards,<br>
<pre>
<a href="http://openerp.com" target="_blank">http://openerp.com</a>
Belgium: +32.81.81.37.00
U.S.: +1 (650) 307-6736
India: +91 (79) 40 500 100
</pre>
</div>
</div>
</div>"""
HOTMAIL_1_IN = ["I have an amazing company, i'm learning OpenERP, it is a small company yet, but plannig to grow up quickly."]
HOTMAIL_1_OUT = ["Subject: Re: your OpenERP.com registration", " I noticed you recently created an OpenERP.com account to access OpenERP Apps.",
"We would like to know more about your your business needs and requirements", "Belgium: +32.81.81.37.00"]
# Outlook (MS Office) reply fixture #1: WordSection1 markup, reply above a
# "From:/Sent:/To:/Subject:" header block.  *_IN = kept, *_OUT = stripped
# (convention inferred from names).
MSOFFICE_1 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.
We are a company of 25 engineers providing product design services to clients.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I’ll install on a windows server and run a very limited trial to see how it works.
If we adopt OpenERP we will probably move to Linux or look for a hosted SaaS option.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
<br>
I am also evaluating Adempiere and maybe others.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
I expect the trial will take 2-3 months as this is not a high priority for us.
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
Alan
</span>
</p>
<p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span>
</p>
<p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
From:
</span></b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
OpenERP Enterprise [mailto:sales@openerp.com]
<br><b>Sent:</b> Monday, 11 March, 2013 14:47<br><b>To:</b> Alan Widmer<br><b>Subject:</b> Re: your OpenERP.com registration
</span>
</p>
<p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Alan Widmer, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>
Uou mentioned you wish to use OpenERP in your own company. Please let me more about your
business needs and requirements? When will you be available to discuss about your project?
</p>
<p></p>
<p>Thanks for your interest in OpenERP, </p>
<p></p>
<p>Feel free to contact me if you have any questions, </p>
<p></p>
<p>Looking forward to hear from you soon. </p>
<p></p>
<pre><p> </p></pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre><a href="http://openerp.com">http://openerp.com</a><p></p></pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_1_IN = ['Our requirements are simple. Just looking to replace some spreadsheets for tracking quotes and possibly using the timecard module.']
MSOFFICE_1_OUT = ['I noticed you recently downloaded OpenERP.', 'Uou mentioned you wish to use OpenERP in your own company.', 'Belgium: +32.81.81.37.00']
# Outlook (MS Office) reply fixture #2: reply plus a long corporate signature
# block above the quoted "From:/Sent:/To:/Subject:" original.
# *_IN = kept, *_OUT = stripped (convention inferred from names).
MSOFFICE_2 = """
<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Nicolas,</span></p><p></p>
<p></p>
<p class="MsoNormal" style="text-indent:.5in">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">We are currently investigating the possibility of moving away from our current ERP </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Thank You</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Matt</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Raoul Petitpoil</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Poil Industries</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Information Technology</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">920 Super Street</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Sanchez, Pa 17046 USA</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Tel: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Fax: xxx.xxx</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Email: </span>
<a href="mailto:raoul@petitpoil.com">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:blue">raoul@petitpoil.com</span>
</a>
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.poilindustries.com</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">www.superproducts.com</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0in 0in 0in">
<p class="MsoNormal">
<b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">From:</span>
</b>
<span style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Sent:</b> Wednesday, April 17, 2013 1:31 PM<br><b>To:</b> Matt Witters<br><b>Subject:</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul Petitpoil, </p>
<p></p>
<p>I noticed you recently downloaded OpenERP. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
MSOFFICE_2_IN = ['We are currently investigating the possibility']
MSOFFICE_2_OUT = ['I noticed you recently downloaded OpenERP.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# Outlook (MS Office) reply fixture #3: multilingual (FR/NL/EN) signature
# with Wingdings/Webdings glyphs above the quoted "De :/Envoyé :/À :/Objet :"
# original.  Its expectation lists are defined separately below.
MSOFFICE_3 = """<div>
<div class="WordSection1">
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Hi Nicolas !</span></p><p></p>
<p></p>
<p class="MsoNormal">
<span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Yes I’d be glad to hear about your offers as we struggle every year with the planning/approving of LOA. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">I saw your boss yesterday on tv and immediately wanted to test the interface. </span></p><p></p>
<p></p>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Bien à vous, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="NL-BE" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Met vriendelijke groeten, </span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Best regards,</span></b></p><p></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">
</span></b></p><p><b> </b></p><b>
</b>
<p></p>
<p class="MsoNormal">
<b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">R. Petitpoil <br></span>
</b>
<span lang="EN-GB" style="font-size:10.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">Human Resource Manager<b><br><br>Field Resource s.a n.v. <i> <br></i></b>Hermesstraat 6A <br>1930 Zaventem</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:gray"><br></span>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:Wingdings;color:#1F497D">(</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:Wingdings;color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">xxx.xxx </span>
</b>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:gray"><br></span>
</b>
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Wingdings 2";color:#1F497D">7</span>
</b>
<b>
<span lang="FR" style="font-size:9.0pt;font-family:"Wingdings 2";color:#1F497D"> </span>
</b>
<b>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Trebuchet MS","sans-serif";color:gray">+32 2 727.05.91<br></span>
</b>
<span lang="EN-GB" style="font-size:24.0pt;font-family:Webdings;color:green">P</span>
<span lang="EN-GB" style="font-size:8.0pt;font-family:"Tahoma","sans-serif";color:green"> <b> </b></span>
<b>
<span lang="EN-GB" style="font-size:9.0pt;font-family:"Trebuchet MS","sans-serif";color:green">Please consider the environment before printing this email.</span>
</b>
<span lang="EN-GB" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:navy"> </span>
<span lang="EN-GB" style="font-family:"Calibri","sans-serif";color:navy">
</span></p><p></p>
<p></p>
</div>
<p class="MsoNormal">
<span lang="EN-US" style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">
</span></p><p> </p>
<p></p>
<div>
<div style="border:none;border-top:solid #B5C4DF 1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal">
<b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span>
</b>
<span lang="FR" style="font-size:10.0pt;font-family:"Tahoma","sans-serif""> OpenERP Enterprise [mailto:sales@openerp.com] <br><b>Envoyé :</b> jeudi 18 avril 2013 11:31<br><b>À :</b> Paul Richard<br><b>Objet :</b> Re: your OpenERP.com registration</span></p><p></p>
<p></p>
</div>
</div>
<p class="MsoNormal"></p>
<p> </p>
<p>Hello Raoul PETITPOIL, </p>
<p></p>
<p>I noticed you recently registered to our OpenERP Online solution. </p>
<p></p>
<p>You indicated that you wish to use OpenERP in your own company. We would like to know more about your your business needs and requirements, and see how we can help you. When would you be available to discuss your project ? </p>
<p></p>
<p>Best regards, </p>
<p></p>
<pre> <p> </p>
</pre>
<pre>--<p></p></pre>
<pre>Nicolas<p></p></pre>
<pre> <a href="http://openerp.com">http://openerp.com</a>
<p></p>
</pre>
<pre>Belgium: +32.81.81.37.00<p></p></pre>
<pre>U.S.: +1 (650) 307-6736<p></p></pre>
<pre>India: +91 (79) 40 500 100<p></p></pre>
<pre> <p></p></pre>
</div>
</div>"""
# Expectation lists for MSOFFICE_3: *_IN substrings must remain after
# cleaning, *_OUT substrings must be stripped.
MSOFFICE_3_IN = ['I saw your boss yesterday']
# Fix: the first OUT entry used to be 'I noticed you recently downloaded
# OpenERP.' (copy-pasted from MSOFFICE_1/2), a sentence that never occurs in
# MSOFFICE_3 — the quoted mail there says "registered to our OpenERP Online
# solution" — so the corresponding not-in assertion passed vacuously and
# checked nothing.  Use the sentence that actually appears in the fixture.
MSOFFICE_3_OUT = ['I noticed you recently registered to our OpenERP Online solution.', 'You indicated that you wish', 'Belgium: +32.81.81.37.00']
# ------------------------------------------------------------
# Test cases coming from bugs
# ------------------------------------------------------------
# Bug: the "read more" link was not displayed, and the "read more" span contained a garbled message.
# Regression fixture: long plaintext mail whose "--" signature block must be
# stripped while the body paragraphs are kept.
# NOTE(review): the expectation lists are named BUG_1_IN/BUG_1_OUT (with an
# underscore) while the fixture is BUG1 — inconsistent with the NAME_IN/NAME_OUT
# pattern used elsewhere; renaming would break external consumers, so left as is.
BUG1 = """<pre>Hi Migration Team,
Paragraph 1, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 2, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Paragraph 3, blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah blah blah blah blah blah blah
blah blah blah blah blah blah blah blah.
Thanks.
Regards,
--
Olivier Laurent
Migration Manager
OpenERP SA
Chaussée de Namur, 40
B-1367 Gérompont
Tel: +32.81.81.37.00
Web: http://www.openerp.com</pre>"""
BUG_1_IN = [
'Hi Migration Team',
'Paragraph 1'
]
BUG_1_OUT = [
'Olivier Laurent',
'Chaussée de Namur',
'81.81.37.00',
'openerp.com',
]
BUG2 = """
<div>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Original Message --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Subject:
</th>
<td>Fwd: TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date: </th>
<td>Wed, 16 Oct 2013 14:11:13 +0200</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">From: </th>
<td>Christine Herrmann <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">To: </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:online@openerp.com">online@openerp.com</a></td>
</tr>
</tbody>
</table>
<br>
<br>
<br>
<div class="moz-forward-container"><br>
<br>
-------- Message original --------
<table class="moz-email-headers-table" border="0" cellpadding="0" cellspacing="0">
<tbody>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Sujet:
</th>
<td>TR: OpenERP S.A. Payment Reminder</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Date :
</th>
<td>Wed, 16 Oct 2013 10:34:45 -0000</td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">De : </th>
<td>Ida Siwatala <a class="moz-txt-link-rfc2396E" href="mailto:infos@inzoservices.com"><infos@inzoservices.com></a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Répondre
à : </th>
<td><a class="moz-txt-link-abbreviated" href="mailto:catchall@mail.odoo.com">catchall@mail.odoo.com</a></td>
</tr>
<tr>
<th nowrap="" valign="BASELINE" align="RIGHT">Pour :
</th>
<td>Christine Herrmann (che) <a class="moz-txt-link-rfc2396E" href="mailto:che@openerp.com"><che@openerp.com></a></td>
</tr>
</tbody>
</table>
<br>
<br>
<div>
<div class="WordSection1">
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonjour,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<div>
<div style="border:none;border-top:solid #B5C4DF
1.0pt;padding:3.0pt 0cm 0cm 0cm">
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
Ida Siwatala [<a class="moz-txt-link-freetext" href="mailto:infos@inzoservices.com">mailto:infos@inzoservices.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 20:03<br>
<b>À :</b> 'Followers of
INZO-services-8-all-e-Maxime-Lisbonne-77176-Savigny-le-temple-France'<br>
<b>Objet :</b> RE: OpenERP S.A. Payment Reminder</span></p>
</div>
</div>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Bonsoir,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Je
me permets de revenir vers vous par écrit , car j’ai
fait 2 appels vers votre service en exposant mon
problème, mais je n’ai pas eu de retour.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cela
fait un mois que j’ai fait la souscription de votre
produit, mais je me rends compte qu’il est pas adapté à
ma situation ( fonctionnalité manquante et surtout je
n’ai pas beaucoup de temps à passer à résoudre des
bugs). </span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">C’est
pourquoi , j’ai demandé qu’un accord soit trouvé avec
vous pour annuler le contrat (tout en vous payant le
mois d’utilisation de septembre).</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Pourriez-vous
me faire un retour sur ce point.</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Cordialement,</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D">Ida
Siwatala</span></p>
<p class="MsoNormal"><span style="font-size:11.0pt;font-family:"Calibri","sans-serif";color:#1F497D"></span></p>
<p> </p>
<p class="MsoNormal"><b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">De :</span></b><span style="font-size:10.0pt;font-family:"Tahoma","sans-serif"">
<a href="mailto:che@openerp.com">che@openerp.com</a>
[<a href="mailto:che@openerp.com">mailto:che@openerp.com</a>]
<br>
<b>Envoyé :</b> vendredi 4 octobre 2013 17:41<br>
<b>À :</b> <a href="mailto:infos@inzoservices.com">infos@inzoservices.com</a><br>
<b>Objet :</b> OpenERP S.A. Payment Reminder</span></p>
<p> </p>
<div>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Dear
INZO services,</span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Exception
made if there was a mistake of ours, it seems that the
following amount stays unpaid. Please, take
appropriate measures in order to carry out this
payment in the next 8 days. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"></span></p>
<p> </p>
<table class="MsoNormalTable" style="width:100.0%;border:outset 1.5pt" width="100%" border="1" cellpadding="0">
<tbody>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Date de facturation</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Description</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Reference</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Due Date</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Amount (€)</p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal">Lit.</p>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013/1121</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>Enterprise - Inzo Services
- Juillet 2013</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>2013-09-24</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt">
<p class="MsoNormal"><b>420.0</b></p>
</td>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
</tr>
<tr>
<td style="padding:.75pt .75pt .75pt .75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
<td style="border:none;padding:.75pt .75pt .75pt
.75pt"><br>
</td>
</tr>
</tbody>
</table>
<p class="MsoNormal" style="text-align:center;background:white" align="center"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Amount
due : 420.00 € </span></p>
<p style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222">Would
your payment have been carried out after this mail was
sent, please ignore this message. Do not hesitate to
contact our accounting department. </span></p>
<p class="MsoNormal" style="background:white"><span style="font-size:9.0pt;font-family:"Arial","sans-serif";color:#222222"><br>
Best Regards, <br>
Aurore Lesage <br>
OpenERP<br>
Chaussée de Namur, 40 <br>
B-1367 Grand Rosières <br>
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01 <br>
E-mail : <a href="mailto:ale@openerp.com">ale@openerp.com</a> <br>
Web: <a href="http://www.openerp.com">http://www.openerp.com</a></span></p>
</div>
</div>
</div>
--<br>
INZO services <small>Sent by <a style="color:inherit" href="http://www.openerp.com">OpenERP
S.A.</a> using <a style="color:inherit" href="https://www.openerp.com/">OpenERP</a>.</small>
<small>Access your messages and documents <a style="color:inherit" href="https://accounts.openerp.com?db=openerp#action=mail.action_mail_redirect&login=che&message_id=5750830">in
OpenERP</a></small> <br>
<pre class="moz-signature" cols="72">--
Christine Herrmann
OpenERP
Chaussée de Namur, 40
B-1367 Grand Rosières
Tel: +32.81.81.37.00 - Fax: +32.81.73.35.01
Web: <a class="moz-txt-link-freetext" href="http://www.openerp.com">http://www.openerp.com</a> </pre>
<br>
</div>
<br>
<br>
</div>
<br>
</div>"""
# Fixture expectations for BUG_2: these appear to be fragments that must be
# present (``_IN``) vs. absent (``_OUT``) in the processed mail body —
# TODO confirm against the test that consumes them.
BUG_2_IN = [
    'read more',
    '...',
]
BUG_2_OUT = [
    # BUGFIX: the original list was missing the comma between the two
    # strings, so implicit concatenation produced the single, never-matching
    # element 'Fwd: TR: OpenERP S.Afait un mois'.
    'Fwd: TR: OpenERP S.A',
    'fait un mois',
]
# BUG 20/08/2014: READ MORE NOT APPEARING
BUG3 = """<div class="oe_msg_body_long" style="/* display: none; */"><p>OpenERP has been upgraded to version 8.0.</p>
<h2>What's new in this upgrade?</h2>
<div class="document">
<ul>
<li><p class="first">New Warehouse Management System:</p>
<blockquote>
<p>Schedule your picking, packing, receptions and internal moves automatically with Odoo using
your own routing rules. Define push and pull rules to organize a warehouse or to manage
product moves between several warehouses. Track in detail all stock moves, not only in your
warehouse but wherever else it's taken as well (customers, suppliers or manufacturing
locations).</p>
</blockquote>
</li>
<li><p class="first">New Product Configurator</p>
</li>
<li><p class="first">Documentation generation from website forum:</p>
<blockquote>
<p>New module to generate a documentation from questions and responses from your forum.
The documentation manager can define a table of content and any user, depending their karma,
can link a question to an entry of this TOC.</p>
</blockquote>
</li>
<li><p class="first">New kanban view of documents (resumes and letters in recruitement, project documents...)</p>
</li>
<li><p class="first">E-Commerce:</p>
<blockquote>
<ul class="simple">
<li>Manage TIN in contact form for B2B.</li>
<li>Dedicated salesteam to easily manage leads and orders.</li>
</ul>
</blockquote>
</li>
<li><p class="first">Better Instant Messaging.</p>
</li>
<li><p class="first">Faster and Improved Search view: Search drawer now appears on top of the results, and is open
by default in reporting views</p>
</li>
<li><p class="first">Improved User Interface:</p>
<blockquote>
<ul class="simple">
<li>Popups has changed to be more responsive on tablets and smartphones.</li>
<li>New Stat Buttons: Forms views have now dynamic buttons showing some statistics abouts linked models.</li>
<li>Color code to check in one look availability of components in an MRP order.</li>
<li>Unified menu bar allows you to switch easily between the frontend (website) and backend</li>
<li>Results panel is now scrollable independently of the menu bars, keeping the navigation,
search bar and view switcher always within reach.</li>
</ul>
</blockquote>
</li>
<li><p class="first">User signature is now in HTML.</p>
</li>
<li><p class="first">New development API.</p>
</li>
<li><p class="first">Remove support for Outlook and Thunderbird plugins</p>
</li>
</ul>
</div>
<p>Enjoy the new OpenERP Online!</p><span class="oe_mail_reduce"><a href="#">read less</a></span></div>"""
# Fixture expectations for BUG_3 (the "read more not appearing" case):
# presumably ``_IN`` lists fragments that must survive processing and
# ``_OUT`` lists fragments that must be removed — verify against the
# consuming test.
BUG_3_IN = [
    'read more',
    '...',
]
BUG_3_OUT = [
    'New kanban view of documents'
]
| agpl-3.0 | 8,448,386,689,245,686,000 | 47.574468 | 564 | 0.589855 | false |
jmighion/ansible | lib/ansible/modules/cloud/univention/udm_user.py | 29 | 21233 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_user
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage posix users on a univention corporate server
description:
- "This module allows to manage posix users on a univention corporate
server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the user is present or not.
username:
required: true
description:
- User name
aliases: ['name']
firstname:
required: false
description:
- First name. Required if C(state=present).
lastname:
required: false
description:
- Last name. Required if C(state=present).
password:
required: false
default: None
description:
- Password. Required if C(state=present).
birthday:
required: false
default: None
description:
- Birthday
city:
required: false
default: None
description:
- City of users business address.
country:
required: false
default: None
description:
- Country of users business address.
department_number:
required: false
default: None
description:
- Department number of users business address.
aliases: [ departmentNumber ]
description:
required: false
default: None
description:
- Description (not gecos)
display_name:
required: false
default: None
description:
- Display name (not gecos)
aliases: [ displayName ]
email:
required: false
default: ['']
description:
- A list of e-mail addresses.
employee_number:
required: false
default: None
description:
- Employee number
aliases: [ employeeNumber ]
employee_type:
required: false
default: None
description:
- Employee type
aliases: [ employeeType ]
gecos:
required: false
default: None
description:
- GECOS
groups:
required: false
default: []
description:
- "POSIX groups, the LDAP DNs of the groups will be found with the
LDAP filter for each group as $GROUP:
C((&(objectClass=posixGroup)(cn=$GROUP)))."
home_share:
required: false
default: None
description:
- "Home NFS share. Must be a LDAP DN, e.g.
C(cn=home,cn=shares,ou=school,dc=example,dc=com)."
aliases: [ homeShare ]
home_share_path:
required: false
default: None
description:
- Path to home NFS share, inside the homeShare.
aliases: [ homeSharePath ]
home_telephone_number:
required: false
default: []
description:
- List of private telephone numbers.
aliases: [ homeTelephoneNumber ]
homedrive:
required: false
default: None
description:
- Windows home drive, e.g. C("H:").
mail_alternative_address:
required: false
default: []
description:
- List of alternative e-mail addresses.
aliases: [ mailAlternativeAddress ]
mail_home_server:
required: false
default: None
description:
- FQDN of mail server
aliases: [ mailHomeServer ]
mail_primary_address:
required: false
default: None
description:
- Primary e-mail address
aliases: [ mailPrimaryAddress ]
mobile_telephone_number:
required: false
default: []
description:
- Mobile phone number
aliases: [ mobileTelephoneNumber ]
organisation:
required: false
default: None
description:
- Organisation
override_pw_history:
required: false
default: False
description:
- Override password history
aliases: [ overridePWHistory ]
override_pw_length:
required: false
default: False
description:
- Override password check
aliases: [ overridePWLength ]
pager_telephonenumber:
required: false
default: []
description:
- List of pager telephone numbers.
aliases: [ pagerTelephonenumber ]
phone:
required: false
default: []
description:
- List of telephone numbers.
postcode:
required: false
default: None
description:
- Postal code of users business address.
primary_group:
required: false
default: cn=Domain Users,cn=groups,$LDAP_BASE_DN
description:
- Primary group. This must be the group LDAP DN.
aliases: [ primaryGroup ]
profilepath:
required: false
default: None
description:
- Windows profile directory
pwd_change_next_login:
required: false
default: None
choices: [ '0', '1' ]
description:
- Change password on next login.
aliases: [ pwdChangeNextLogin ]
room_number:
required: false
default: None
description:
- Room number of users business address.
aliases: [ roomNumber ]
samba_privileges:
required: false
default: []
description:
- "Samba privilege, like allow printer administration, do domain
join."
aliases: [ sambaPrivileges ]
samba_user_workstations:
required: false
default: []
description:
- Allow the authentication only on this Microsoft Windows host.
aliases: [ sambaUserWorkstations ]
sambahome:
required: false
default: None
description:
- Windows home path, e.g. C('\\\\$FQDN\\$USERNAME').
scriptpath:
required: false
default: None
description:
- Windows logon script.
secretary:
required: false
default: []
description:
- A list of superiors as LDAP DNs.
serviceprovider:
required: false
default: ['']
description:
- Enable user for the following service providers.
shell:
required: false
default: '/bin/bash'
description:
- Login shell
street:
required: false
default: None
description:
- Street of users business address.
title:
required: false
default: None
description:
- Title, e.g. C(Prof.).
unixhome:
required: false
default: '/home/$USERNAME'
description:
- Unix home directory
userexpiry:
required: false
default: Today + 1 year
description:
- Account expiry date, e.g. C(1999-12-31).
position:
required: false
default: ''
description:
- "Define the whole position of users object inside the LDAP tree,
e.g. C(cn=employee,cn=users,ou=school,dc=example,dc=com)."
update_password:
required: false
default: always
description:
- "C(always) will update passwords if they differ.
C(on_create) will only set the password for newly created users."
version_added: "2.3"
ou:
required: false
default: ''
description:
- "Organizational Unit inside the LDAP Base DN, e.g. C(school) for
LDAP OU C(ou=school,dc=example,dc=com)."
subpath:
required: false
default: 'cn=users'
description:
- "LDAP subpath inside the organizational unit, e.g.
C(cn=teachers,cn=users) for LDAP container
C(cn=teachers,cn=users,dc=example,dc=com)."
'''
EXAMPLES = '''
# Create a user on a UCS
- udm_user:
name: FooBar
password: secure_password
firstname: Foo
lastname: Bar
# Create a user with the DN
# C(uid=foo,cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com)
- udm_user:
name: foo
password: secure_password
firstname: Foo
lastname: Bar
ou: school
subpath: 'cn=teachers,cn=users'
# or define the position
- udm_user:
name: foo
password: secure_password
firstname: Foo
lastname: Bar
position: 'cn=teachers,cn=users,ou=school,dc=school,dc=example,dc=com'
'''
RETURN = '''# '''
import crypt
from datetime import date, timedelta
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def main():
    """Ansible entry point: ensure a UDM posix user is present or absent.

    Builds the module argument spec, derives the target container DN from
    ``position`` / ``ou`` / ``subpath``, then creates, updates or removes
    the UDM ``users/user`` object and syncs its group memberships.
    Exits via ``module.exit_json`` / ``module.fail_json``.
    """
    # Default account expiry: one year from today (UCS convention).
    expiry = date.strftime(date.today() + timedelta(days=365), "%Y-%m-%d")
    module = AnsibleModule(
        argument_spec=dict(
            birthday=dict(default=None, type='str'),
            city=dict(default=None, type='str'),
            country=dict(default=None, type='str'),
            department_number=dict(default=None, type='str',
                                   aliases=['departmentNumber']),
            description=dict(default=None, type='str'),
            display_name=dict(default=None, type='str',
                              aliases=['displayName']),
            email=dict(default=[''], type='list'),
            employee_number=dict(default=None, type='str',
                                 aliases=['employeeNumber']),
            employee_type=dict(default=None, type='str',
                               aliases=['employeeType']),
            firstname=dict(default=None, type='str'),
            gecos=dict(default=None, type='str'),
            groups=dict(default=[], type='list'),
            home_share=dict(default=None, type='str',
                            aliases=['homeShare']),
            home_share_path=dict(default=None, type='str',
                                 aliases=['homeSharePath']),
            home_telephone_number=dict(default=[], type='list',
                                       aliases=['homeTelephoneNumber']),
            homedrive=dict(default=None, type='str'),
            lastname=dict(default=None, type='str'),
            mail_alternative_address=dict(default=[], type='list',
                                          aliases=['mailAlternativeAddress']),
            mail_home_server=dict(default=None, type='str',
                                  aliases=['mailHomeServer']),
            mail_primary_address=dict(default=None, type='str',
                                      aliases=['mailPrimaryAddress']),
            mobile_telephone_number=dict(default=[], type='list',
                                         aliases=['mobileTelephoneNumber']),
            organisation=dict(default=None, type='str'),
            overridePWHistory=dict(default=False, type='bool',
                                   aliases=['override_pw_history']),
            overridePWLength=dict(default=False, type='bool',
                                  aliases=['override_pw_length']),
            pager_telephonenumber=dict(default=[], type='list',
                                       aliases=['pagerTelephonenumber']),
            password=dict(default=None, type='str', no_log=True),
            phone=dict(default=[], type='list'),
            postcode=dict(default=None, type='str'),
            primary_group=dict(default=None, type='str',
                               aliases=['primaryGroup']),
            profilepath=dict(default=None, type='str'),
            pwd_change_next_login=dict(default=None, type='str',
                                       choices=['0', '1'],
                                       aliases=['pwdChangeNextLogin']),
            room_number=dict(default=None, type='str',
                             aliases=['roomNumber']),
            samba_privileges=dict(default=[], type='list',
                                  aliases=['sambaPrivileges']),
            samba_user_workstations=dict(default=[], type='list',
                                         aliases=['sambaUserWorkstations']),
            sambahome=dict(default=None, type='str'),
            scriptpath=dict(default=None, type='str'),
            secretary=dict(default=[], type='list'),
            serviceprovider=dict(default=[''], type='list'),
            shell=dict(default='/bin/bash', type='str'),
            street=dict(default=None, type='str'),
            title=dict(default=None, type='str'),
            unixhome=dict(default=None, type='str'),
            userexpiry=dict(default=expiry, type='str'),
            username=dict(required=True, aliases=['name'], type='str'),
            position=dict(default='', type='str'),
            update_password=dict(default='always',
                                 choices=['always', 'on_create'],
                                 type='str'),
            ou=dict(default='', type='str'),
            subpath=dict(default='cn=users', type='str'),
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['firstname', 'lastname', 'password'])
        ])
    )
    username = module.params['username']
    position = module.params['position']
    ou = module.params['ou']
    subpath = module.params['subpath']
    state = module.params['state']
    changed = False
    # BUGFIX: 'diff' was only assigned inside the state == 'present' branch,
    # so the final exit_json() raised NameError whenever state == 'absent'.
    diff = None

    users = list(ldap_search(
        '(&(objectClass=posixAccount)(uid={}))'.format(username),
        attr=['uid']
    ))
    # Target container: an explicit 'position' DN wins; otherwise compose it
    # from subpath + ou + LDAP base DN.
    if position != '':
        container = position
    else:
        if ou != '':
            ou = 'ou={},'.format(ou)
        if subpath != '':
            subpath = '{},'.format(subpath)
        container = '{}{}{}'.format(subpath, ou, base_dn())
    user_dn = 'uid={},{}'.format(username, container)

    exists = bool(len(users))

    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('users/user', container)
            else:
                obj = umc_module_for_edit('users/user', user_dn)

            # BUGFIX: 'displayName' is only an alias of 'display_name';
            # AnsibleModule does not backfill alias keys into params, so the
            # plain dict lookup raised KeyError when the alias was not used.
            if module.params.get('displayName') is None:
                module.params['displayName'] = '{} {}'.format(
                    module.params['firstname'],
                    module.params['lastname']
                )
            if module.params['unixhome'] is None:
                module.params['unixhome'] = '/home/{}'.format(
                    module.params['username']
                )
            # Copy every module option whose name matches a UDM property;
            # password, groups and overridePWHistory get special handling.
            for k in obj.keys():
                if (k != 'password' and
                        k != 'groups' and
                        k != 'overridePWHistory' and
                        k in module.params and
                        module.params[k] is not None):
                    obj[k] = module.params[k]
            # handle some special values
            obj['e-mail'] = module.params['email']
            password = module.params['password']
            if obj['password'] is None:
                obj['password'] = password
            if module.params['update_password'] == 'always':
                # Compare against the stored crypt hash; only rewrite the
                # password (and the override flags) when it actually changed.
                old_password = obj['password'].split('}', 2)[1]
                if crypt.crypt(password, old_password) != old_password:
                    obj['overridePWHistory'] = module.params['overridePWHistory']
                    obj['overridePWLength'] = module.params['overridePWLength']
                    obj['password'] = password

            diff = obj.diff()
            if exists:
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except Exception:
            # UDM raises many exception types, so stay broad — but no longer
            # a bare 'except:' that would also swallow SystemExit and
            # KeyboardInterrupt.
            module.fail_json(
                msg="Creating/editing user {} in {} failed".format(
                    username,
                    container
                )
            )
        try:
            groups = module.params['groups']
            if groups:
                # Resolve all requested group names to DNs in a single LDAP
                # query (renamed from 'filter' to avoid shadowing the builtin).
                ldap_filter = '(&(objectClass=posixGroup)(|(cn={})))'.format(
                    ')(cn='.join(groups)
                )
                group_dns = list(ldap_search(ldap_filter, attr=['dn']))
                for dn in group_dns:
                    grp = umc_module_for_edit('groups/group', dn[0])
                    if user_dn not in grp['users']:
                        grp['users'].append(user_dn)
                        if not module.check_mode:
                            grp.modify()
                        changed = True
        except Exception:
            module.fail_json(
                msg="Adding groups to user {} failed".format(username)
            )

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('users/user', user_dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception:
            module.fail_json(
                msg="Removing user {} failed".format(username)
            )

    module.exit_json(
        changed=changed,
        username=username,
        diff=diff,
        container=container
    )
if __name__ == '__main__':
main()
| gpl-3.0 | -7,788,114,976,434,128,000 | 34.625839 | 92 | 0.448453 | false |
XiaoxiaoLiu/morphology_analysis | bigneuron/reestimate_radius.py | 1 | 1506 | __author__ = 'xiaoxiaol'
__author__ = 'xiaoxiaol'
# run standardize swc to make sure swc files have one single root, and sorted, and has the valide type id ( 1~4)
import matplotlib.pyplot as plt
import seaborn as sb
import os
import os.path as path
import numpy as np
import pandas as pd
import platform
import sys
import glob
if (platform.system() == "Linux"):
WORK_PATH = "/local1/xiaoxiaol/work"
else:
WORK_PATH = "/Users/xiaoxiaoliu/work"
p = WORK_PATH + '/src/morphology_analysis'
sys.path.append(p)
import bigneuron.recon_prescreening as rp
import bigneuron.plot_distances as plt_dist
import blast_neuron.blast_neuron_comp as bn
### main
data_DIR = "/data/mat/xiaoxiaol/data/big_neuron/silver/0401_gold163_all_soma_sort"
output_dir = data_DIR
#run_consensus(data_DIR, output_dir)
os.system("rm "+data_DIR+"/qsub2/*.qsub")
os.system("rm "+data_DIR+"/qsub2/*.o*")
for item in os.listdir(data_DIR):
folder_name = os.path.join(data_DIR, item)
if os.path.isdir(folder_name):
print folder_name
imagefile = glob.glob(folder_name+'/*.v3dpbd')
imagefile.extend(glob.glob(folder_name+'/*.v3draw'))
files =glob.glob(folder_name+'/*.strict.swc')
if len(files)>0 and len(imagefile)>0:
gs_swc_file =files[0]
if not os.path.exists(gs_swc_file+".out.swc"):
bn.estimate_radius(input_image=imagefile[0], input_swc_path=gs_swc_file,bg_th=40, GEN_QSUB = 0, qsub_script_dir= output_dir+"/qsub2", id=None)
| gpl-3.0 | 2,000,955,272,724,070,000 | 26.381818 | 158 | 0.675963 | false |
siutanwong/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent sematic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
dest="n_components", type="int",
help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
action="store_false", dest="minibatch", default=True,
help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
action="store_false", dest="use_idf", default=True,
help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
action="store_true", default=False,
help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
help="Maximum number of features (dimensions)"
" to extract from text.")
op.add_option("--verbose",
action="store_true", dest="verbose", default=False,
help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
if opts.use_idf:
# Perform an IDF normalization on the output of HashingVectorizer
hasher = HashingVectorizer(n_features=opts.n_features,
stop_words='english', non_negative=True,
norm=None, binary=False)
vectorizer = make_pipeline(hasher, TfidfTransformer())
else:
vectorizer = HashingVectorizer(n_features=opts.n_features,
stop_words='english',
non_negative=False, norm='l2',
binary=False)
else:
vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
min_df=2, stop_words='english',
use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
print("Performing dimensionality reduction using LSA")
t0 = time()
# Vectorizer results are normalized, which makes KMeans behave as
# spherical k-means for better results. Since LSA/SVD results are
# not normalized, we have to redo the normalization.
svd = TruncatedSVD(opts.n_components)
normalizer = Normalizer(copy=False)
lsa = make_pipeline(svd, normalizer)
X = lsa.fit_transform(X)
print("done in %fs" % (time() - t0))
explained_variance = svd.explained_variance_ratio_.sum()
print("Explained variance of the SVD step: {}%".format(
int(explained_variance * 100)))
print()
###############################################################################
# Do the actual clustering
# Mini-batch k-means scales to larger corpora; plain KMeans is the reference
# batch algorithm (selected via --no-minibatch).
if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# The first four scores compare cluster assignments against the newsgroup
# ground-truth labels; the silhouette score is label-free and is computed on
# a 1000-sample subset to keep it cheap.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
# With a vocabulary-based vectorizer we can map cluster centroids back to
# their most influential terms; after LSA the centroids must first be
# projected back into the original term space via the SVD inverse transform.
if not opts.use_hashing:
    print("Top terms per cluster:")
    if opts.n_components:
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause | -7,751,988,632,583,375,000 | 36.809955 | 80 | 0.670656 | false |
COL-IU/XLSearch | xlsearch_train.py | 1 | 5042 | import sys
import pickle
import os
import getopt
from time import ctime
import numpy as np
usage = '''
USAGE: python xlsearch_train.py -l [path to xlsearch library]
-p [parameter file]
-o [output file]'''
(pairs, args) = getopt.getopt(sys.argv[1:], 'l:p:o:')
cmd_arg = dict()
for i in range(len(pairs)):
cmd_arg[pairs[i][0]] = pairs[i][1]
if len(cmd_arg) != 3:
print usage
sys.exit(1)
lib_path = cmd_arg['-l']
param_file = cmd_arg['-p']
output_file = cmd_arg['-o']
sys.path.append(lib_path)
from utility import *
from index import EnumIndexBuilder
from fastareader import FastaReader
print 'XLSearch, version 1.0'
print 'Copyright of School of Informatics and Computing, Indiana University'
print 'Current time %s' % ctime()
print 'Training logistic regression models using authetic true-true PSMs...'
print '\nReading paramters from: %s...' % param_file
[param, mass] = read_param(param_file)
param['ntermxlink'] = False
param['neutral_loss']['h2o_loss']['aa'] = set('DEST')
param['neutral_loss']['nh3_loss']['aa'] = set('KNQR')
param['neutral_loss']['h2o_gain']['aa'] = set()
mass['C'] = 103.009184
print 'Reading parameters done!'
print '\nReading MSMS spectra files from directory: %s...' % param['ms_data']
spec_dict = read_spec(param['ms_data'], param, mass)
pickle.dump(spec_dict, file('spectra.pickle', 'w'))
print 'Total number of spectra: %d' % len(spec_dict)
print 'Reading MSMS spectra files done!'
print '\nDeisotoping MSMS spectra...'
spec_dict = pickle.load(file('spectra.pickle'))
deisotoped = dict()
titles = spec_dict.keys()
for i in range(len(titles)):
title = titles[i]
(one, align) = spec_dict[title].deisotope(mass, 4, 0.02)
deisotoped[title] = one
pickle.dump(deisotoped, file('deisotoped.pickle', 'w'))
deisotoped = pickle.load(file('deisotoped.pickle'))
spec_dict = deisotoped
print 'Deisotoping MSMS spectra done!'
print 'Current time %s' % ctime()
print '\nBuilding index for all possible inter-peptide cross-links...'
index = EnumIndexBuilder(param['target_database'], spec_dict, mass, param)
pickle.dump(index, file('index.pickle', 'w'))
index = pickle.load(file('index.pickle'))
print 'Building index done!'
print 'Current time %s' % ctime()
print '\nComputing features for candidate PSMs for query spectra...'
results = []
titles = []
for title in index.search_index.keys():
if len(index.search_index[title]) != 0:
titles.append(title)
length = len(titles)
for i in range(0, length):
print '%d / %d' % (i, length)
sys.stdout.flush()
title = titles[i]
result = get_matches_per_spec(mass, param, index, title)
result = [title, result]
results.append(result)
print 'Computing features done!\n'
print 'Current time: %s' % ctime()
pickle.dump(results, file('results.pickle', 'w'))
results = pickle.load(file('results.pickle'))
print 'Extracting authentic true-true PSMs...'
true_true = get_true_true(results, index, param, mass)
pickle.dump(true_true, file('TT.pickle', 'w'))
print 'Extracting authentic true-true PSMs done!'
print 'Extracting true-false PSMs based on true-true PSMs as seeds...'
true_false = get_true_false(true_true, param, mass)
pickle.dump(true_false, file('TF.pickle', 'w'))
print 'Extracting true-false PSMs done!'
print 'Extracting false-false PSMs based on true-true PSMs as seeds...'
false_false = get_false_false(true_true, param, mass)
pickle.dump(false_false, file('FF.pickle', 'w'))
print 'Extracting false-false PSMs done!'
print 'Computing feature matrix for true-true, true-false, false-false PSMs...'
X_true_true = get_feature_matrix(true_true)
X_true_false = get_feature_matrix(true_false)
X_false_false = get_feature_matrix(false_false)
X_TT_TF = np.concatenate((X_true_true, X_true_false), axis = 0)
y_TT_TF = []
y_TT_TF.extend([1.0] * len(true_true))
y_TT_TF.extend([0.0] * len(true_false))
y_TT_TF = np.asarray(y_TT_TF)
y_TT_TF = y_TT_TF.T
X_TF_FF = np.concatenate((X_true_false, X_false_false), axis = 0)
y_TF_FF = []
y_TF_FF.extend([1.0] * len(true_false))
y_TF_FF.extend([0.0] * len(false_false))
y_TF_FF = np.asarray(y_TF_FF)
y_TF_FF = y_TF_FF.T
print 'Computing features done!'
from sklearn import linear_model
log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TT_TF, y_TT_TF)
model_TT_TF = []
model_TT_TF.extend(log_reg.intercept_.tolist())
model_TT_TF.extend(log_reg.coef_.tolist())
log_reg = linear_model.LogisticRegression()
log_reg.fit(X_TF_FF, y_TF_FF)
model_TF_FF = []
model_TF_FF.extend(log_reg.intercept_.tolist())
model_TF_FF.extend(log_reg.coef_.tolist())
f = open(output_file, 'w')
f.write('# Classifier I (TT-TF) coefficients')
for i in range(len(model_TT_TF)):
f.write('CI%02d\t')
f.write('%.60f\n' % model_TT_TF[i])
f.write('# Classifier II (TF-FF) coefficients')
for i in range(len(model_TF_FF)):
f.write('CII%02d\t')
f.write('%.60f\n' % model_TF_FF[i])
f.write('nTT\t%d\n' % len(true_true))
f.write('nTF\t%d\n' % len(true_false))
f.write('nFF\t%d\n' % len(false_false))
f.close()
print 'XLSearch train mode finished!'
| mit | 4,556,188,399,461,052,000 | 31.11465 | 79 | 0.685244 | false |
xin3liang/platform_external_chromium_org | tools/perf/page_sets/tough_texture_upload_cases.py | 34 | 1457 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughTextureUploadCasesPage(page_module.Page):
  """One texture-upload test page; measurement scrolls it smoothly."""

  def __init__(self, url, page_set):
    super(ToughTextureUploadCasesPage, self).__init__(url=url,
                                                      page_set=page_set)

  def RunSmoothness(self, action_runner):
    # Record one smooth scroll gesture over the whole page.
    scroll_interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollPage()
    scroll_interaction.End()
class ToughTextureUploadCasesPageSet(page_set_module.PageSet):
  """A collection of texture upload performance tests."""

  def __init__(self):
    super(ToughTextureUploadCasesPageSet, self).__init__()
    base = 'file://tough_texture_upload_cases/'
    page_files = [
        'background_color_animation.html',
        'background_color_animation_and_transform_animation.html',
        'background_color_animation_with_gradient.html',
        'background_color_animation_with_gradient_and_transform_animation.html',
    ]
    for page_file in page_files:
      self.AddPage(ToughTextureUploadCasesPage(base + page_file, self))
| bsd-3-clause | -890,998,329,685,197,600 | 32.883721 | 112 | 0.717227 | false |
darvelo/chime | fabfile/fabconf.py | 2 | 2774 | # -*- coding: utf-8 -*-
'''
--------------------------------------------------------------------------------------
project_conf.py
--------------------------------------------------------------------------------------
Configuration settings that detail EC2 instances. Note that we are not using
the built-in env from fabric.api -- there are no official recommendations on
best practice. See: http://lists.gnu.org/archive/html/fab-user/2013-11/msg00006.html
'''
import os
import os.path
import pwd

# Single flat dict of deployment settings; most values can be overridden via
# environment variables.  Keys accessed with os.environ[...] (no default) are
# required and make the import fail fast when missing.
fabconf = {}

# Do not edit
fabconf['FAB_CONFIG_PATH'] = os.path.dirname(__file__)
fabconf['FAB_HOSTS_FILE'] = fabconf.get('FAB_CONFIG_PATH') + '/hosts.txt'

# Project name
fabconf['PROJECT_NAME'] = os.environ.get('PROJECT_NAME', 'chime')

fabconf['GIT_BRANCH'] = 'master'

# Username for connecting to EC2 instances - Do not edit unless you have a reason to
fabconf['SERVER_USERNAME'] = 'ubuntu'

# Don't edit. Full path of the ssh key you use to connect to EC2 instances
fabconf['SSH_PRIVATE_KEY_PATH'] = os.environ.get('SSH_PRIVATE_KEY_PATH')

# Where to install apps
fabconf['APPS_DIR'] = "/home/{user}/web".format(user=fabconf.get('SERVER_USERNAME'))

# Where your project will installed: /<APPS_DIR>/<PROJECT_NAME>
fabconf['PROJECT_PATH'] = '{apps}/{project}'.format(
    apps=fabconf.get('APPS_DIR'),
    project=fabconf.get('PROJECT_NAME')
)

# Space-delimited list of app domains
fabconf['DOMAINS'] = os.environ.get('DOMAINS')

# Name tag for your server instance on EC2
# Use recommendation from https://docs.python.org/2/library/os.html#os.getlogin
# to get around ioctl error thrown by os.getlogin() in a cron job.
fabconf['INSTANCE_NAME_TAG'] = os.environ.get('INSTANCE_NAME_TAG', 'ChimeCMS Autotest')
# "<local user>-<local hostname>" so instances can be traced back to a machine.
fabconf['INSTANCE_CREATED_BY'] = '{}-{}'.format(pwd.getpwuid(os.getuid())[0], os.uname()[1])

# EC2 key.
fabconf['AWS_ACCESS_KEY'] = os.environ['AWS_ACCESS_KEY']
# EC2 secret.
fabconf['AWS_SECRET_KEY'] = os.environ['AWS_SECRET_KEY']
#EC2 region. Defaults to us-east-1
fabconf['EC2_REGION'] = os.environ.get('EC2_REGION', 'us-east-1')
# AMI name. Either pass in a comma-delimited list of values.
# Defaults to Ubuntu 14.04
fabconf['EC2_AMIS'] = os.environ.get('EC2_AMIS', 'ami-6725ea0c').split(',')
# Name of the keypair you use in EC2.
fabconf['EC2_KEY_PAIR'] = os.environ.get('EC2_KEY_PAIR', 'cfa-chime-keypair')
# Name of the security group.
fabconf['AWS_SECURITY_GROUPS'] = os.environ.get('AWS_SECURITY_GROUPS', 'default')
# API Name of instance type. Defaults to t2.micro
# NOTE(review): the comment above says t2.micro but the actual default below
# is t2.small -- confirm which one is intended.
fabconf['EC2_INSTANCE_TYPE'] = os.environ.get('EC2_INSTANCE_TYPE', 't2.small')

# Assorted other config (described in AcceptanceTests.md) used here to fail fast
fabconf['TESTING_EMAIL'] = os.environ['TESTING_EMAIL']
fabconf['TESTING_PASSWORD'] = os.environ['TESTING_PASSWORD']
| bsd-3-clause | 438,997,109,505,511,040 | 36.486486 | 92 | 0.667988 | false |
heia-fr/sirano | sirano/plugins/actions/raw_payload.py | 1 | 1377 | # -*- coding: utf-8 -*-
#
# This file is a part of Sirano.
#
# Copyright (C) 2015 HES-SO // HEIA-FR
# Copyright (C) 2015 Loic Gremaud <loic.gremaud@grelinfo.ch>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
from sirano.action import Action
class RTPPayloadAction(Action):
    """Anonymize a raw payload content field.

    The payload is replaced with the repeating marker "ANONYMIZED BY SIRANO "
    truncated to the original payload length, so packet sizes are preserved
    while the content is destroyed.
    """

    # Action identifier used in configuration files.
    name = "raw-payload"

    def __init__(self, app):
        super(RTPPayloadAction, self).__init__(app)

    def anonymize(self, value):
        """Return a replacement string with the same length as ``value``."""
        marker = "ANONYMIZED BY SIRANO "
        value_len = len(value)
        # Repeat the marker enough times and truncate: O(n), unlike the
        # previous character-by-character string concatenation loop.
        repeats = value_len // len(marker) + 1
        return (marker * repeats)[:value_len]

    def discover(self, value):
        # Nothing to learn from opaque payload bytes.
        pass
| gpl-2.0 | -8,566,065,964,626,127,000 | 31.023256 | 81 | 0.67756 | false |
jdilallo/jdilallo-test | examples/dfp/v201311/custom_targeting_service/get_custom_targeting_values_by_statement.py | 1 | 2247 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets custom targeting values for the given predefined custom
targeting key.
To create custom targeting values, run
create_custom_targeting_keys_and_values.py. To determine which custom
targeting keys exist, run get_all_custom_targeting_keys_and_values.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate classes from the client library.
from googleads import dfp
CUSTOM_TARGETING_KEY_ID = 'INSERT_CUSTOM_TARGETING_KEY_ID_HERE'
def main(client, key_id):
  """Print every custom targeting value attached to one targeting key.

  Args:
    client: an initialized dfp.DfpClient.
    key_id: id of the predefined custom targeting key to query by.
  """
  # Initialize appropriate service.
  custom_targeting_service = client.GetService(
      'CustomTargetingService', version='v201311')

  # Bind :keyId in the PQL statement below to the requested key id.
  values = [{
      'key': 'keyId',
      'value': {
          'xsi_type': 'NumberValue',
          'value': key_id
      }
  }]
  query = 'WHERE customTargetingKeyId = :keyId'
  statement = dfp.FilterStatement(query, values)

  # Get custom targeting values by statement.
  # Page through results until a response comes back without 'results'.
  while True:
    response = custom_targeting_service.getCustomTargetingValuesByStatement(
        statement.ToStatement())

    if 'results' in response:
      # Display results.
      for value in response['results']:
        print ('Custom targeting value with id \'%s\', name \'%s\', and display'
               ' name \'%s\' was found.'
               % (value['id'], value['name'], value['displayName']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CUSTOM_TARGETING_KEY_ID)
| apache-2.0 | 1,361,129,771,667,427,300 | 32.537313 | 80 | 0.691589 | false |
KousikaGanesh/purchaseandInventory | openerp/addons/crm/wizard/crm_phonecall_to_meeting.py | 56 | 2730 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class crm_phonecall2meeting(osv.osv_memory):
    """Wizard that schedules a meeting from an existing phonecall."""

    _name = 'crm.phonecall2meeting'
    _description = 'Phonecall To Meeting'

    def action_cancel(self, cr, uid, ids, context=None):
        """Close the wizard window without scheduling anything."""
        return {'type': 'ir.actions.act_window_close'}

    def action_make_meeting(self, cr, uid, ids, context=None):
        """Open the meeting calendar view pre-filled from the active phonecall.

        Returns the action dictionary for the created meeting view, or an
        empty dict when no phonecall is active in the context.
        """
        phonecall_id = context and context.get('active_id', False) or False
        if not phonecall_id:
            return {}
        phonecall_obj = self.pool.get('crm.phonecall')
        phonecall = phonecall_obj.browse(cr, uid, phonecall_id, context)
        action_obj = self.pool.get('ir.actions.act_window')
        res = action_obj.for_xml_id(cr, uid, 'base_calendar', 'action_crm_meeting', context)
        # Seed the meeting form defaults from the phonecall record.
        res['context'] = {
            'default_phonecall_id': phonecall.id,
            'default_partner_id': phonecall.partner_id and phonecall.partner_id.id or False,
            'default_user_id': uid,
            'default_email_from': phonecall.email_from,
            'default_state': 'open',
            'default_name': phonecall.name,
        }
        return res
crm_phonecall2meeting()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 7,752,674,633,255,610,000 | 41.625 | 124 | 0.612903 | false |
gengliangwang/spark | python/pyspark/serializers.py | 10 | 20586 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses :class:`PickleSerializer` to serialize objects using Python's
`cPickle` serializer, which can serialize nearly any Python object.
Other serializers, like :class:`MarshalSerializer`, support fewer datatypes but can be
faster.
Examples
--------
The serializer is chosen when creating :class:`SparkContext`:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's `batchSize`
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
import pickle
pickle_protocol = pickle.HIGHEST_PROTOCOL
from pyspark import cloudpickle
from pyspark.util import print_exec
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
    """Negative "length" markers written on the wire in place of a real frame
    length, used to signal stream-control events to the reader."""
    END_OF_DATA_SECTION = -1
    PYTHON_EXCEPTION_THROWN = -2
    TIMING_DATA = -3
    END_OF_STREAM = -4
    NULL = -5
    START_ARROW_STREAM = -6
class Serializer(object):
    """Abstract base class for all serializers.

    Defines the stream API plus value-based equality: two serializers compare
    equal exactly when output from one can be read back by the other."""

    def dump_stream(self, iterator, stream):
        """
        Serialize an iterator of objects to the output stream.
        """
        raise NotImplementedError

    def load_stream(self, stream):
        """
        Return an iterator of deserialized objects from the input stream.
        """
        raise NotImplementedError

    def _load_stream_without_unbatching(self, stream):
        """
        Return an iterator of deserialized batches (iterable) of objects from the input stream.

        If the serializer does not operate on batches the default implementation returns an
        iterator of single element lists.
        """
        # Wrap each object in a one-element list so callers can treat every
        # serializer uniformly as batch-producing.
        return map(lambda x: [x], self.load_stream(stream))

    # Note: our notion of "equality" is that output generated by
    # equal serializers can be deserialized using the same serializer.

    # This default implementation handles the simple cases;
    # subclasses should override __eq__ as appropriate.

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "%s()" % self.__class__.__name__

    def __hash__(self):
        # Hash the repr so equal (same-configuration) serializers hash alike.
        return hash(str(self))
class FramedSerializer(Serializer):

    """
    Serializer that writes objects as a stream of (length, data) pairs,
    where `length` is a 32-bit integer and data is `length` bytes.
    """

    def dump_stream(self, iterator, stream):
        for obj in iterator:
            self._write_with_length(obj, stream)

    def load_stream(self, stream):
        # Yield objects until the framing layer reports end-of-stream.
        while True:
            try:
                yield self._read_with_length(stream)
            except EOFError:
                return

    def _write_with_length(self, obj, stream):
        serialized = self.dumps(obj)
        if serialized is None:
            raise ValueError("serialized value should not be None")
        # Frame lengths are written as signed 32-bit ints (see write_int), so
        # a single object cannot exceed 2 GiB.
        if len(serialized) > (1 << 31):
            raise ValueError("can not serialize object larger than 2G")
        write_int(len(serialized), stream)
        stream.write(serialized)

    def _read_with_length(self, stream):
        length = read_int(stream)
        # Negative lengths are SpecialLengths control markers, not frames.
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        obj = stream.read(length)
        if len(obj) < length:
            # Truncated frame: the stream ended mid-object.
            raise EOFError
        return self.loads(obj)

    def dumps(self, obj):
        """
        Serialize an object into a byte array.
        When batching is used, this will be called with an array of objects.
        """
        raise NotImplementedError

    def loads(self, obj):
        """
        Deserialize an object from a byte array.
        """
        raise NotImplementedError
class BatchedSerializer(Serializer):

    """
    Serializes a stream of objects in batches by calling its wrapped
    Serializer with streams of objects.
    """

    UNLIMITED_BATCH_SIZE = -1  # everything goes into one single batch
    UNKNOWN_BATCH_SIZE = 0     # size decided elsewhere (AutoBatchedSerializer)

    def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
        self.serializer = serializer
        self.batchSize = batchSize

    def _batched(self, iterator):
        # Yield lists of at most `batchSize` items drawn from `iterator`.
        if self.batchSize == self.UNLIMITED_BATCH_SIZE:
            yield list(iterator)
        elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
            # NOTE(review): `__getslice__` only exists on Python 2 built-ins,
            # so on Python 3 this fast slicing path is dead code -- confirm
            # whether it should test `__getitem__` instead.
            n = len(iterator)
            for i in range(0, n, self.batchSize):
                yield iterator[i: i + self.batchSize]
        else:
            items = []
            count = 0
            for item in iterator:
                items.append(item)
                count += 1
                if count == self.batchSize:
                    yield items
                    items = []
                    count = 0
            if items:
                # Trailing, partially filled batch.
                yield items

    def dump_stream(self, iterator, stream):
        self.serializer.dump_stream(self._batched(iterator), stream)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def _load_stream_without_unbatching(self, stream):
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):

    """
    Serializes a stream of (key, values) pairs, slicing oversized values
    lists into fixed-size chunks so all emitted pairs have similar sizes.
    """

    def __init__(self, serializer, batchSize=10):
        BatchedSerializer.__init__(self, serializer, batchSize)

    def _batched(self, iterator):
        chunk = self.batchSize
        for key, values in iterator:
            for start in range(0, len(values), chunk):
                yield key, values[start:start + chunk]

    def load_stream(self, stream):
        # Values are re-flattened downstream; no unbatching happens here.
        return self.serializer.load_stream(stream)

    def __repr__(self):
        return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
    """
    Choose the size of batch automatically based on the size of object
    """

    def __init__(self, serializer, bestSize=1 << 16):
        # Batch size is unknown up-front; dump_stream adapts it per batch.
        BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
        self.bestSize = bestSize

    def dump_stream(self, iterator, stream):
        """Write `iterator` in batches, doubling or halving the batch count so
        each serialized batch stays near `bestSize` bytes."""
        batch, best = 1, self.bestSize
        iterator = iter(iterator)
        while True:
            vs = list(itertools.islice(iterator, batch))
            if not vs:
                break

            # Renamed from `bytes` to avoid shadowing the builtin.
            data = self.serializer.dumps(vs)
            write_int(len(data), stream)
            stream.write(data)

            size = len(data)
            if size < best:
                # Batches are small: pack twice as many objects next time.
                batch *= 2
            elif size > best * 10 and batch > 1:
                # Way past target: back off.
                batch //= 2

    def __repr__(self):
        return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):

    """
    Deserializes the JavaRDD cartesian() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
    we additionally need to do the cartesian within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        # Serializers for the left (key) and right (value) side streams.
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        # Both sides' batches are interleaved in the same stream; read one
        # batch from each per iteration and cross-product them.
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield product(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "CartesianDeserializer(%s, %s)" % \
            (str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):

    """
    Deserializes the JavaRDD zip() of two PythonRDDs.
    Due to pyspark batching we cannot simply use the result of the Java RDD zip,
    we additionally need to do the zip within each pair of batches.
    """

    def __init__(self, key_ser, val_ser):
        # Serializers for the left (key) and right (value) side streams.
        self.key_ser = key_ser
        self.val_ser = val_ser

    def _load_stream_without_unbatching(self, stream):
        key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
        val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
        for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
            # For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
            # instead of lists. We need to convert them to lists if needed.
            key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
            val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
            # zip() requires both sides to pair up exactly; mismatched batch
            # lengths mean the two streams are out of sync.
            if len(key_batch) != len(val_batch):
                raise ValueError("Can not deserialize PairRDD with different number of items"
                                 " in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
            # for correctness with repeated cartesian/zip this must be returned as one batch
            yield zip(key_batch, val_batch)

    def load_stream(self, stream):
        return chain.from_iterable(self._load_stream_without_unbatching(stream))

    def __repr__(self):
        return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
    """Framed pass-through: reads and writes raw bytes unchanged."""

    def loads(self, obj):
        return obj

    def dumps(self, obj):
        return obj
# Hack namedtuple, make it picklable
__cls = {} # type: ignore
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
    """ Hack namedtuple() to make it picklable """
    # hijack only one time
    if hasattr(collections.namedtuple, "__hijack"):
        return

    global _old_namedtuple  # or it will put in closure
    global _old_namedtuple_kwdefaults  # or it will put in closure too

    def _copy_func(f):
        # Clone the function object so the original factory survives after
        # collections.namedtuple's code object is replaced below.
        return types.FunctionType(f.__code__, f.__globals__, f.__name__,
                                  f.__defaults__, f.__closure__)

    _old_namedtuple = _copy_func(collections.namedtuple)
    _old_namedtuple_kwdefaults = collections.namedtuple.__kwdefaults__

    def namedtuple(*args, **kwargs):
        # Re-apply the factory's keyword defaults, delegate to the original,
        # then patch the resulting class so it pickles via _restore.
        for k, v in _old_namedtuple_kwdefaults.items():
            kwargs[k] = kwargs.get(k, v)
        cls = _old_namedtuple(*args, **kwargs)
        return _hack_namedtuple(cls)

    # replace namedtuple with the new one
    collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
    collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
    collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
    # Swapping the code object means every existing reference to
    # collections.namedtuple picks up the patched behaviour.
    collections.namedtuple.__code__ = namedtuple.__code__
    collections.namedtuple.__hijack = 1

    # hack the cls already generated by namedtuple.
    # Those created in other modules can be pickled as normal,
    # so only hack those in __main__ module
    for n, o in sys.modules["__main__"].__dict__.items():
        if (type(o) is type and o.__base__ is tuple
                and hasattr(o, "_fields")
                and "__reduce__" not in o.__dict__):
            _hack_namedtuple(o)  # hack inplace


_hijack_namedtuple()
class PickleSerializer(FramedSerializer):

    """
    Serializes objects using Python's pickle serializer:

        http://docs.python.org/2/library/pickle.html

    This serializer supports nearly any Python object, but may
    not be as fast as more specialized serializers.
    """

    def dumps(self, obj):
        # pickle_protocol is the module-level pickle.HIGHEST_PROTOCOL.
        return pickle.dumps(obj, pickle_protocol)

    def loads(self, obj, encoding="bytes"):
        # `encoding` controls how Python 2 str pickles are decoded on Python 3.
        return pickle.loads(obj, encoding=encoding)
class CloudPickleSerializer(PickleSerializer):
    """PickleSerializer variant that uses cloudpickle, so lambdas, closures
    and interactively defined functions can be serialized too."""

    def dumps(self, obj):
        try:
            return cloudpickle.dumps(obj, pickle_protocol)
        except pickle.PickleError:
            raise
        except Exception as e:
            emsg = str(e)
            # struct's "'i' format requires ..." message means the pickle
            # overflowed the 32-bit framing limit, i.e. the object is too big.
            if "'i' format requires" in emsg:
                msg = "Object too large to serialize: %s" % emsg
            else:
                msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
            # Print the original traceback before re-raising as PicklingError.
            print_exec(sys.stderr)
            raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):

    """
    Serializes objects using Python's Marshal serializer:

        http://docs.python.org/2/library/marshal.html

    This serializer is faster than PickleSerializer but supports fewer datatypes.
    """

    # Note: the marshal format is Python-version specific, so sender and
    # receiver must run compatible interpreters.

    def dumps(self, obj):
        return marshal.dumps(obj)

    def loads(self, obj):
        return marshal.loads(obj)
class AutoSerializer(FramedSerializer):

    """
    Choose marshal or pickle as serialization protocol automatically
    """

    def __init__(self):
        FramedSerializer.__init__(self)
        # None until marshal fails once; then b'P' to force pickle forever.
        self._type = None

    def dumps(self, obj):
        # A one-byte tag prefix records which protocol encoded the payload.
        if self._type is not None:
            return b'P' + pickle.dumps(obj, -1)
        try:
            return b'M' + marshal.dumps(obj)
        except Exception:
            self._type = b'P'
            return b'P' + pickle.dumps(obj, -1)

    def loads(self, obj):
        # Fix: slice (obj[0:1]) instead of indexing (obj[0]).  On Python 3,
        # indexing bytes yields an int, which can never equal b'M'/b'P' and
        # made every load raise ValueError; slicing yields bytes on both
        # Python 2 and 3.
        _type = obj[0:1]
        if _type == b'M':
            return marshal.loads(obj[1:])
        elif _type == b'P':
            return pickle.loads(obj[1:])
        else:
            raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
    """
    Compress the serialized data
    """

    def __init__(self, serializer):
        FramedSerializer.__init__(self)
        # Wrapping a non-framed serializer would break the length framing.
        assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
        self.serializer = serializer

    def dumps(self, obj):
        # Compression level 1: fastest setting; throughput matters more than
        # ratio for shuffle/broadcast traffic.
        return zlib.compress(self.serializer.dumps(obj), 1)

    def loads(self, obj):
        return self.serializer.loads(zlib.decompress(obj))

    def __repr__(self):
        return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):

    """
    Deserializes streams written by String.getBytes.
    """

    def __init__(self, use_unicode=True):
        # When False, raw bytes are returned instead of decoded str.
        self.use_unicode = use_unicode

    def loads(self, stream):
        length = read_int(stream)
        # Negative lengths are SpecialLengths control markers.
        if length == SpecialLengths.END_OF_DATA_SECTION:
            raise EOFError
        elif length == SpecialLengths.NULL:
            return None
        s = stream.read(length)
        return s.decode("utf-8") if self.use_unicode else s

    def load_stream(self, stream):
        try:
            while True:
                yield self.loads(stream)
        except struct.error:
            # Stream ended in the middle of a length prefix.
            return
        except EOFError:
            return

    def __repr__(self):
        return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
    """Read a big-endian signed 64-bit integer from ``stream``."""
    raw = stream.read(8)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!q", raw)
    return value
def write_long(value, stream):
    """Write ``value`` to ``stream`` as a big-endian signed 64-bit integer."""
    stream.write(struct.pack("!q", value))
def pack_long(value):
    """Return ``value`` packed as 8 big-endian bytes (no stream involved)."""
    return struct.pack("!q", value)
def read_int(stream):
    """Read a big-endian signed 32-bit integer from ``stream``."""
    raw = stream.read(4)
    if not raw:
        raise EOFError
    (value,) = struct.unpack("!i", raw)
    return value
def write_int(value, stream):
    """Write ``value`` to ``stream`` as a big-endian signed 32-bit integer."""
    stream.write(struct.pack("!i", value))
def read_bool(stream):
    """Read one byte from ``stream`` and decode it as a boolean."""
    raw = stream.read(1)
    if not raw:
        raise EOFError
    (flag,) = struct.unpack("!?", raw)
    return flag
def write_with_length(obj, stream):
    """Write ``obj`` (a bytes-like value) framed by its 32-bit length prefix."""
    write_int(len(obj), stream)
    stream.write(obj)
class ChunkedStream(object):

    """
    This is a file-like object takes a stream of data, of unknown length, and breaks it into fixed
    length frames.  The intended use case is serializing large data and sending it immediately over
    a socket -- we do not want to buffer the entire data before sending it, but the receiving end
    needs to know whether or not there is more data coming.

    It works by buffering the incoming data in some fixed-size chunks.  If the buffer is full, it
    first sends the buffer size, then the data.  This repeats as long as there is more data to send.
    When this is closed, it sends the length of whatever data is in the buffer, then that data, and
    finally a "length" of -1 to indicate the stream has completed.
    """

    def __init__(self, wrapped, buffer_size):
        self.buffer_size = buffer_size
        # Reusable staging buffer; current_pos is the next free byte in it.
        self.buffer = bytearray(buffer_size)
        self.current_pos = 0
        self.wrapped = wrapped

    def write(self, bytes):
        # Note: the parameter name shadows the builtin `bytes`; kept for
        # interface compatibility with file-like objects.
        byte_pos = 0
        byte_remaining = len(bytes)
        while byte_remaining > 0:
            new_pos = byte_remaining + self.current_pos
            if new_pos < self.buffer_size:
                # just put it in our buffer
                self.buffer[self.current_pos:new_pos] = bytes[byte_pos:]
                self.current_pos = new_pos
                byte_remaining = 0
            else:
                # fill the buffer, send the length then the contents, and start filling again
                space_left = self.buffer_size - self.current_pos
                new_byte_pos = byte_pos + space_left
                self.buffer[self.current_pos:self.buffer_size] = bytes[byte_pos:new_byte_pos]
                write_int(self.buffer_size, self.wrapped)
                self.wrapped.write(self.buffer)
                byte_remaining -= space_left
                byte_pos = new_byte_pos
                self.current_pos = 0

    def close(self):
        # if there is anything left in the buffer, write it out first
        if self.current_pos > 0:
            write_int(self.current_pos, self.wrapped)
            self.wrapped.write(self.buffer[:self.current_pos])
        # -1 length indicates to the receiving end that we're done.
        write_int(-1, self.wrapped)
        self.wrapped.close()

    @property
    def closed(self):
        """
        Return True if the `wrapped` object has been closed.

        NOTE: this property is required by pyarrow to be used as a file-like object in
        pyarrow.RecordBatchStreamWriter from ArrowStreamSerializer
        """
        return self.wrapped.closed
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
| apache-2.0 | 1,304,584,176,585,604,600 | 30.768519 | 100 | 0.625668 | false |
hujiajie/pa-chromium | chrome/test/functional/search_engines.py | 79 | 3858 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
import pyauto_functional # Must be imported before pyauto
import pyauto
import test_utils
class SearchEnginesTest(pyauto.PyUITest):
  """TestCase for Search Engines."""

  # Base URL used by the custom search engine registered in the tests below.
  _localhost_prefix = 'http://localhost:1000/'

  def _GetSearchEngineWithKeyword(self, keyword):
    """Get search engine info and return an element that matches keyword.

    Args:
      keyword: Search engine keyword field.

    Returns:
      A search engine info dict or None.
    """
    match_list = ([x for x in self.GetSearchEngineInfo()
                   if x['keyword'] == keyword])
    if match_list:
      return match_list[0]
    return None

  def Debug(self):
    """Test method for experimentation.

    This method will not run automatically.
    """
    # Python 2 raw_input: blocks until the developer presses <enter>.
    while True:
      raw_input('Interact with the browser and hit <enter>')
      self.pprint(self.GetSearchEngineInfo())

  def testDiscoverSearchEngine(self):
    """Test that chrome discovers youtube search engine after searching."""
    # Take a snapshot of current search engine info.
    info = self.GetSearchEngineInfo()
    youtube = self._GetSearchEngineWithKeyword('youtube.com')
    self.assertFalse(youtube)
    # Use omnibox to invoke search engine discovery.
    # Navigating using NavigateToURL does not currently invoke this logic.
    self.SetOmniboxText('http://www.youtube.com')
    self.OmniboxAcceptInput()
    def InfoUpdated(old_info):
      new_info = self.GetSearchEngineInfo()
      if len(new_info) > len(old_info):
        return True
      return False
    # NOTE(review): the WaitUntil return value is ignored, so a timeout is
    # only caught indirectly by the assertions below -- consider asserting it.
    self.WaitUntil(lambda: InfoUpdated(info))
    youtube = self._GetSearchEngineWithKeyword('youtube.com')
    self.assertTrue(youtube)
    self.assertTrue(re.search('youtube', youtube['short_name'],
                              re.IGNORECASE))
    self.assertFalse(youtube['in_default_list'])
    self.assertFalse(youtube['is_default'])

  def testDeleteSearchEngine(self):
    """Test adding then deleting a search engine."""
    self.AddSearchEngine(title='foo',
                         keyword='foo.com',
                         url='http://foo/?q=%s')
    foo = self._GetSearchEngineWithKeyword('foo.com')
    self.assertTrue(foo)
    self.DeleteSearchEngine('foo.com')
    foo = self._GetSearchEngineWithKeyword('foo.com')
    self.assertFalse(foo)

  def testMakeSearchEngineDefault(self):
    """Test adding then making a search engine default."""
    self.AddSearchEngine(
        title='foo',
        keyword='foo.com',
        url=self._localhost_prefix + '?q=%s')
    foo = self._GetSearchEngineWithKeyword('foo.com')
    self.assertTrue(foo)
    self.assertFalse(foo['is_default'])
    self.MakeSearchEngineDefault('foo.com')
    foo = self._GetSearchEngineWithKeyword('foo.com')
    self.assertTrue(foo)
    self.assertTrue(foo['is_default'])
    # Searching from the omnibox should now hit the custom engine's URL.
    self.SetOmniboxText('foobar')
    self.OmniboxAcceptInput()
    self.assertEqual(self._localhost_prefix + '?q=foobar',
                     self.GetActiveTabURL().spec())

  def testDefaultSearchEngines(self):
    """Test that we have 3 default search options."""
    info = self.GetSearchEngineInfo()
    self.assertEqual(len(info), 3)
    # Verify that each can be used as the default search provider.
    default_providers = ['google.com', 'yahoo.com', 'bing.com']
    for keyword in default_providers:
      self.MakeSearchEngineDefault(keyword)
      search_engine = self._GetSearchEngineWithKeyword(keyword)
      self.assertTrue(search_engine['is_default'])
      self.SetOmniboxText('test search')
      self.OmniboxAcceptInput()
      self.assertTrue(re.search(keyword, self.GetActiveTabURL().spec()))
if __name__ == '__main__':
pyauto_functional.Main()
| bsd-3-clause | 1,029,202,799,083,367,600 | 34.072727 | 75 | 0.676516 | false |
gchp/django | django/contrib/staticfiles/management/commands/findstatic.py | 463 | 1745 | from __future__ import unicode_literals
import os
from django.contrib.staticfiles import finders
from django.core.management.base import LabelCommand
from django.utils.encoding import force_text
class Command(LabelCommand):
help = "Finds the absolute paths for the given static file(s)."
label = 'static file'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument('--first', action='store_false', dest='all',
default=True,
help="Only return the first match for each static file.")
def handle_label(self, path, **options):
verbosity = options['verbosity']
result = finders.find(path, all=options['all'])
path = force_text(path)
if verbosity >= 2:
searched_locations = ("Looking in the following locations:\n %s" %
"\n ".join(force_text(location)
for location in finders.searched_locations))
else:
searched_locations = ''
if result:
if not isinstance(result, (list, tuple)):
result = [result]
result = (force_text(os.path.realpath(path)) for path in result)
if verbosity >= 1:
file_list = '\n '.join(result)
return ("Found '%s' here:\n %s\n%s" %
(path, file_list, searched_locations))
else:
return '\n'.join(result)
else:
message = ["No matching file found for '%s'." % path]
if verbosity >= 2:
message.append(searched_locations)
if verbosity >= 1:
self.stderr.write('\n'.join(message))
| bsd-3-clause | -5,458,890,167,682,293,000 | 37.777778 | 79 | 0.557593 | false |
PhilHarnish/forge | bin/process_clues.py | 1 | 1840 | """Process http://www.otsys.com/clue/ DB for use with python."""
import collections
import os
import sqlite3
import sys
# Add parent directory to path.
sys.path.append(os.path.join(
os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
'src'))
from data import crossword
from data import data
from puzzle.puzzlepedia import prod_config
prod_config.init()
STOP_WORD = '~' # Appears after z.
MAX_KEYWORDS = 50
MIN_USAGES = 5
VISITED = set()
ALL_SEEN = collections.defaultdict(int)
def _prune_keywords(keywords):
top = sorted(
keywords.items(), key=lambda i: i[1], reverse=True
)[:MAX_KEYWORDS]
results = {}
for keyword, count in top:
ALL_SEEN[keyword] += count
results[keyword] = count
return results
conn = crossword.init('data/crossword.sqlite')
c = conn.cursor()
def _insert(solution, usages, keywords):
try:
crossword.add(c, solution, usages, _prune_keywords(keywords))
except (sqlite3.OperationalError, sqlite3.IntegrityError):
conn.commit()
conn.close()
raise
last_solution = None
keywords = collections.defaultdict(int)
usages = 0
for i, line in enumerate(
data.open_project_path('data/clues.txt', errors='ignore')):
solution, unused_int, unused_year, unused_source, clue = line.lower().split(
None, 4)
if solution > STOP_WORD:
print(line)
break
if solution in VISITED:
print('Skipping %s' % line)
continue
if last_solution and solution != last_solution:
if usages >= MIN_USAGES and keywords:
_insert(last_solution, usages, keywords)
VISITED.add(last_solution)
keywords.clear()
usages = 0
usages += 1
for keyword in crossword.clue_keywords(clue):
keywords[keyword] += 1
last_solution = solution
_insert(last_solution, usages, keywords)
conn.commit()
conn.close()
print(_prune_keywords(ALL_SEEN))
| mit | 377,060,723,532,881,600 | 23.533333 | 78 | 0.693478 | false |
40223108/w18 | static/Brython3.1.1-20150328-091302/Lib/unittest/test/_test_warnings.py | 858 | 2304 | # helper module for test_runner.Test_TextTestRunner.test_warnings
"""
This module has a number of tests that raise different kinds of warnings.
When the tests are run, the warnings are caught and their messages are printed
to stdout. This module also accepts an arg that is then passed to
unittest.main to affect the behavior of warnings.
Test_TextTestRunner.test_warnings executes this script with different
combinations of warnings args and -W flags and check that the output is correct.
See #10535.
"""
import sys
import unittest
import warnings
def warnfun():
warnings.warn('rw', RuntimeWarning)
class TestWarnings(unittest.TestCase):
# unittest warnings will be printed at most once per type (max one message
# for the fail* methods, and one for the assert* methods)
def test_assert(self):
self.assertEquals(2+2, 4)
self.assertEquals(2*2, 4)
self.assertEquals(2**2, 4)
def test_fail(self):
self.failUnless(1)
self.failUnless(True)
def test_other_unittest(self):
self.assertAlmostEqual(2+2, 4)
self.assertNotAlmostEqual(4+4, 2)
# these warnings are normally silenced, but they are printed in unittest
def test_deprecation(self):
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
warnings.warn('dw', DeprecationWarning)
def test_import(self):
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
warnings.warn('iw', ImportWarning)
# user warnings should always be printed
def test_warning(self):
warnings.warn('uw')
warnings.warn('uw')
warnings.warn('uw')
# these warnings come from the same place; they will be printed
# only once by default or three times if the 'always' filter is used
def test_function(self):
warnfun()
warnfun()
warnfun()
if __name__ == '__main__':
with warnings.catch_warnings(record=True) as ws:
# if an arg is provided pass it to unittest.main as 'warnings'
if len(sys.argv) == 2:
unittest.main(exit=False, warnings=sys.argv.pop())
else:
unittest.main(exit=False)
# print all the warning messages collected
for w in ws:
print(w.message)
| gpl-3.0 | 6,196,802,471,668,009,000 | 30.561644 | 80 | 0.673177 | false |
40223142/cda11 | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/dummy/connection.py | 707 | 3049 | #
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__all__ = [ 'Client', 'Listener', 'Pipe' ]
from queue import Queue
families = [None]
class Listener(object):
def __init__(self, address=None, family=None, backlog=1):
self._backlog_queue = Queue(backlog)
def accept(self):
return Connection(*self._backlog_queue.get())
def close(self):
self._backlog_queue = None
address = property(lambda self: self._backlog_queue)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
def Client(address):
_in, _out = Queue(), Queue()
address.put((_out, _in))
return Connection(_in, _out)
def Pipe(duplex=True):
a, b = Queue(), Queue()
return Connection(a, b), Connection(b, a)
class Connection(object):
def __init__(self, _in, _out):
self._out = _out
self._in = _in
self.send = self.send_bytes = _out.put
self.recv = self.recv_bytes = _in.get
def poll(self, timeout=0.0):
if self._in.qsize() > 0:
return True
if timeout <= 0.0:
return False
self._in.not_empty.acquire()
self._in.not_empty.wait(timeout)
self._in.not_empty.release()
return self._in.qsize() > 0
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.close()
| gpl-3.0 | -7,191,503,471,821,366,000 | 29.79798 | 79 | 0.677599 | false |
Sumith1896/sympy | sympy/utilities/runtests.py | 4 | 78928 | """
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly
(or identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
from __future__ import print_function, division
import os
import sys
import platform
import inspect
import traceback
import pdb
import re
import linecache
from fnmatch import fnmatch
from timeit import default_timer as clock
import doctest as pdoctest # avoid clashing with our doctest() function
from doctest import DocTestFinder, DocTestRunner
import random
import subprocess
import signal
import stat
from inspect import isgeneratorfunction
from sympy.core.cache import clear_cache
from sympy.core.compatibility import exec_, PY3, string_types, range
from sympy.utilities.misc import find_executable
from sympy.external import import_module
from sympy.utilities.exceptions import SymPyDeprecationWarning
# True iff we are running on Windows; used to work around console-encoding
# limitations in the doctest failure reporter below.
IS_WINDOWS = (os.name == 'nt')
class Skipped(Exception):
    """Raised from within a test to signal that the test was skipped."""
import __future__
# Compiler flags passed when compiling/executing test code so that it sees
# the same __future__ behavior; currently only true division is forced.
# More flags may need to be added here as new __future__ features appear.
future_flags = __future__.division.compiler_flag
def _indent(s, indent=4):
    """
    Add the given number of space characters to the beginning of
    every non-blank line in ``s``, and return the result.

    If the string ``s`` is Unicode, it is encoded using the stdout
    encoding and the ``backslashreplace`` error handler.
    """
    # On Python 2 only: encode unicode strings so doctest output is safe to
    # print.  After a 2to3 run this branch is bogus, hence the version guard.
    if not PY3:
        if isinstance(s, unicode):
            s = s.encode(pdoctest._encoding, 'backslashreplace')
    # Prepend the prefix to the start of every non-empty line.
    prefix = indent * ' '
    return re.sub('(?m)^(?!$)', prefix, s)
# Monkeypatch the stdlib doctest module to use our encoding-aware _indent.
pdoctest._indent = _indent
# ovverride reporter to maintain windows and python3
def _report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
s = self._checker.output_difference(example, got, self.optionflags)
s = s.encode('raw_unicode_escape').decode('utf8', 'ignore')
out(self._failure_header(test, example) + s)
# Only install the tolerant reporter where the default one actually breaks:
# the Windows console under Python 3 cannot encode arbitrary unicode output.
if PY3 and IS_WINDOWS:
    DocTestRunner.report_failure = _report_failure
def convert_to_native_paths(lst):
    """
    Converts a list of '/' separated paths into a list of
    native (os.sep separated) paths and converts to lowercase
    if the system is case insensitive.
    """
    converted = []
    for path in lst:
        native = os.path.join(*path.split("/"))
        if sys.platform == "win32":
            # os.path.join drops the backslash right after a drive colon;
            # re-insert it so the path stays absolute.
            colon = native.find(':')
            if colon != -1 and native[colon + 1] != '\\':
                native = native[:colon + 1] + '\\' + native[colon + 1:]
        converted.append(sys_normcase(native))
    return converted
def get_sympy_dir():
    """
    Returns the root sympy directory and set the global value
    indicating whether the system is case sensitive or not.
    """
    global sys_case_insensitive

    this_file = os.path.abspath(__file__)
    sympy_dir = os.path.normpath(
        os.path.join(os.path.dirname(this_file), "..", ".."))
    # If the directory is reachable under its lower- and upper-cased
    # spellings, the filesystem is case insensitive (e.g. Windows, macOS).
    sys_case_insensitive = (
        os.path.isdir(sympy_dir)
        and os.path.isdir(sympy_dir.lower())
        and os.path.isdir(sympy_dir.upper()))
    return sys_normcase(sympy_dir)
def sys_normcase(f):
    """Lower-case *f* on case-insensitive filesystems, else return it as-is."""
    # sys_case_insensitive is a module global defined by get_sympy_dir().
    return f.lower() if sys_case_insensitive else f
def setup_pprint():
    """Configure sympy printing so doctest output is ascii and hash-stable."""
    from sympy import init_printing, pprint_use_unicode

    # Doctests compare textual output, so force ascii pretty printing and
    # install the hash-stable str printer.
    pprint_use_unicode(False)
    init_printing(pretty_print=False)
def run_in_subprocess_with_hash_randomization(function, function_args=(),
    function_kwargs={}, command=sys.executable,
    module='sympy.utilities.runtests', force=False):
    """
    Run a function in a Python subprocess with hash randomization enabled.

    If hash randomization is not supported by the version of Python given, it
    returns False.  Otherwise, it returns the exit value of the command.  The
    function is passed to sys.exit(), so the return value of the function will
    be the return value.

    The environment variable PYTHONHASHSEED is used to seed Python's hash
    randomization.  If it is set, this function will return False, because
    starting a new subprocess is unnecessary in that case.  If it is not set,
    one is set at random, and the tests are run.  Note that if this
    environment variable is set when Python starts, hash randomization is
    automatically enabled.  To force a subprocess to be created even if
    PYTHONHASHSEED is set, pass ``force=True``.  This flag will not force a
    subprocess in Python versions that do not support hash randomization (see
    below), because those versions of Python do not support the ``-R`` flag.

    ``function`` should be a string name of a function that is importable from
    the module ``module``, like "_test".  The default for ``module`` is
    "sympy.utilities.runtests".  ``function_args`` and ``function_kwargs``
    should be a repr-able tuple and dict, respectively.  The default Python
    command is sys.executable, which is the currently running Python command.

    This function is necessary because the seed for hash randomization must be
    set by the environment variable before Python starts.  Hence, in order to
    use a predetermined seed for tests, we must start Python in a separate
    subprocess.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    Examples
    ========

    >>> from sympy.utilities.runtests import (
    ... run_in_subprocess_with_hash_randomization)
    >>> # run the core tests in verbose mode
    >>> run_in_subprocess_with_hash_randomization("_test",
    ... function_args=("core",),
    ... function_kwargs={'verbose': True}) # doctest: +SKIP
    # Will return 0 if sys.executable supports hash randomization and tests
    # pass, 1 if they fail, and False if it does not support hash
    # randomization.
    """
    # Note, we must return False everywhere, not None, as subprocess.call will
    # sometimes return None.

    # First check if the Python version supports hash randomization.
    # If it doesn't have this support, it won't recognize the -R flag,
    # so "python -RV" exits nonzero and we bail out.
    p = subprocess.Popen([command, "-RV"], stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode != 0:
        return False

    hash_seed = os.getenv("PYTHONHASHSEED")
    if not hash_seed:
        # No seed set: pick one at random for the child process.
        os.environ["PYTHONHASHSEED"] = str(random.randrange(2**32))
    else:
        # A seed is already set; starting a subprocess is pointless unless
        # the caller explicitly forces it.
        if not force:
            return False
    # Now run the command: import and call the target function in a fresh
    # interpreter started with -R so the seed takes effect.
    commandstring = ("import sys; from %s import %s;sys.exit(%s(*%s, **%s))" %
                     (module, function, function, repr(function_args),
                      repr(function_kwargs)))
    try:
        p = subprocess.Popen([command, "-R", "-c", commandstring])
        p.communicate()
    except KeyboardInterrupt:
        # Let the child finish/terminate cleanly before propagating.
        p.wait()
    finally:
        # Put the environment variable back, so that it reads correctly for
        # the current Python process.
        if hash_seed is None:
            del os.environ["PYTHONHASHSEED"]
        else:
            os.environ["PYTHONHASHSEED"] = hash_seed
    return p.returncode
def run_all_tests(test_args=(), test_kwargs=None, doctest_args=(),
    doctest_kwargs=None, examples_args=(), examples_kwargs=None):
    """
    Run all tests.

    Right now, this runs the regular tests (bin/test), the doctests
    (bin/doctest), the examples (examples/all.py), and the sage tests (see
    sympy/external/tests/test_sage.py).

    This is what ``setup.py test`` uses.

    You can pass arguments and keyword arguments to the test functions that
    support them (for now, test, doctest, and the examples).  See the
    docstrings of those functions for a description of the available options.

    For example, to run the solvers tests with colors turned off:

    >>> from sympy.utilities.runtests import run_all_tests
    >>> run_all_tests(test_args=("solvers",),
    ... test_kwargs={"colors": False}) # doctest: +SKIP

    Exits the process with a nonzero status if any part of the suite fails.
    """
    # Use None sentinels instead of mutable default arguments ({} and
    # {'quiet': True}), which would be shared across calls.
    if test_kwargs is None:
        test_kwargs = {}
    if doctest_kwargs is None:
        doctest_kwargs = {}
    if examples_kwargs is None:
        examples_kwargs = {'quiet': True}
    tests_successful = True

    try:
        # Regular tests
        if not test(*test_args, **test_kwargs):
            # some regular test fails, so set the tests_successful
            # flag to false and continue running the doctests
            tests_successful = False

        # Doctests
        print()
        if not doctest(*doctest_args, **doctest_kwargs):
            tests_successful = False

        # Examples
        print()
        sys.path.append("examples")
        from all import run_examples  # examples/all.py
        if not run_examples(*examples_args, **examples_kwargs):
            tests_successful = False

        # Sage tests
        if not (sys.platform == "win32" or PY3):
            # run Sage tests; Sage currently doesn't support Windows or Python 3
            dev_null = open(os.devnull, 'w')
            if subprocess.call("sage -v", shell=True, stdout=dev_null,
                               stderr=dev_null) == 0:
                if subprocess.call("sage -python bin/test "
                                   "sympy/external/tests/test_sage.py",
                                   shell=True) != 0:
                    tests_successful = False

        if tests_successful:
            return
        else:
            # Return nonzero exit code
            sys.exit(1)
    except KeyboardInterrupt:
        print()
        print("DO *NOT* COMMIT!")
        sys.exit(1)
def test(*paths, **kwargs):
    """
    Run tests in the specified test_*.py files.

    Tests in a particular test_*.py file are run if any of the given strings
    in ``paths`` matches a part of the test file's path.  If ``paths=[]``,
    tests in all test_*.py files are run.

    Notes:

    - If sort=False, tests are run in random order (not default).
    - Paths can be entered in native system format or in unix,
      forward-slash format.
    - Files that are on the blacklist can be tested by providing
      their path; they are only excluded if no paths are given.

    **Explanation of test results**

    ======  ===============================================================
    Output  Meaning
    ======  ===============================================================
    .       passed
    F       failed
    X       XPassed (expected to fail but passed)
    f       XFAILed (expected to fail and indeed failed)
    s       skipped
    w       slow
    T       timeout (e.g., when ``--timeout`` is used)
    K       KeyboardInterrupt (when running the slow tests with ``--slow``,
            you can interrupt one of them without killing the test runner)
    ======  ===============================================================

    Colors have no additional meaning and are used just to facilitate
    interpreting the output.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.test()    # doctest: +SKIP

    Run one file:

    >>> sympy.test("sympy/core/tests/test_basic.py")    # doctest: +SKIP
    >>> sympy.test("_basic")    # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...        "sympy/functions")    # doctest: +SKIP

    Run all tests in sympy/core and sympy/utilities:

    >>> sympy.test("/core", "/util")    # doctest: +SKIP

    Run specific test from a file:

    >>> sympy.test("sympy/core/tests/test_basic.py",
    ...        kw="test_equality")    # doctest: +SKIP

    Run specific test from any file:

    >>> sympy.test(kw="subs")    # doctest: +SKIP

    Run the tests with verbose mode on:

    >>> sympy.test(verbose=True)    # doctest: +SKIP

    Don't sort the test output:

    >>> sympy.test(sort=False)    # doctest: +SKIP

    Turn on post-mortem pdb:

    >>> sympy.test(pdb=True)    # doctest: +SKIP

    Turn off colors:

    >>> sympy.test(colors=False)    # doctest: +SKIP

    Force colors, even when the output is not to a terminal (this is useful,
    e.g., if you are piping to ``less -r`` and you still want colors)

    >>> sympy.test(force_colors=False)    # doctest: +SKIP

    The traceback verboseness can be set to "short" or "no" (default is
    "short")

    >>> sympy.test(tb='no')    # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts.  The
    split currently only splits the test files, though this may change in the
    future.  ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``.  For instance, to run the first half of the test suite:

    >>> sympy.test(split='1/2')  # doctest: +SKIP

    You can disable running the tests in a separate subprocess using
    ``subprocess=False``.  This is done to support seeding hash randomization,
    which is enabled by default in the Python versions where it is supported.
    If subprocess=False, hash randomization is enabled/disabled according to
    whether it has been enabled or not in the calling Python process.
    However, even if it is enabled, the seed cannot be printed unless it is
    called from a new Python process.

    Hash randomization was added in the minor Python versions 2.6.8, 2.7.3,
    3.1.5, and 3.2.3, and is enabled by default in all Python versions after
    and including 3.3.0.

    If hash randomization is not supported ``subprocess=False`` is used
    automatically.

    >>> sympy.test(subprocess=False)     # doctest: +SKIP

    To set the hash randomization seed, set the environment variable
    ``PYTHONHASHSEED`` before running the tests.  This can be done from within
    Python using

    >>> import os
    >>> os.environ['PYTHONHASHSEED'] = '42' # doctest: +SKIP

    Or from the command line using

    $ PYTHONHASHSEED=42 ./bin/test

    If the seed is not set, a random seed will be chosen.

    Note that to reproduce the same hash values, you must use both the same seed
    as well as the same architecture (32-bit vs. 64-bit).
    """
    subprocess = kwargs.pop("subprocess", True)
    rerun = kwargs.pop("rerun", 0)
    # count up from 0, do not print 0
    print_counter = lambda i : (print("rerun %d" % (rerun-i))
                                if rerun-i else None)

    if subprocess:
        # loop backwards so last i is 0
        for i in range(rerun, -1, -1):
            print_counter(i)
            # Run _test in a fresh interpreter so hash randomization can be
            # seeded; returns False if the interpreter lacks -R support.
            ret = run_in_subprocess_with_hash_randomization("_test",
                function_args=paths, function_kwargs=kwargs)
            if ret is False:
                break
            val = not bool(ret)
            # exit on the first failure or if done
            if not val or i == 0:
                return val

    # rerun even if hash randomization is not supported:
    # fall back to running _test in this process.
    for i in range(rerun, -1, -1):
        print_counter(i)
        val = not bool(_test(*paths, **kwargs))
        if not val or i == 0:
            return val
def _test(*paths, **kwargs):
    """
    Internal function that actually runs the tests.

    All keyword arguments from ``test()`` are passed to this function except
    for ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstring of
    ``test()`` for more information.
    """
    verbose = kwargs.get("verbose", False)
    tb = kwargs.get("tb", "short")
    kw = kwargs.get("kw", None) or ()
    # ensure that kw is a tuple
    if isinstance(kw, str):
        kw = (kw, )
    post_mortem = kwargs.get("pdb", False)
    colors = kwargs.get("colors", True)
    force_colors = kwargs.get("force_colors", False)
    sort = kwargs.get("sort", True)
    seed = kwargs.get("seed", None)
    if seed is None:
        # No explicit seed: pick one so the (random) test order is reportable.
        seed = random.randrange(100000000)
    timeout = kwargs.get("timeout", False)
    slow = kwargs.get("slow", False)
    enhance_asserts = kwargs.get("enhance_asserts", False)
    split = kwargs.get('split', None)
    blacklist = kwargs.get('blacklist', [])
    blacklist = convert_to_native_paths(blacklist)
    r = PyTestReporter(verbose=verbose, tb=tb, colors=colors,
        force_colors=force_colors, split=split)
    t = SymPyTests(r, kw, post_mortem, seed)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)

    test_files = t.get_test_files('sympy')

    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]

    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # Keep a test file if any requested path is a substring of its path
        # or fnmatch-es its basename.
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        # Run only the requested 'a/b' slice of the matched files.
        matched = split_list(matched, split)

    t._testfiles.extend(matched)

    return int(not t.test(sort=sort, timeout=timeout,
        slow=slow, enhance_asserts=enhance_asserts))
def doctest(*paths, **kwargs):
    """
    Runs doctests in all \*.py files in the sympy directory which match
    any of the given strings in ``paths`` or all tests if paths=[].

    Notes:

    - Paths can be entered in native system format or in unix,
      forward-slash format.
    - Files that are on the blacklist can be tested by providing
      their path; they are only excluded if no paths are given.

    Examples
    ========

    >>> import sympy

    Run all tests:

    >>> sympy.doctest() # doctest: +SKIP

    Run one file:

    >>> sympy.doctest("sympy/core/basic.py") # doctest: +SKIP
    >>> sympy.doctest("polynomial.rst") # doctest: +SKIP

    Run all tests in sympy/functions/ and some particular file:

    >>> sympy.doctest("/functions", "basic.py") # doctest: +SKIP

    Run any file having polynomial in its name, doc/src/modules/polynomial.rst,
    sympy/functions/special/polynomials.py, and sympy/polys/polynomial.py:

    >>> sympy.doctest("polynomial") # doctest: +SKIP

    The ``split`` option can be passed to split the test run into parts.  The
    split currently only splits the test files, though this may change in the
    future.  ``split`` should be a string of the form 'a/b', which will run
    part ``a`` of ``b``.  Note that the regular doctests and the Sphinx
    doctests are split independently.  For instance, to run the first half of
    the test suite:

    >>> sympy.doctest(split='1/2') # doctest: +SKIP

    The ``subprocess`` and ``verbose`` options are the same as with the
    function ``test()``.  See the docstring of that function for more
    information.
    """
    subprocess = kwargs.pop("subprocess", True)
    rerun = kwargs.pop("rerun", 0)
    # count up from 0, do not print 0
    print_counter = lambda i : (print("rerun %d" % (rerun-i))
                                if rerun-i else None)

    if subprocess:
        # loop backwards so last i is 0
        for i in range(rerun, -1, -1):
            print_counter(i)
            # Run _doctest in a fresh interpreter with hash randomization
            # seeded; returns False if the interpreter lacks -R support.
            ret = run_in_subprocess_with_hash_randomization("_doctest",
                function_args=paths, function_kwargs=kwargs)
            if ret is False:
                break
            val = not bool(ret)
            # exit on the first failure or if done
            if not val or i == 0:
                return val

    # rerun even if hash randomization is not supported:
    # fall back to running _doctest in this process.
    for i in range(rerun, -1, -1):
        print_counter(i)
        val = not bool(_doctest(*paths, **kwargs))
        if not val or i == 0:
            return val
def _doctest(*paths, **kwargs):
    """
    Internal function that actually runs the doctests.

    All keyword arguments from ``doctest()`` are passed to this function
    except for ``subprocess``.

    Returns 0 if tests passed and 1 if they failed.  See the docstrings of
    ``doctest()`` and ``test()`` for more information.
    """
    normal = kwargs.get("normal", False)
    verbose = kwargs.get("verbose", False)
    blacklist = kwargs.get("blacklist", [])
    split = kwargs.get('split', None)
    # Files that can never be doctested in this environment.
    blacklist.extend([
        "doc/src/modules/plotting.rst",  # generates live plots
        "sympy/utilities/compilef.py",  # needs tcc
        "sympy/physics/gaussopt.py",  # raises deprecation warning
    ])

    # Extend the blacklist depending on which optional dependencies are
    # actually importable.
    if import_module('numpy') is None:
        blacklist.extend([
            "sympy/plotting/experimental_lambdify.py",
            "sympy/plotting/plot_implicit.py",
            "examples/advanced/autowrap_integrators.py",
            "examples/advanced/autowrap_ufuncify.py",
            "examples/intermediate/sample.py",
            "examples/intermediate/mplot2d.py",
            "examples/intermediate/mplot3d.py",
            "doc/src/modules/numeric-computation.rst"
        ])
    else:
        if import_module('matplotlib') is None:
            blacklist.extend([
                "examples/intermediate/mplot2d.py",
                "examples/intermediate/mplot3d.py"
            ])
        else:
            # don't display matplotlib windows
            from sympy.plotting.plot import unset_show
            unset_show()

    if import_module('pyglet') is None:
        blacklist.extend(["sympy/plotting/pygletplot"])

    if import_module('theano') is None:
        blacklist.extend(["doc/src/modules/numeric-computation.rst"])

    # disabled because of doctest failures in asmeurer's bot
    blacklist.extend([
        "sympy/utilities/autowrap.py",
        "examples/advanced/autowrap_integrators.py",
        "examples/advanced/autowrap_ufuncify.py"
    ])

    # blacklist these modules until issue 4840 is resolved
    blacklist.extend([
        "sympy/conftest.py",
        "sympy/utilities/benchmarking.py"
    ])

    blacklist = convert_to_native_paths(blacklist)

    # Disable warnings for external modules
    import sympy.external
    sympy.external.importtools.WARN_OLD_VERSION = False
    sympy.external.importtools.WARN_NOT_INSTALLED = False

    # Show deprecation warnings
    import warnings
    warnings.simplefilter("error", SymPyDeprecationWarning)

    r = PyTestReporter(verbose, split=split)
    t = SymPyDocTests(r, normal)

    test_files = t.get_test_files('sympy')
    test_files.extend(t.get_test_files('examples', init_only=False))

    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]

    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # take only what was requested...but not blacklisted items
        # and allow for partial match anywhere or fnmatch of name
        paths = convert_to_native_paths(paths)
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        matched = split_list(matched, split)

    t._testfiles.extend(matched)

    # run the tests and record the result for this *py portion of the tests
    if t._testfiles:
        failed = not t.test()
    else:
        failed = False

    # N.B.
    # --------------------------------------------------------------------
    # Here we test *.rst files at or below doc/src. Code from these must
    # be self supporting in terms of imports since there is no importing
    # of necessary modules by doctest.testfile. If you try to pass *.py
    # files through this they might fail because they will lack the needed
    # imports and smarter parsing that can be done with source code.
    #
    test_files = t.get_test_files('doc/src', '*.rst', init_only=False)
    test_files.sort()

    not_blacklisted = [f for f in test_files
                       if not any(b in f for b in blacklist)]

    if len(paths) == 0:
        matched = not_blacklisted
    else:
        # Take only what was requested as long as it's not on the blacklist.
        # Paths were already made native in *py tests so don't repeat here.
        # There's no chance of having a *py file slip through since we
        # only have *rst files in test_files.
        matched = []
        for f in not_blacklisted:
            basename = os.path.basename(f)
            for p in paths:
                if p in f or fnmatch(basename, p):
                    matched.append(f)
                    break

    if split:
        matched = split_list(matched, split)

    setup_pprint()
    first_report = True
    for rst_file in matched:
        if not os.path.isfile(rst_file):
            continue
        old_displayhook = sys.displayhook
        try:
            out = sympytestfile(
                rst_file, module_relative=False, encoding='utf-8',
                optionflags=pdoctest.ELLIPSIS | pdoctest.NORMALIZE_WHITESPACE |
                pdoctest.IGNORE_EXCEPTION_DETAIL)
        finally:
            # make sure we return to the original displayhook in case some
            # doctest has changed that
            sys.displayhook = old_displayhook

        rstfailed, tested = out
        if tested:
            failed = rstfailed or failed
            if first_report:
                first_report = False
                msg = 'rst doctests start'
                if not t._testfiles:
                    r.start(msg=msg)
                else:
                    r.write_center(msg)
                    print()
            # use as the id, everything past the first 'sympy'
            file_id = rst_file[rst_file.find('sympy') + len('sympy') + 1:]
            print(file_id, end=" ")
            # get at least the name out so it is know who is being tested
            wid = r.terminal_width - len(file_id) - 1  # update width
            test_file = '[%s]' % (tested)
            report = '[%s]' % (rstfailed or 'OK')
            print(''.join(
                [test_file, ' '*(wid - len(test_file) - len(report)), report])
            )

    # the doctests for *py will have printed this message already if there was
    # a failure, so now only print it if there was intervening reporting by
    # testing the *rst as evidenced by first_report no longer being True.
    if not first_report and failed:
        print()
        print("DO *NOT* COMMIT!")
    return int(failed)
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')


def split_list(l, split):
    """
    Splits a list into part a of b

    split should be a string of the form 'a/b'. For instance, '1/3' would give
    the split one of three.

    If the length of the list is not divisible by the number of splits, the
    last split will have more items.

    >>> from sympy.utilities.runtests import split_list
    >>> a = list(range(10))
    >>> split_list(a, '1/3')
    [0, 1, 2]
    >>> split_list(a, '2/3')
    [3, 4, 5]
    >>> split_list(a, '3/3')
    [6, 7, 8, 9]
    """
    match = sp.match(split)
    if match is None:
        raise ValueError("split must be a string of the form a/b where a and b are ints")
    part, total = map(int, match.groups())
    # Integer arithmetic puts any remainder into the last part.
    start = (part - 1)*len(l)//total
    stop = part*len(l)//total
    return l[start:stop]
from collections import namedtuple
# (failed, attempted) counts for one tested file; mirrors doctest.TestResults.
SymPyTestResults = namedtuple('TestResults', 'failed attempted')
def sympytestfile(filename, module_relative=True, name=None, package=None,
                  globs=None, verbose=None, report=True, optionflags=0,
                  extraglobs=None, raise_on_error=False,
                  parser=pdoctest.DocTestParser(), encoding=None):
    """
    Test examples in the given file. Return (#failures, #tests).

    Optional keyword arg ``module_relative`` specifies how filenames
    should be interpreted:

    - If ``module_relative`` is True (the default), then ``filename``
      specifies a module-relative path. By default, this path is
      relative to the calling module's directory; but if the
      ``package`` argument is specified, then it is relative to that
      package. To ensure os-independence, ``filename`` should use
      "/" characters to separate path segments, and should not
      be an absolute path (i.e., it may not begin with "/").

    - If ``module_relative`` is False, then ``filename`` specifies an
      os-specific path. The path may be absolute or relative (to
      the current working directory).

    Optional keyword arg ``name`` gives the name of the test; by default
    use the file's basename.

    Optional keyword argument ``package`` is a Python package or the
    name of a Python package whose directory should be used as the
    base directory for a module relative filename. If no package is
    specified, then the calling module's directory is used as the base
    directory for module relative filenames. It is an error to
    specify ``package`` if ``module_relative`` is False.

    Optional keyword arg ``globs`` gives a dict to be used as the globals
    when executing examples; by default, use {}. A copy of this dict
    is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg ``extraglobs`` gives a dictionary that should be
    merged into the globals that are used to execute examples. By
    default, no extra globals are used.

    Optional keyword arg ``verbose`` prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg ``report`` prints a summary at the end when true,
    else prints nothing at the end. In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg ``optionflags`` or's together module constants,
    and defaults to 0. Possible values (see the docs for details):

    - DONT_ACCEPT_TRUE_FOR_1
    - DONT_ACCEPT_BLANKLINE
    - NORMALIZE_WHITESPACE
    - ELLIPSIS
    - SKIP
    - IGNORE_EXCEPTION_DETAIL
    - REPORT_UDIFF
    - REPORT_CDIFF
    - REPORT_NDIFF
    - REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg ``raise_on_error`` raises an exception on the
    first unexpected exception or failure. This allows failures to be
    post-mortem debugged.

    Optional keyword arg ``parser`` specifies a DocTestParser (or
    subclass) that should be used to extract tests from the files.

    Optional keyword arg ``encoding`` specifies an encoding that should
    be used to convert the file to unicode.

    Advanced tomfoolery: testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master. Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary. Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    # NOTE(review): the ``parser`` default is a single DocTestParser instance
    # shared across all calls (mutable-default pattern); presumably harmless
    # because DocTestParser keeps no per-call state — confirm before reusing.
    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")
    # Relativize the path.  pdoctest._load_testfile is a private doctest API;
    # its Python 2 variant has no ``encoding`` argument, hence the branch.
    if not PY3:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative)
        if encoding is not None:
            text = text.decode(encoding)
    else:
        text, filename = pdoctest._load_testfile(
            filename, package, module_relative, encoding)
    # If no name was given, then use the file's name.
    if name is None:
        name = os.path.basename(filename)
    # Assemble the globals.
    if globs is None:
        globs = {}
    else:
        globs = globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    if '__name__' not in globs:
        globs['__name__'] = '__main__'
    if raise_on_error:
        runner = pdoctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = SymPyDocTestRunner(verbose=verbose, optionflags=optionflags)
        # Swap in the numeric-aware checker so float output compares loosely.
        runner._checker = SymPyOutputChecker()
    # Read the file, convert it to a test, and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test, compileflags=future_flags)
    if report:
        runner.summarize()
    # Merge results into the module-global doctest.master, matching the
    # behavior of stdlib doctest.testfile.
    if pdoctest.master is None:
        pdoctest.master = runner
    else:
        pdoctest.master.merge(runner)
    return SymPyTestResults(runner.failures, runner.tries)
class SymPyTests(object):
    """Collects and runs SymPy's ``test_*.py`` unit-test files.

    Test files are accumulated in ``self._testfiles`` (populated by the
    caller, typically via :meth:`get_test_files`); :meth:`test` executes
    them and every outcome (pass/fail/exception/skip/XFAIL/XPASS) is
    forwarded to the ``reporter`` object.  A fixed ``seed`` makes the
    randomized test ordering and per-test reseeding reproducible.
    """
    def __init__(self, reporter, kw="", post_mortem=False,
                 seed=None):
        # ``kw`` is expected to be an iterable of substrings used to select
        # test functions by name; the default "" disables filtering.
        self._post_mortem = post_mortem
        self._kw = kw
        self._count = 0
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._testfiles = []
        # Remember the seed so shuffling/reseeding can be reproduced later.
        self._seed = seed if seed is not None else random.random()
    def test(self, sort=False, timeout=False, slow=False, enhance_asserts=False):
        """
        Runs the tests returning True if all tests pass, otherwise False.
        If sort=False run tests in random order.
        """
        if sort:
            self._testfiles.sort()
        else:
            from random import shuffle
            random.seed(self._seed)
            shuffle(self._testfiles)
        self._reporter.start(self._seed)
        for f in self._testfiles:
            try:
                self.test_file(f, sort, timeout, slow, enhance_asserts)
            except KeyboardInterrupt:
                print(" interrupted by user")
                self._reporter.finish()
                raise
        return self._reporter.finish()
    def _enhance_asserts(self, source):
        """Rewrite plain ``assert a OP b`` statements in ``source`` so that
        a failure message shows the compared values.

        Returns the transformed AST (with locations fixed up) ready for
        ``compile``.  Only ``Compare`` tests are rewritten; all other
        asserts pass through unchanged.
        """
        from ast import (NodeTransformer, Compare, Name, Store, Load, Tuple,
            Assign, BinOp, Str, Mod, Assert, parse, fix_missing_locations)
        ops = {"Eq": '==', "NotEq": '!=', "Lt": '<', "LtE": '<=',
            "Gt": '>', "GtE": '>=', "Is": 'is', "IsNot": 'is not',
            "In": 'in', "NotIn": 'not in'}
        class Transform(NodeTransformer):
            def visit_Assert(self, stmt):
                if isinstance(stmt.test, Compare):
                    compare = stmt.test
                    values = [compare.left] + compare.comparators
                    names = [ "_%s" % i for i, _ in enumerate(values) ]
                    names_store = [ Name(n, Store()) for n in names ]
                    names_load = [ Name(n, Load()) for n in names ]
                    target = Tuple(names_store, Store())
                    value = Tuple(values, Load())
                    # First evaluate every operand once into temporaries, then
                    # assert on the temporaries so the message can show them.
                    assign = Assign([target], value)
                    new_compare = Compare(names_load[0], compare.ops, names_load[1:])
                    msg_format = "\n%s " + "\n%s ".join([ ops[op.__class__.__name__] for op in compare.ops ]) + "\n%s"
                    msg = BinOp(Str(msg_format), Mod(), Tuple(names_load, Load()))
                    test = Assert(new_compare, msg, lineno=stmt.lineno, col_offset=stmt.col_offset)
                    return [assign, test]
                else:
                    return stmt
        tree = parse(source)
        new_tree = Transform().visit(tree)
        return fix_missing_locations(new_tree)
    def test_file(self, filename, sort=True, timeout=False, slow=False, enhance_asserts=False):
        """Execute every selected ``test_*`` function defined in ``filename``
        and report each outcome to the reporter.
        """
        funcs = []
        try:
            gl = {'__file__': filename}
            try:
                if PY3:
                    open_file = lambda: open(filename, encoding="utf8")
                else:
                    open_file = lambda: open(filename)
                with open_file() as f:
                    source = f.read()
                if self._kw:
                    # Cheap textual pre-filter: skip the whole file when no
                    # ``def`` line mentions any of the requested keywords.
                    for l in source.splitlines():
                        if l.lstrip().startswith('def '):
                            if any(l.find(k) != -1 for k in self._kw):
                                break
                    else:
                        return
                if enhance_asserts:
                    try:
                        source = self._enhance_asserts(source)
                    except ImportError:
                        pass
                code = compile(source, filename, "exec")
                exec_(code, gl)
            except (SystemExit, KeyboardInterrupt):
                raise
            except ImportError:
                self._reporter.import_error(filename, sys.exc_info())
                return
            clear_cache()
            self._count += 1
            random.seed(self._seed)
            pytestfile = ""
            if "XFAIL" in gl:
                pytestfile = inspect.getsourcefile(gl["XFAIL"])
            pytestfile2 = ""
            if "slow" in gl:
                pytestfile2 = inspect.getsourcefile(gl["slow"])
            disabled = gl.get("disabled", False)
            if not disabled:
                # we need to filter only those functions that begin with 'test_'
                # that are defined in the testing file or in the file where
                # is defined the XFAIL decorator
                funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
                    (inspect.isfunction(gl[f]) or inspect.ismethod(gl[f])) and
                    (inspect.getsourcefile(gl[f]) == filename or
                     inspect.getsourcefile(gl[f]) == pytestfile or
                     inspect.getsourcefile(gl[f]) == pytestfile2)]
                if slow:
                    funcs = [f for f in funcs if getattr(f, '_slow', False)]
                # Sorting of XFAILed functions isn't fixed yet :-(
                funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
                i = 0
                while i < len(funcs):
                    if isgeneratorfunction(funcs[i]):
                        # some tests can be generators, that return the actual
                        # test functions. We unpack it below:
                        f = funcs.pop(i)
                        for fg in f():
                            func = fg[0]
                            args = fg[1:]
                            # BUGFIX: bind func/args as default values so each
                            # wrapper keeps its own pair.  A plain
                            # ``lambda: func(*args)`` late-binds the loop
                            # variables and every wrapper would end up calling
                            # the *last* generated test.
                            fgw = lambda func=func, args=args: func(*args)
                            funcs.insert(i, fgw)
                            i += 1
                    else:
                        i += 1
                # drop functions that are not selected with the keyword expression:
                funcs = [x for x in funcs if self.matches(x)]
            if not funcs:
                return
        except Exception:
            # Report the filename before propagating so the failure is
            # attributed to the right file.
            self._reporter.entering_filename(filename, len(funcs))
            raise
        self._reporter.entering_filename(filename, len(funcs))
        if not sort:
            random.shuffle(funcs)
        for f in funcs:
            self._reporter.entering_test(f)
            try:
                if getattr(f, '_slow', False) and not slow:
                    raise Skipped("Slow")
                if timeout:
                    self._timeout(f, timeout)
                else:
                    random.seed(self._seed)
                    f()
            except KeyboardInterrupt:
                if getattr(f, '_slow', False):
                    self._reporter.test_skip("KeyboardInterrupt")
                else:
                    raise
            except Exception:
                if timeout:
                    signal.alarm(0)  # Disable the alarm. It could not be handled before.
                t, v, tr = sys.exc_info()
                if t is AssertionError:
                    self._reporter.test_fail((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
                elif t.__name__ == "Skipped":
                    # Compared by name, not identity, because the exception
                    # classes may be re-imported from different module paths.
                    self._reporter.test_skip(v)
                elif t.__name__ == "XFail":
                    self._reporter.test_xfail()
                elif t.__name__ == "XPass":
                    self._reporter.test_xpass(v)
                else:
                    self._reporter.test_exception((t, v, tr))
                    if self._post_mortem:
                        pdb.post_mortem(tr)
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()
    def _timeout(self, function, timeout):
        """Run ``function``; skip it (via Skipped) if it exceeds ``timeout``
        seconds.  NOTE: relies on SIGALRM, so this only works on POSIX.
        """
        def callback(x, y):
            signal.alarm(0)
            raise Skipped("Timeout")
        signal.signal(signal.SIGALRM, callback)
        signal.alarm(timeout)  # Set an alarm with a given timeout
        function()
        signal.alarm(0)  # Disable the alarm
    def matches(self, x):
        """
        Does the keyword expression self._kw match "x"? Returns True/False.
        Always returns True if self._kw is "".
        """
        if not self._kw:
            return True
        for kw in self._kw:
            if x.__name__.find(kw) != -1:
                return True
        return False
    def get_test_files(self, dir, pat='test_*.py'):
        """
        Returns the list of test_*.py (default) files at or below directory
        ``dir`` relative to the sympy home directory.
        """
        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files if fnmatch(f, pat)])
        return sorted([sys_normcase(gi) for gi in g])
class SymPyDocTests(object):
    """Collects and runs doctests from SymPy source files.

    Files are accumulated in ``self._testfiles`` by the caller (via
    :meth:`get_test_files`); :meth:`test` runs them and reports results
    through the ``reporter``.  ``normal`` controls whether doctests get an
    implicitly populated global namespace or must do their own imports.
    """
    def __init__(self, reporter, normal):
        self._count = 0
        self._root_dir = sympy_dir
        self._reporter = reporter
        self._reporter.root_dir(self._root_dir)
        self._normal = normal
        self._testfiles = []
    def test(self):
        """
        Runs the tests and returns True if all tests pass, otherwise False.
        """
        self._reporter.start()
        for f in self._testfiles:
            try:
                self.test_file(f)
            except KeyboardInterrupt:
                print(" interrupted by user")
                self._reporter.finish()
                raise
        return self._reporter.finish()
    def test_file(self, filename):
        """Find and run all doctests in ``filename``, reporting each test's
        pass/fail/skip status to the reporter.
        """
        clear_cache()
        from sympy.core.compatibility import StringIO
        # Module dotted path, relative to the sympy root, without ".py".
        rel_name = filename[len(self._root_dir) + 1:]
        dirname, file = os.path.split(filename)
        module = rel_name.replace(os.sep, '.')[:-3]
        if rel_name.startswith("examples"):
            # Examples files do not have __init__.py files,
            # So we have to temporarily extend sys.path to import them
            sys.path.insert(0, dirname)
            module = file[:-3]  # remove ".py"
        setup_pprint()
        try:
            module = pdoctest._normalize_module(module)
            tests = SymPyDocTestFinder().find(module)
        except (SystemExit, KeyboardInterrupt):
            raise
        except ImportError:
            self._reporter.import_error(filename, sys.exc_info())
            return
        finally:
            # Undo the sys.path manipulation even on error.
            if rel_name.startswith("examples"):
                del sys.path[0]
        tests = [test for test in tests if len(test.examples) > 0]
        # By default tests are sorted by alphabetical order by function name.
        # We sort by line number so one can edit the file sequentially from
        # bottom to top. However, if there are decorated functions, their line
        # numbers will be too large and for now one must just search for these
        # by text and function name.
        tests.sort(key=lambda x: -x.lineno)
        if not tests:
            return
        self._reporter.entering_filename(filename, len(tests))
        for test in tests:
            assert len(test.examples) != 0
            # check if there are external dependencies which need to be met
            if '_doctest_depends_on' in test.globs:
                if not self._process_dependencies(test.globs['_doctest_depends_on']):
                    self._reporter.test_skip()
                    continue
            runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
                    pdoctest.NORMALIZE_WHITESPACE |
                    pdoctest.IGNORE_EXCEPTION_DETAIL)
            # Numeric-aware output comparison (see SymPyOutputChecker).
            runner._checker = SymPyOutputChecker()
            # Capture everything the doctest prints so failures can be shown.
            old = sys.stdout
            new = StringIO()
            sys.stdout = new
            # If the testing is normal, the doctests get importing magic to
            # provide the global namespace. If not normal (the default) then
            # then must run on their own; all imports must be explicit within
            # a function's docstring. Once imported that import will be
            # available to the rest of the tests in a given function's
            # docstring (unless clear_globs=True below).
            if not self._normal:
                test.globs = {}
                # if this is uncommented then all the test would get is what
                # comes by default with a "from sympy import *"
                #exec('from sympy import *') in test.globs
            test.globs['print_function'] = print_function
            try:
                f, t = runner.run(test, compileflags=future_flags,
                                  out=new.write, clear_globs=False)
            except KeyboardInterrupt:
                raise
            finally:
                # Always restore stdout, even if the doctest blew up.
                sys.stdout = old
            if f > 0:
                self._reporter.doctest_fail(test.name, new.getvalue())
            else:
                self._reporter.test_pass()
        self._reporter.leaving_filename()
    def get_test_files(self, dir, pat='*.py', init_only=True):
        """
        Returns the list of \*.py files (default) from which docstrings
        will be tested which are at or below directory ``dir``. By default,
        only those that have an __init__.py in their parent directory
        and do not start with ``test_`` will be included.
        """
        def importable(x):
            """
            Checks if given pathname x is an importable module by checking for
            __init__.py file.
            Returns True/False.
            Currently we only test if the __init__.py file exists in the
            directory with the file "x" (in theory we should also test all the
            parent dirs).
            """
            init_py = os.path.join(os.path.dirname(x), "__init__.py")
            return os.path.exists(init_py)
        dir = os.path.join(self._root_dir, convert_to_native_paths([dir])[0])
        g = []
        for path, folders, files in os.walk(dir):
            g.extend([os.path.join(path, f) for f in files
                      if not f.startswith('test_') and fnmatch(f, pat)])
        if init_only:
            # skip files that are not importable (i.e. missing __init__.py)
            g = [x for x in g if importable(x)]
        return [sys_normcase(gi) for gi in g]
    def _process_dependencies(self, deps):
        """
        Returns ``False`` if some dependencies are not met and the test should be
        skipped otherwise returns ``True``.

        ``deps`` keys handled: 'exe' (required executables on PATH),
        'modules' (importable Python modules), 'disable_viewers' (external
        viewer programs to stub out), 'pyglet' (monkey-patch pyglet windows).
        """
        executables = deps.get('exe', None)
        moduledeps = deps.get('modules', None)
        viewers = deps.get('disable_viewers', None)
        pyglet = deps.get('pyglet', None)
        # print deps
        if executables is not None:
            for ex in executables:
                found = find_executable(ex)
                if found is None:
                    return False
        if moduledeps is not None:
            for extmod in moduledeps:
                if extmod == 'matplotlib':
                    matplotlib = import_module(
                        'matplotlib',
                        __import__kwargs={'fromlist':
                                          ['pyplot', 'cm', 'collections']},
                        min_module_version='1.0.0', catch=(RuntimeError,))
                    if matplotlib is not None:
                        pass
                    else:
                        return False
                else:
                    # TODO min version support
                    mod = import_module(extmod)
                    if mod is not None:
                        version = "unknown"
                        if hasattr(mod, '__version__'):
                            version = mod.__version__
                    else:
                        return False
        if viewers is not None:
            # Shadow the named viewer executables with no-op scripts so that
            # doctests which would launch them run headlessly.
            # NOTE(review): the temporary directory is never removed and PATH
            # grows on every call within the process — deliberate best-effort,
            # but worth confirming.
            import tempfile
            tempdir = tempfile.mkdtemp()
            os.environ['PATH'] = '%s:%s' % (tempdir, os.environ['PATH'])
            if PY3:
                vw = '#!/usr/bin/env python3\n' \
                     'import sys\n' \
                     'if len(sys.argv) <= 1:\n' \
                     '    exit("wrong number of args")\n'
            else:
                vw = '#!/usr/bin/env python\n' \
                     'import sys\n' \
                     'if len(sys.argv) <= 1:\n' \
                     '    exit("wrong number of args")\n'
            for viewer in viewers:
                with open(os.path.join(tempdir, viewer), 'w') as fh:
                    fh.write(vw)
                # make the file executable
                os.chmod(os.path.join(tempdir, viewer),
                         stat.S_IREAD | stat.S_IWRITE | stat.S_IXUSR)
        if pyglet:
            # monkey-patch pyglet s.t. it does not open a window during
            # doctesting
            import pyglet
            class DummyWindow(object):
                def __init__(self, *args, **kwargs):
                    self.has_exit=True
                    self.width = 600
                    self.height = 400
                def set_vsync(self, x):
                    pass
                def switch_to(self):
                    pass
                def push_handlers(self, x):
                    pass
                def close(self):
                    pass
            pyglet.window.Window = DummyWindow
        return True
class SymPyDocTestFinder(DocTestFinder):
    """
    A class used to extract the DocTests that are relevant to a given
    object, from its docstring and the docstrings of its contained
    objects. Doctests can currently be extracted from the following
    object types: modules, functions, classes, methods, staticmethods,
    classmethods, and properties.

    Modified from doctest's version by looking harder for code in the
    case that it looks like the the code comes from a different module.
    In the case of decorated functions (e.g. @vectorize) they appear
    to come from a different module (e.g. multidemensional) even though
    their code is not there.
    """
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to ``tests``.
        """
        if self._verbose:
            print('Finding tests in %s' % name)
        # If we've already processed this object, then ignore it.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Make sure we don't run doctests for classes outside of sympy, such
        # as in numpy or scipy.
        if inspect.isclass(obj):
            if obj.__module__.split('.')[0] != 'sympy':
                return
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        if not self._recurse:
            return
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj):
            for rawname, val in obj.__dict__.items():
                # Recurse to functions & classes.
                if inspect.isfunction(val) or inspect.isclass(val):
                    # Make sure we don't run doctests functions or classes
                    # from different modules
                    if val.__module__ != module.__name__:
                        continue
                    assert self._from_module(module, val), \
                        "%s is not in module %s (rawname %s)" % (val, module, rawname)
                    try:
                        valname = '%s.%s' % (name, rawname)
                        self._find(tests, val, valname, module,
                                   source_lines, globs, seen)
                    except KeyboardInterrupt:
                        raise
            # Look for tests in a module's __test__ dictionary.
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, string_types):
                    raise ValueError("SymPyDocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isfunction(val) or inspect.isclass(val) or
                        inspect.ismethod(val) or inspect.ismodule(val) or
                        isinstance(val, string_types)):
                    raise ValueError("SymPyDocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj):
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__
                # Recurse to methods, properties, and nested classes.
                if (inspect.isfunction(val) or
                        inspect.isclass(val) or
                        isinstance(val, property)):
                    # Make sure we don't run doctests functions or classes
                    # from different modules
                    if isinstance(val, property):
                        # Properties carry their defining module on fget.
                        if hasattr(val.fget, '__module__'):
                            if val.fget.__module__ != module.__name__:
                                continue
                    else:
                        if val.__module__ != module.__name__:
                            continue
                    assert self._from_module(module, val), \
                        "%s is not in module %s (valname %s)" % (
                            val, module, valname)
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
    def _get_test(self, obj, name, module, globs, source_lines):
        """
        Return a DocTest for the given object, if it defines a docstring;
        otherwise, return None.
        """
        lineno = None
        # Extract the object's docstring. If it doesn't have one,
        # then return None (no test for this object).
        if isinstance(obj, string_types):
            # obj is a string in the case for objects in the polys package.
            # Note that source_lines is a binary string (compiled polys
            # modules), which can't be handled by _find_lineno so determine
            # the line number here.
            # NOTE(review): the pattern "line \d+" is a non-raw string; the
            # value is unchanged (\d is not a string escape) but newer
            # Pythons warn about it.
            docstring = obj
            matches = re.findall("line \d+", name)
            assert len(matches) == 1, \
                "string '%s' does not contain lineno " % name
            # NOTE: this is not the exact linenumber but its better than no
            # lineno ;)
            lineno = int(matches[0][5:])
        else:
            try:
                if obj.__doc__ is None:
                    docstring = ''
                else:
                    docstring = obj.__doc__
                    if not isinstance(docstring, string_types):
                        docstring = str(docstring)
            except (TypeError, AttributeError):
                docstring = ''
        # Don't bother if the docstring is empty.
        if self._exclude_empty and not docstring:
            return None
        # check that properties have a docstring because _find_lineno
        # assumes it
        if isinstance(obj, property):
            if obj.fget.__doc__ is None:
                return None
        # Find the docstring's location in the file.
        if lineno is None:
            # handling of properties is not implemented in _find_lineno so do
            # it here
            if hasattr(obj, 'func_closure') and obj.func_closure is not None:
                # Decorated function (Py2 attribute name): look at the
                # wrapped object captured in the closure.
                tobj = obj.func_closure[0].cell_contents
            elif isinstance(obj, property):
                tobj = obj.fget
            else:
                tobj = obj
            lineno = self._find_lineno(tobj, source_lines)
        if lineno is None:
            return None
        # Return a DocTest for this object.
        if module is None:
            filename = None
        else:
            filename = getattr(module, '__file__', module.__name__)
            if filename[-4:] in (".pyc", ".pyo"):
                # Point at the source file, not the compiled one.
                filename = filename[:-1]
        # Propagate declared doctest dependencies so the runner can skip
        # tests whose requirements are not installed.
        if hasattr(obj, '_doctest_depends_on'):
            globs['_doctest_depends_on'] = obj._doctest_depends_on
        else:
            globs['_doctest_depends_on'] = {}
        return self._parser.get_doctest(docstring, globs, name,
                                        filename, lineno)
class SymPyDocTestRunner(DocTestRunner):
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The ``run`` method is used to process a single DocTest case. It
    returns a tuple ``(f, t)``, where ``t`` is the number of test cases
    tried, and ``f`` is the number of test cases that failed.

    Modified from the doctest version to not reset the sys.displayhook (see
    issue 5140).

    See the docstring of the original DocTestRunner for more information.
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in ``test``, and display the results using the
        writer function ``out``.

        The examples are run in the namespace ``test.globs``. If
        ``clear_globs`` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection. If you would like to examine the namespace after
        the test completes, then use ``clear_globs=False``.

        ``compileflags`` gives the set of flags that should be used by
        the Python compiler when running the examples. If not
        specified, then it will default to the set of future-import
        flags that apply to ``globs``.

        The output of each example is checked using
        ``SymPyDocTestRunner.check_output``, and the results are
        formatted by the ``SymPyDocTestRunner.report_*`` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = pdoctest._extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            out = save_stdout.write
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_set_trace = pdb.set_trace
        self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = pdoctest.linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        try:
            # Make ``print_function`` visible to the examples (Py2/3 compat).
            test.globs['print_function'] = print_function
            return self.__run(test, compileflags, out)
        finally:
            # Restore all the patched globals, whatever happened above.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            linecache.getlines = self.save_linecache_getlines
            if clear_globs:
                test.globs.clear()
# We have to override the name mangled methods.
# ``__patched_linecache_getlines`` and ``__run`` are private (name-mangled)
# DocTestRunner methods; rebinding them under this class's mangled names lets
# SymPyDocTestRunner.run call them as self.__run etc.
SymPyDocTestRunner._SymPyDocTestRunner__patched_linecache_getlines = \
    DocTestRunner._DocTestRunner__patched_linecache_getlines
SymPyDocTestRunner._SymPyDocTestRunner__run = DocTestRunner._DocTestRunner__run
SymPyDocTestRunner._SymPyDocTestRunner__record_outcome = \
    DocTestRunner._DocTestRunner__record_outcome
class SymPyOutputChecker(pdoctest.OutputChecker):
    """
    Output checker that, unlike the stdlib one, compares floating point
    numbers appearing in doctest output numerically (with an absolute
    tolerance of 1e-5) instead of textually.
    """
    def __init__(self):
        # NOTE: the stdlib OutputChecker defines no __init__ of its own, so
        # there is no base initializer to call here.
        float_core = r'(\d+\.\d*|\.\d+)'
        # floats in the 'want' string may carry a trailing ellipsis
        ellipsis_float = float_core + r'(\.{3})?'
        front_sep = r'\s|\+|\-|\*|,'
        back_sep = front_sep + r'|j|e'
        def number_pattern(num):
            # A float counts only when delimited by separators (or the
            # string boundaries), so digits inside words don't match.
            at_start = r'^%s(?=%s|$)' % (num, back_sep)
            in_middle = r'(?<=%s)%s(?=%s|$)' % (front_sep, num, back_sep)
            return re.compile(r'(%s|%s)' % (at_start, in_middle))
        self.num_got_rgx = number_pattern(float_core)
        self.num_want_rgx = number_pattern(ellipsis_float)
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`). These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible. See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # Fast path: literally identical strings always match.
        if got == want:
            return True
        # TODO parse integers as well ?
        # Compare the floats found in both strings numerically; an
        # ellipsis-suffixed float in `want` is exempt from the comparison.
        got_numbers = [m.group(1) for m in self.num_got_rgx.finditer(got)]
        want_numbers = [m.group(1) for m in self.num_want_rgx.finditer(want)]
        if len(got_numbers) != len(want_numbers):
            return False
        if got_numbers:
            substitutes = []
            for g, w in zip(got_numbers, want_numbers):
                if '...' in w:
                    # Keep the actual value; the ELLIPSIS pass below decides.
                    substitutes.append(g)
                    continue
                substitutes.append(w)
                if abs(float(g) - float(w)) > 1e-5:
                    return False
            # Rewrite `got` with the expected numerals so the remaining
            # (textual) comparisons can succeed despite float formatting.
            got = self.num_got_rgx.sub(r'%s', got) % tuple(substitutes)
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & pdoctest.DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            want = re.sub('(?m)^%s\s*?$' % re.escape(pdoctest.BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub('(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings. Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & pdoctest.NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & pdoctest.ELLIPSIS:
            if pdoctest._ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
class Reporter(object):
    """
    Parent class for all reporters.

    Defines no behavior of its own; concrete reporters (e.g.
    ``PyTestReporter`` below) provide the actual reporting interface.
    """
    pass
class PyTestReporter(Reporter):
"""
Py.test like reporter. Should produce output identical to py.test.
"""
    def __init__(self, verbose=False, tb="short", colors=True,
                 force_colors=False, split=None):
        """Initialize counters and display settings for py.test-style output.

        ``tb`` selects the traceback style, ``colors`` enables ANSI colors
        when the output is a terminal, ``force_colors`` enables them
        unconditionally, and ``split`` is the 'a/b' split spec (display only).
        """
        self._verbose = verbose
        self._tb_style = tb
        self._colors = colors
        self._force_colors = force_colors
        # Outcome accumulators, summarized when the run finishes.
        self._xfailed = 0
        self._xpassed = []
        self._failed = []
        self._failed_doctest = []
        self._passed = 0
        self._skipped = 0
        self._exceptions = []
        # Terminal geometry: detected lazily, cached in _terminal_width.
        self._terminal_width = None
        self._default_width = 80
        self._split = split
        # this tracks the x-position of the cursor (useful for positioning
        # things on the screen), without the need for any readline library:
        self._write_pos = 0
        self._line_wrap = False
    def root_dir(self, dir):
        # Remember the project root so paths can be reported relative to it.
        self._root_dir = dir
@property
def terminal_width(self):
if self._terminal_width is not None:
return self._terminal_width
def findout_terminal_width():
if sys.platform == "win32":
# Windows support is based on:
#
# http://code.activestate.com/recipes/
# 440694-determine-size-of-console-window-on-windows/
from ctypes import windll, create_string_buffer
h = windll.kernel32.GetStdHandle(-12)
csbi = create_string_buffer(22)
res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
if res:
import struct
(_, _, _, _, _, left, _, right, _, _, _) = \
struct.unpack("hhhhHhhhhhh", csbi.raw)
return right - left
else:
return self._default_width
if hasattr(sys.stdout, 'isatty') and not sys.stdout.isatty():
return self._default_width # leave PIPEs alone
try:
process = subprocess.Popen(['stty', '-a'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout = process.stdout.read()
if PY3:
stdout = stdout.decode("utf-8")
except (OSError, IOError):
pass
else:
# We support the following output formats from stty:
#
# 1) Linux -> columns 80
# 2) OS X -> 80 columns
# 3) Solaris -> columns = 80
re_linux = r"columns\s+(?P<columns>\d+);"
re_osx = r"(?P<columns>\d+)\s*columns;"
re_solaris = r"columns\s+=\s+(?P<columns>\d+);"
for regex in (re_linux, re_osx, re_solaris):
match = re.search(regex, stdout)
if match is not None:
columns = match.group('columns')
try:
width = int(columns)
except ValueError:
pass
if width != 0:
return width
return self._default_width
width = findout_terminal_width()
self._terminal_width = width
return width
    def write(self, text, color="", align="left", width=None,
              force_colors=False):
        """
        Prints a text on the screen.

        It uses sys.stdout.write(), so no readline library is necessary.

        Parameters
        ==========

        color : choose from the colors below, "" means default color
        align : "left"/"right", "left" is a normal print, "right" is aligned on
                the right-hand side of the screen, filled with spaces if
                necessary
        width : the screen width

        NOTE(review): the ``force_colors`` argument is accepted but never
        consulted; only ``self._force_colors`` is honored — confirm intent.
        """
        color_templates = (
            ("Black", "0;30"),
            ("Red", "0;31"),
            ("Green", "0;32"),
            ("Brown", "0;33"),
            ("Blue", "0;34"),
            ("Purple", "0;35"),
            ("Cyan", "0;36"),
            ("LightGray", "0;37"),
            ("DarkGray", "1;30"),
            ("LightRed", "1;31"),
            ("LightGreen", "1;32"),
            ("Yellow", "1;33"),
            ("LightBlue", "1;34"),
            ("LightPurple", "1;35"),
            ("LightCyan", "1;36"),
            ("White", "1;37"),
        )
        colors = {}
        for name, value in color_templates:
            colors[name] = value
        # ANSI escape sequences: reset and color-set templates.
        c_normal = '\033[0m'
        c_color = '\033[%sm'
        if width is None:
            width = self.terminal_width
        if align == "right":
            if self._write_pos + len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            self.write(" "*(width - self._write_pos - len(text)))
        if not self._force_colors and hasattr(sys.stdout, 'isatty') and not \
                sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""
        elif sys.platform == "win32":
            # Windows consoles don't support ANSI escape sequences
            color = ""
        elif not self._colors:
            color = ""
        if self._line_wrap:
            if text[0] != "\n":
                sys.stdout.write("\n")
        # Avoid UnicodeEncodeError when printing out test failures
        if PY3 and IS_WINDOWS:
            text = text.encode('raw_unicode_escape').decode('utf8', 'ignore')
        elif PY3 and not sys.stdout.encoding.lower().startswith('utf'):
            text = text.encode(sys.stdout.encoding, 'backslashreplace'
                              ).decode(sys.stdout.encoding)
        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" %
                (c_color % colors[color], text, c_normal))
        sys.stdout.flush()
        # Track the cursor column after the last newline so that "right"
        # alignment and wrap detection work on subsequent calls.
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text) - l - 1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width
def write_center(self, text, delim="="):
width = self.terminal_width
if text != "":
text = " %s " % text
idx = (width - len(text)) // 2
t = delim*idx + text + delim*(width - idx - len(text))
self.write(t + "\n")
def write_exception(self, e, val, tb):
t = traceback.extract_tb(tb)
# remove the first item, as that is always runtests.py
t = t[1:]
t = traceback.format_list(t)
self.write("".join(t))
t = traceback.format_exception_only(e, val)
self.write("".join(t))
def start(self, seed=None, msg="test process starts"):
self.write_center(msg)
executable = sys.executable
v = tuple(sys.version_info)
python_version = "%s.%s.%s-%s-%s" % v
implementation = platform.python_implementation()
if implementation == 'PyPy':
implementation += " %s.%s.%s-%s-%s" % sys.pypy_version_info
self.write("executable: %s (%s) [%s]\n" %
(executable, python_version, implementation))
from .misc import ARCH
self.write("architecture: %s\n" % ARCH)
from sympy.core.cache import USE_CACHE
self.write("cache: %s\n" % USE_CACHE)
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
self.write("ground types: %s %s\n" % (GROUND_TYPES, version))
if seed is not None:
self.write("random seed: %d\n" % seed)
from .misc import HASH_RANDOMIZATION
self.write("hash randomization: ")
hash_seed = os.getenv("PYTHONHASHSEED") or '0'
if HASH_RANDOMIZATION and (hash_seed == "random" or int(hash_seed)):
self.write("on (PYTHONHASHSEED=%s)\n" % hash_seed)
else:
self.write("off\n")
if self._split:
self.write("split: %s\n" % self._split)
self.write('\n')
self._t_start = clock()
def finish(self):
self._t_end = clock()
self.write("\n")
global text, linelen
text = "tests finished: %d passed, " % self._passed
linelen = len(text)
def add_text(mytext):
global text, linelen
"""Break new text if too long."""
if linelen + len(mytext) > self.terminal_width:
text += '\n'
linelen = 0
text += mytext
linelen += len(mytext)
if len(self._failed) > 0:
add_text("%d failed, " % len(self._failed))
if len(self._failed_doctest) > 0:
add_text("%d failed, " % len(self._failed_doctest))
if self._skipped > 0:
add_text("%d skipped, " % self._skipped)
if self._xfailed > 0:
add_text("%d expected to fail, " % self._xfailed)
if len(self._xpassed) > 0:
add_text("%d expected to fail but passed, " % len(self._xpassed))
if len(self._exceptions) > 0:
add_text("%d exceptions, " % len(self._exceptions))
add_text("in %.2f seconds" % (self._t_end - self._t_start))
if len(self._xpassed) > 0:
self.write_center("xpassed tests", "_")
for e in self._xpassed:
self.write("%s: %s\n" % (e[0], e[1]))
self.write("\n")
if self._tb_style != "no" and len(self._exceptions) > 0:
for e in self._exceptions:
filename, f, (t, val, tb) = e
self.write_center("", "_")
if f is None:
s = "%s" % filename
else:
s = "%s:%s" % (filename, f.__name__)
self.write_center(s, "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed) > 0:
for e in self._failed:
filename, f, (t, val, tb) = e
self.write_center("", "_")
self.write_center("%s:%s" % (filename, f.__name__), "_")
self.write_exception(t, val, tb)
self.write("\n")
if self._tb_style != "no" and len(self._failed_doctest) > 0:
for e in self._failed_doctest:
filename, msg = e
self.write_center("", "_")
self.write_center("%s" % filename, "_")
self.write(msg)
self.write("\n")
self.write_center(text)
ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
len(self._failed_doctest) == 0
if not ok:
self.write("DO *NOT* COMMIT!\n")
return ok
def entering_filename(self, filename, n):
rel_name = filename[len(self._root_dir) + 1:]
self._active_file = rel_name
self._active_file_error = False
self.write(rel_name)
self.write("[%d] " % n)
def leaving_filename(self):
self.write(" ")
if self._active_file_error:
self.write("[FAIL]", "Red", align="right")
else:
self.write("[OK]", "Green", align="right")
self.write("\n")
if self._verbose:
self.write("\n")
def entering_test(self, f):
self._active_f = f
if self._verbose:
self.write("\n" + f.__name__ + " ")
def test_xfail(self):
self._xfailed += 1
self.write("f", "Green")
def test_xpass(self, v):
message = str(v)
self._xpassed.append((self._active_file, message))
self.write("X", "Green")
def test_fail(self, exc_info):
self._failed.append((self._active_file, self._active_f, exc_info))
self.write("F", "Red")
self._active_file_error = True
def doctest_fail(self, name, error_msg):
# the first line contains "******", remove it:
error_msg = "\n".join(error_msg.split("\n")[1:])
self._failed_doctest.append((name, error_msg))
self.write("F", "Red")
self._active_file_error = True
def test_pass(self, char="."):
self._passed += 1
if self._verbose:
self.write("ok", "Green")
else:
self.write(char, "Green")
def test_skip(self, v=None):
char = "s"
self._skipped += 1
if v is not None:
message = str(v)
if message == "KeyboardInterrupt":
char = "K"
elif message == "Timeout":
char = "T"
elif message == "Slow":
char = "w"
self.write(char, "Blue")
if self._verbose:
self.write(" - ", "Blue")
if v is not None:
self.write(message, "Blue")
def test_exception(self, exc_info):
self._exceptions.append((self._active_file, self._active_f, exc_info))
self.write("E", "Red")
self._active_file_error = True
def import_error(self, filename, exc_info):
self._exceptions.append((filename, None, exc_info))
rel_name = filename[len(self._root_dir) + 1:]
self.write(rel_name)
self.write("[?] Failed to import", "Red")
self.write(" ")
self.write("[FAIL]", "Red", align="right")
self.write("\n")
sympy_dir = get_sympy_dir()
| bsd-3-clause | -6,213,852,699,316,213,000 | 35.779124 | 118 | 0.557635 | false |
40223137/2015abc | static/Brython3.1.0-20150301-090019/Lib/xml/etree/ElementPath.py | 785 | 9477 | #
# ElementTree
# $Id: ElementPath.py 3375 2008-02-13 08:05:08Z fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
# 2007-09-10 fl new selection engine
# 2007-09-12 fl fixed parent selector
# 2007-09-13 fl added iterfind; changed findall to return a list
# 2007-11-30 fl added namespaces support
# 2009-10-30 fl added child element value filter
#
# Copyright (c) 2003-2009 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2009 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
# Licensed to PSF under a Contributor Agreement.
# See http://www.python.org/psf/license for licensing details.
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
xpath_tokenizer_re = re.compile(
"("
"'[^']*'|\"[^\"]*\"|"
"::|"
"//?|"
"\.\.|"
"\(\)|"
"[/.*:\[\]\(\)@=])|"
"((?:\{[^}]+\})?[^/\[\]\(\)@=\s]+)|"
"\s+"
)
def xpath_tokenizer(pattern, namespaces=None):
for token in xpath_tokenizer_re.findall(pattern):
tag = token[1]
if tag and tag[0] != "{" and ":" in tag:
try:
prefix, uri = tag.split(":", 1)
if not namespaces:
raise KeyError
yield token[0], "{%s}%s" % (namespaces[prefix], uri)
except KeyError:
raise SyntaxError("prefix %r not found in prefix map" % prefix)
else:
yield token
def get_parent_map(context):
parent_map = context.parent_map
if parent_map is None:
context.parent_map = parent_map = {}
for p in context.root.iter():
for e in p:
parent_map[e] = p
return parent_map
def prepare_child(next, token):
tag = token[1]
def select(context, result):
for elem in result:
for e in elem:
if e.tag == tag:
yield e
return select
def prepare_star(next, token):
def select(context, result):
for elem in result:
for e in elem:
yield e
return select
def prepare_self(next, token):
def select(context, result):
for elem in result:
yield elem
return select
def prepare_descendant(next, token):
token = next()
if token[0] == "*":
tag = "*"
elif not token[0]:
tag = token[1]
else:
raise SyntaxError("invalid descendant")
def select(context, result):
for elem in result:
for e in elem.iter(tag):
if e is not elem:
yield e
return select
def prepare_parent(next, token):
def select(context, result):
# FIXME: raise error if .. is applied at toplevel?
parent_map = get_parent_map(context)
result_map = {}
for elem in result:
if elem in parent_map:
parent = parent_map[elem]
if parent not in result_map:
result_map[parent] = None
yield parent
return select
def prepare_predicate(next, token):
# FIXME: replace with real parser!!! refs:
# http://effbot.org/zone/simple-iterator-parser.htm
# http://javascript.crockford.com/tdop/tdop.html
signature = []
predicate = []
while 1:
token = next()
if token[0] == "]":
break
if token[0] and token[0][:1] in "'\"":
token = "'", token[0][1:-1]
signature.append(token[0] or "-")
predicate.append(token[1])
signature = "".join(signature)
# use signature to determine predicate type
if signature == "@-":
# [@attribute] predicate
key = predicate[1]
def select(context, result):
for elem in result:
if elem.get(key) is not None:
yield elem
return select
if signature == "@-='":
# [@attribute='value']
key = predicate[1]
value = predicate[-1]
def select(context, result):
for elem in result:
if elem.get(key) == value:
yield elem
return select
if signature == "-" and not re.match("\d+$", predicate[0]):
# [tag]
tag = predicate[0]
def select(context, result):
for elem in result:
if elem.find(tag) is not None:
yield elem
return select
if signature == "-='" and not re.match("\d+$", predicate[0]):
# [tag='value']
tag = predicate[0]
value = predicate[-1]
def select(context, result):
for elem in result:
for e in elem.findall(tag):
if "".join(e.itertext()) == value:
yield elem
break
return select
if signature == "-" or signature == "-()" or signature == "-()-":
# [index] or [last()] or [last()-index]
if signature == "-":
index = int(predicate[0]) - 1
else:
if predicate[0] != "last":
raise SyntaxError("unsupported function")
if signature == "-()-":
try:
index = int(predicate[2]) - 1
except ValueError:
raise SyntaxError("unsupported expression")
else:
index = -1
def select(context, result):
parent_map = get_parent_map(context)
for elem in result:
try:
parent = parent_map[elem]
# FIXME: what if the selector is "*" ?
elems = list(parent.findall(elem.tag))
if elems[index] is elem:
yield elem
except (IndexError, KeyError):
pass
return select
raise SyntaxError("invalid predicate")
ops = {
"": prepare_child,
"*": prepare_star,
".": prepare_self,
"..": prepare_parent,
"//": prepare_descendant,
"[": prepare_predicate,
}
_cache = {}
class _SelectorContext:
parent_map = None
def __init__(self, root):
self.root = root
# --------------------------------------------------------------------
##
# Generate all matching objects.
def iterfind(elem, path, namespaces=None):
# compile selector pattern
if path[-1:] == "/":
path = path + "*" # implicit all (FIXME: keep this?)
try:
selector = _cache[path]
except KeyError:
if len(_cache) > 100:
_cache.clear()
if path[:1] == "/":
raise SyntaxError("cannot use absolute path on element")
next = iter(xpath_tokenizer(path, namespaces)).__next__
token = next()
selector = []
while 1:
try:
selector.append(ops[token[0]](next, token))
except StopIteration:
raise SyntaxError("invalid path")
try:
token = next()
if token[0] == "/":
token = next()
except StopIteration:
break
_cache[path] = selector
# execute selector pattern
result = [elem]
context = _SelectorContext(elem)
for select in selector:
result = select(context, result)
return result
##
# Find first matching object.
def find(elem, path, namespaces=None):
try:
return next(iterfind(elem, path, namespaces))
except StopIteration:
return None
##
# Find all matching objects.
def findall(elem, path, namespaces=None):
return list(iterfind(elem, path, namespaces))
##
# Find text for first matching object.
def findtext(elem, path, default=None, namespaces=None):
try:
elem = next(iterfind(elem, path, namespaces))
return elem.text or ""
except StopIteration:
return default
| gpl-3.0 | -4,461,106,553,009,720,300 | 30.277228 | 79 | 0.551124 | false |
felipecorrea/python-pocket | examples/save_to_pocket.py | 1 | 1208 | #!/usr/bin/env python
'''Add an Item to Pocket'''
__author__ = 'Felipe Borges'
import sys
sys.path.append("..")
import getopt
import pocket
USAGE = '''Usage: save_to_pocket [options] url
This script adds an Item to Pocket.
Options:
-h --help: print this help
--consumer_key : the Pocket API consumer key
--access_token : the user's Pocket Access Token
'''
def print_usage_and_exit():
print USAGE
sys.exit(2)
def main():
try:
shortflags = 'h'
longflags = ['help', 'consumer_key=', 'access_token=']
opts, args = getopt.gnu_getopt(sys.argv[1:], shortflags, longflags)
except getopt.GetoptError:
print_usage_and_exit()
consumer_key = None
access_token = None
for o, a in opts:
if o in ('-h', '--help'):
print_usage_and_exit()
if o in ('--consumer_key'):
consumer_key = a
if o in ('--access_token'):
access_token = a
url = ' '.join(args)
if not url or not consumer_key or not access_token:
print_usage_and_exit()
api = pocket.Api(consumer_key = consumer_key, access_token = access_token)
try:
item = api.add(url)
print 'Item \'%s\' added successfuly!' % item.normal_url
except e:
print e
sys.exit(2)
if __name__ == "__main__":
main()
| apache-2.0 | -106,114,501,525,025,570 | 18.803279 | 75 | 0.639901 | false |
eneldoserrata/marcos_openerp | addons/report_aeroo/barcode/barcode.py | 19 | 2368 | ##############################################################################
#
# Copyright (c) 2008-2011 Alistek Ltd (http://www.alistek.com) All Rights Reserved.
# General contacts <info@alistek.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This module is GPLv3 or newer and incompatible
# with OpenERP SA "AGPL + Private Use License"!
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from code128 import get_code
from code39 import create_c39
from EANBarCode import EanBarCode
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def make_barcode(code, code_type='ean13', rotate=None, height=50, xw=1):
if code:
if code_type.lower()=='ean13':
bar=EanBarCode()
im = bar.getImage(code,height)
elif code_type.lower()=='code128':
im = get_code(code, xw, height)
elif code_type.lower()=='code39':
im = create_c39(height, xw, code)
else:
return StringIO(), 'image/png'
tf = StringIO()
try:
if rotate!=None:
im=im.rotate(int(rotate))
except Exception, e:
pass
im.save(tf, 'png')
size_x = str(im.size[0]/96.0)+'in'
size_y = str(im.size[1]/96.0)+'in'
return tf, 'image/png', size_x, size_y
| agpl-3.0 | -5,927,766,222,914,648,000 | 37.193548 | 83 | 0.646537 | false |
lnfamous/Kernel_CyanogenMod11_Pico | tools/perf/scripts/python/netdev-times.py | 11271 | 15048 | # Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
# which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
# and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
# skb_copy_datagram_iovec
buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
# tx_xmit_list
of_count_rx_skb_list = 0; # overflow count
tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count
tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count
tx_free_list = []; # list of packets which is freed
# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;
# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5
# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
return (dst - src) / 1000000.0
# Display a process of transmitting a packet
def print_transmit(hunk):
if dev != 0 and hunk['dev'].find(dev) < 0:
return
print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
(hunk['dev'], hunk['len'],
nsecs_secs(hunk['queue_t']),
nsecs_nsecs(hunk['queue_t'])/1000,
diff_msec(hunk['queue_t'], hunk['xmit_t']),
diff_msec(hunk['xmit_t'], hunk['free_t']))
# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"
# Display a process of received packets and interrputs associated with
# a NET_RX softirq
def print_receive(hunk):
show_hunk = 0
irq_list = hunk['irq_list']
cpu = irq_list[0]['cpu']
base_t = irq_list[0]['irq_ent_t']
# check if this hunk should be showed
if dev != 0:
for i in range(len(irq_list)):
if irq_list[i]['name'].find(dev) >= 0:
show_hunk = 1
break
else:
show_hunk = 1
if show_hunk == 0:
return
print "%d.%06dsec cpu=%d" % \
(nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
for i in range(len(irq_list)):
print PF_IRQ_ENTRY % \
(diff_msec(base_t, irq_list[i]['irq_ent_t']),
irq_list[i]['irq'], irq_list[i]['name'])
print PF_JOINT
irq_event_list = irq_list[i]['event_list']
for j in range(len(irq_event_list)):
irq_event = irq_event_list[j]
if irq_event['event'] == 'netif_rx':
print PF_NET_RX % \
(diff_msec(base_t, irq_event['time']),
irq_event['skbaddr'])
print PF_JOINT
print PF_SOFT_ENTRY % \
diff_msec(base_t, hunk['sirq_ent_t'])
print PF_JOINT
event_list = hunk['event_list']
for i in range(len(event_list)):
event = event_list[i]
if event['event_name'] == 'napi_poll':
print PF_NAPI_POLL % \
(diff_msec(base_t, event['event_t']), event['dev'])
if i == len(event_list) - 1:
print ""
else:
print PF_JOINT
else:
print PF_NET_RECV % \
(diff_msec(base_t, event['event_t']), event['skbaddr'],
event['len'])
if 'comm' in event.keys():
print PF_WJOINT
print PF_CPY_DGRAM % \
(diff_msec(base_t, event['comm_t']),
event['pid'], event['comm'])
elif 'handle' in event.keys():
print PF_WJOINT
if event['handle'] == "kfree_skb":
print PF_KFREE_SKB % \
(diff_msec(base_t,
event['comm_t']),
event['location'])
elif event['handle'] == "consume_skb":
print PF_CONS_SKB % \
diff_msec(base_t,
event['comm_t'])
print PF_JOINT
def trace_begin():
global show_tx
global show_rx
global dev
global debug
for i in range(len(sys.argv)):
if i == 0:
continue
arg = sys.argv[i]
if arg == 'tx':
show_tx = 1
elif arg =='rx':
show_rx = 1
elif arg.find('dev=',0, 4) >= 0:
dev = arg[4:]
elif arg == 'debug':
debug = 1
if show_tx == 0 and show_rx == 0:
show_tx = 1
show_rx = 1
def trace_end():
# order all events in time
all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
b[EINFO_IDX_TIME]))
# process all events
for i in range(len(all_event_list)):
event_info = all_event_list[i]
name = event_info[EINFO_IDX_NAME]
if name == 'irq__softirq_exit':
handle_irq_softirq_exit(event_info)
elif name == 'irq__softirq_entry':
handle_irq_softirq_entry(event_info)
elif name == 'irq__softirq_raise':
handle_irq_softirq_raise(event_info)
elif name == 'irq__irq_handler_entry':
handle_irq_handler_entry(event_info)
elif name == 'irq__irq_handler_exit':
handle_irq_handler_exit(event_info)
elif name == 'napi__napi_poll':
handle_napi_poll(event_info)
elif name == 'net__netif_receive_skb':
handle_netif_receive_skb(event_info)
elif name == 'net__netif_rx':
handle_netif_rx(event_info)
elif name == 'skb__skb_copy_datagram_iovec':
handle_skb_copy_datagram_iovec(event_info)
elif name == 'net__net_dev_queue':
handle_net_dev_queue(event_info)
elif name == 'net__net_dev_xmit':
handle_net_dev_xmit(event_info)
elif name == 'skb__kfree_skb':
handle_kfree_skb(event_info)
elif name == 'skb__consume_skb':
handle_consume_skb(event_info)
# display receive hunks
if show_rx:
for i in range(len(receive_hunk_list)):
print_receive(receive_hunk_list[i])
# display transmit hunks
if show_tx:
print " dev len Qdisc " \
" netdevice free"
for i in range(len(tx_free_list)):
print_transmit(tx_free_list[i])
if debug:
print "debug buffer status"
print "----------------------------"
print "xmit Qdisc:remain:%d overflow:%d" % \
(len(tx_queue_list), of_count_tx_queue_list)
print "xmit netdevice:remain:%d overflow:%d" % \
(len(tx_xmit_list), of_count_tx_xmit_list)
print "receive:remain:%d overflow:%d" % \
(len(rx_skb_list), of_count_rx_skb_list)
# called from perf, when it finds a correspoinding event
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
return
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
all_event_list.append(event_info)
def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
irq, irq_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
irq, irq_name)
all_event_list.append(event_info)
def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
all_event_list.append(event_info)
def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
napi, dev_name)
all_event_list.append(event_info)
def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, dev_name)
all_event_list.append(event_info)
def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen, rc, dev_name):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen, rc ,dev_name)
all_event_list.append(event_info)
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, protocol, location)
all_event_list.append(event_info)
def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr)
all_event_list.append(event_info)
def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
skbaddr, skblen):
event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
skbaddr, skblen)
all_event_list.append(event_info)
def handle_irq_handler_entry(event_info):
(name, context, cpu, time, pid, comm, irq, irq_name) = event_info
if cpu not in irq_dic.keys():
irq_dic[cpu] = []
irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
irq_dic[cpu].append(irq_record)
def handle_irq_handler_exit(event_info):
(name, context, cpu, time, pid, comm, irq, ret) = event_info
if cpu not in irq_dic.keys():
return
irq_record = irq_dic[cpu].pop()
if irq != irq_record['irq']:
return
irq_record.update({'irq_ext_t':time})
# if an irq doesn't include NET_RX softirq, drop.
if 'event_list' in irq_record.keys():
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_raise(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'sirq_raise'})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_irq_softirq_entry(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}
def handle_irq_softirq_exit(event_info):
(name, context, cpu, time, pid, comm, vec) = event_info
irq_list = []
event_list = 0
if cpu in irq_dic.keys():
irq_list = irq_dic[cpu]
del irq_dic[cpu]
if cpu in net_rx_dic.keys():
sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
event_list = net_rx_dic[cpu]['event_list']
del net_rx_dic[cpu]
if irq_list == [] or event_list == 0:
return
rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
'irq_list':irq_list, 'event_list':event_list}
# merge information realted to a NET_RX softirq
receive_hunk_list.append(rec_data)
def handle_napi_poll(event_info):
(name, context, cpu, time, pid, comm, napi, dev_name) = event_info
if cpu in net_rx_dic.keys():
event_list = net_rx_dic[cpu]['event_list']
rec_data = {'event_name':'napi_poll',
'dev':dev_name, 'event_t':time}
event_list.append(rec_data)
def handle_netif_rx(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu not in irq_dic.keys() \
or len(irq_dic[cpu]) == 0:
return
irq_record = irq_dic[cpu].pop()
if 'event_list' in irq_record.keys():
irq_event_list = irq_record['event_list']
else:
irq_event_list = []
irq_event_list.append({'time':time, 'event':'netif_rx',
'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
irq_record.update({'event_list':irq_event_list})
irq_dic[cpu].append(irq_record)
def handle_netif_receive_skb(event_info):
global of_count_rx_skb_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
if cpu in net_rx_dic.keys():
rec_data = {'event_name':'netif_receive_skb',
'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
event_list = net_rx_dic[cpu]['event_list']
event_list.append(rec_data)
rx_skb_list.insert(0, rec_data)
if len(rx_skb_list) > buffer_budget:
rx_skb_list.pop()
of_count_rx_skb_list += 1
def handle_net_dev_queue(event_info):
global of_count_tx_queue_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, dev_name) = event_info
skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
tx_queue_list.insert(0, skb)
if len(tx_queue_list) > buffer_budget:
tx_queue_list.pop()
of_count_tx_queue_list += 1
def handle_net_dev_xmit(event_info):
global of_count_tx_xmit_list
(name, context, cpu, time, pid, comm,
skbaddr, skblen, rc, dev_name) = event_info
if rc == 0: # NETDEV_TX_OK
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
skb['xmit_t'] = time
tx_xmit_list.insert(0, skb)
del tx_queue_list[i]
if len(tx_xmit_list) > buffer_budget:
tx_xmit_list.pop()
of_count_tx_xmit_list += 1
return
def handle_kfree_skb(event_info):
(name, context, cpu, time, pid, comm,
skbaddr, protocol, location) = event_info
for i in range(len(tx_queue_list)):
skb = tx_queue_list[i]
if skb['skbaddr'] == skbaddr:
del tx_queue_list[i]
return
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if rec_data['skbaddr'] == skbaddr:
rec_data.update({'handle':"kfree_skb",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
def handle_consume_skb(event_info):
(name, context, cpu, time, pid, comm, skbaddr) = event_info
for i in range(len(tx_xmit_list)):
skb = tx_xmit_list[i]
if skb['skbaddr'] == skbaddr:
skb['free_t'] = time
tx_free_list.append(skb)
del tx_xmit_list[i]
return
def handle_skb_copy_datagram_iovec(event_info):
(name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
for i in range(len(rx_skb_list)):
rec_data = rx_skb_list[i]
if skbaddr == rec_data['skbaddr']:
rec_data.update({'handle':"skb_copy_datagram_iovec",
'comm':comm, 'pid':pid, 'comm_t':time})
del rx_skb_list[i]
return
| gpl-2.0 | 5,801,203,324,063,442,000 | 31.431034 | 78 | 0.645335 | false |
DanteOnline/free-art | venv/lib/python3.4/site-packages/django/db/backends/oracle/creation.py | 160 | 17256 | import sys
import time
from django.conf import settings
from django.db.backends.base.creation import BaseDatabaseCreation
from django.db.utils import DatabaseError
from django.utils.functional import cached_property
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
    @cached_property
    def _maindb_connection(self):
        """
        This is analogous to other backends' `_nodb_connection` property,
        which allows access to an "administrative" connection which can
        be used to manage the test databases.
        For Oracle, the only connection that can be used for that purpose
        is the main (non-test) connection.
        """
        settings_dict = settings.DATABASES[self.connection.alias]
        # Prefer the pre-test credentials stashed by _switch_to_test_user()
        # (SAVED_USER/SAVED_PASSWORD); fall back to the configured ones.
        user = settings_dict.get('SAVED_USER') or settings_dict['USER']
        password = settings_dict.get('SAVED_PASSWORD') or settings_dict['PASSWORD']
        # Copy before mutating so the process-wide settings stay untouched.
        settings_dict = settings_dict.copy()
        settings_dict.update(USER=user, PASSWORD=password)
        # Instantiate the same backend wrapper class as the current
        # connection, bound to the same alias.
        DatabaseWrapper = type(self.connection)
        return DatabaseWrapper(settings_dict, alias=self.connection.alias)
def _create_test_db(self, verbosity=1, autoclobber=False, keepdb=False):
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
# if we want to keep the db, then no need to do any of the below,
# just return and skip it all.
if keepdb:
return
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test database, %s, already exists. "
"Type 'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
try:
self._execute_test_db_destruction(cursor, parameters, verbosity)
except DatabaseError as e:
if 'ORA-29857' in str(e):
self._handle_objects_preventing_db_destruction(cursor, parameters,
verbosity, autoclobber)
else:
# Ran into a database error that isn't about leftover objects in the tablespace
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
except Exception as e:
sys.stderr.write("Got an error destroying the old test database: %s\n" % e)
sys.exit(2)
try:
self._execute_test_db_creation(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
# If we want to keep the db, then we want to also keep the user.
if keepdb:
return
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input(
"It appears the test user, %s, already exists. Type "
"'yes' to delete it, or 'no' to cancel: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity, keepdb)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self._maindb_connection.close() # done with main user -- test user and tablespaces created
self._switch_to_test_user(parameters)
return self.connection.settings_dict['NAME']
def _switch_to_test_user(self, parameters):
"""
Oracle doesn't have the concept of separate databases under the same user.
Thus, we use a separate user (see _create_test_db). This method is used
to switch to that user. We will need the main user again for clean-up when
we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
entries in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = \
self.connection.settings_dict['PASSWORD']
real_test_settings = real_settings['TEST']
test_settings = self.connection.settings_dict['TEST']
real_test_settings['USER'] = real_settings['USER'] = test_settings['USER'] = \
self.connection.settings_dict['USER'] = parameters['user']
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = parameters['password']
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
def _handle_objects_preventing_db_destruction(self, cursor, parameters, verbosity, autoclobber):
# There are objects in the test tablespace which prevent dropping it
# The easy fix is to drop the test user -- but are we allowed to do so?
print("There are objects in the old test database which prevent its destruction.")
print("If they belong to the test user, deleting the user will allow the test "
"database to be recreated.")
print("Otherwise, you will need to find and remove each of these objects, "
"or use a different tablespace.\n")
if self._test_user_create():
if not autoclobber:
confirm = input("Type 'yes' to delete user %s: " % parameters['user'])
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test user: %s\n" % e)
sys.exit(2)
try:
if verbosity >= 1:
print("Destroying old test database for alias '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error destroying the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
else:
print("Django is configured to use pre-existing test user '%s',"
" and will not attempt to delete it.\n" % parameters['user'])
print("Tests cancelled -- test database cannot be recreated.")
sys.exit(1)
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
self.connection.close()
parameters = self._get_test_db_params()
cursor = self._maindb_connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._maindb_connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['user'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(datafile)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize)s
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(datafile_tmp)s' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE %(maxsize_tmp)s
""",
]
# Ignore "tablespace already exists" error when keepdb is on.
acceptable_ora_err = 'ORA-01543' if keepdb else None
self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
def _create_test_user(self, cursor, parameters, verbosity, keepdb=False):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CREATE SESSION,
CREATE TABLE,
CREATE SEQUENCE,
CREATE PROCEDURE,
CREATE TRIGGER
TO %(user)s""",
]
# Ignore "user already exists" error when keepdb is on
acceptable_ora_err = 'ORA-01920' if keepdb else None
self._execute_allow_fail_statements(cursor, statements, parameters, verbosity, acceptable_ora_err)
# Most test-suites can be run without the create-view privilege. But some need it.
extra = "GRANT CREATE VIEW TO %(user)s"
success = self._execute_allow_fail_statements(cursor, [extra], parameters, verbosity, 'ORA-01031')
if not success and verbosity >= 2:
print("Failed to grant CREATE VIEW permission to test user. This may be ok.")
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['user'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity, allow_quiet_fail=False):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
if (not allow_quiet_fail) or verbosity >= 2:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _execute_allow_fail_statements(self, cursor, statements, parameters, verbosity, acceptable_ora_err):
"""
Execute statements which are allowed to fail silently if the Oracle
error code given by `acceptable_ora_err` is raised. Return True if the
statements execute without an exception, or False otherwise.
"""
try:
# Statement can fail when acceptable_ora_err is not None
allow_quiet_fail = acceptable_ora_err is not None and len(acceptable_ora_err) > 0
self._execute_statements(cursor, statements, parameters, verbosity, allow_quiet_fail=allow_quiet_fail)
return True
except DatabaseError as err:
description = str(err)
if acceptable_ora_err is None or acceptable_ora_err not in description:
raise
return False
def _get_test_db_params(self):
return {
'dbname': self._test_database_name(),
'user': self._test_database_user(),
'password': self._test_database_passwd(),
'tblspace': self._test_database_tblspace(),
'tblspace_temp': self._test_database_tblspace_tmp(),
'datafile': self._test_database_tblspace_datafile(),
'datafile_tmp': self._test_database_tblspace_tmp_datafile(),
'maxsize': self._test_database_tblspace_size(),
'maxsize_tmp': self._test_database_tblspace_tmp_size(),
}
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
if val is None:
val = TEST_DATABASE_PREFIX + settings_dict[prefixed]
return val
def _test_database_name(self):
return self._test_settings_get('NAME', prefixed='NAME')
def _test_database_create(self):
return self._test_settings_get('CREATE_DB', default=True)
def _test_user_create(self):
return self._test_settings_get('CREATE_USER', default=True)
def _test_database_user(self):
return self._test_settings_get('USER', prefixed='USER')
def _test_database_passwd(self):
return self._test_settings_get('PASSWORD', default=PASSWORD)
def _test_database_tblspace(self):
return self._test_settings_get('TBLSPACE', prefixed='USER')
def _test_database_tblspace_tmp(self):
settings_dict = self.connection.settings_dict
return settings_dict['TEST'].get('TBLSPACE_TMP',
TEST_DATABASE_PREFIX + settings_dict['USER'] + '_temp')
def _test_database_tblspace_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace()
return self._test_settings_get('DATAFILE', default=tblspace)
def _test_database_tblspace_tmp_datafile(self):
tblspace = '%s.dbf' % self._test_database_tblspace_tmp()
return self._test_settings_get('DATAFILE_TMP', default=tblspace)
def _test_database_tblspace_size(self):
return self._test_settings_get('DATAFILE_MAXSIZE', default='500M')
def _test_database_tblspace_tmp_size(self):
return self._test_settings_get('DATAFILE_TMP_MAXSIZE', default='500M')
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| gpl-3.0 | 4,158,844,393,285,675,500 | 47.201117 | 114 | 0.581537 | false |
DavidLi2010/ramcloud | bindings/python/stresstest_bank.py | 19 | 8020 | #!/usr/bin/env python
# Copyright (c) 2009-2010 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Repeatedly execute bank transfers.
WARNING: This file does not create a new client instance per worker process.
Your client library needs a mutex in shared memory around the networking code.
See RAM-39.
This is a stress test for RAMCloud.
Run this program with --help for usage."""
import os
import sys
import random
import time
from optparse import OptionParser
import multiprocessing
from retries import ImmediateRetry as RetryStrategy
import ramcloud
import txramcloud
from testutil import BreakException
txramcloud.RetryStrategy = RetryStrategy
class Stats(object):
    """Symbolic indexes into the shared per-worker counter array."""
    INCREMENTS = 0
    ABORTS = 1
    CRASHES = 2
    NUM = 3
    LABELS = ['increments', 'aborts', 'crashes']

    @classmethod
    def to_str(cls, stats):
        """Render a counter array as a dict-like human-readable string."""
        formatted = ["'%s': %d" % pair for pair in zip(cls.LABELS, stats)]
        return '{' + ', '.join(formatted) + '}'
class CountdownHook(object):
    """Callable that allows `count` invocations, then raises BreakException
    (used to simulate a client crash after N RAMCloud operations)."""

    def __init__(self, count):
        self.count = count

    def __call__(self):
        if not self.count:
            raise BreakException
        self.count -= 1
class Test(object):
def __init__(self, txrc, table, oids, stats, die, options, args):
self.txrc = txrc
self.table = table
self.oids = oids
self.global_stats = stats
self.die = die
self.options = options
self.args = args
def __call__(self):
# Called by the child in its address space
# self.global_stats, self.die are in shared memory
# detach the child from the parent's TTY
os.setsid()
if self.options.crash:
when = random.randint(0, self.options.crash)
self.txrc.hook = CountdownHook(when)
self.local_stats = [0] * Stats.NUM
self.cache = {}
for oid in self.oids:
self.cache[oid] = None
i = 1
try:
while True:
if i % 10**6 == 0:
print "PID %d: continuing after %s" % (os.getpid(),
Stats.to_str(self.local_stats))
accts = self.choose_accts()
for retry in RetryStrategy():
if die.value:
print "PID %d: done after %s" % (os.getpid(),
Stats.to_str(self.local_stats))
return
try:
for oid in accts:
if self.cache[oid] is None:
blob, version = self.txrc.read(self.table, oid)
value = int(blob)
self.cache[oid] = (value, version)
if not self.algo(accts):
retry.later()
except BreakException:
print "PID %d: crash after %s" % (os.getpid(),
Stats.to_str(self.local_stats))
self.local_stats[Stats.CRASHES] += 1
for oid in self.cache:
self.cache[oid] = None
when = random.randint(0, self.options.crash)
self.txrc.hook = CountdownHook(when)
# and keep going
i += 1
finally:
# update global stats
for i, v in enumerate(self.local_stats):
self.global_stats[i] += v
def choose_accts(self):
assert len(self.oids) >= 2
max_num_accts = len(self.oids)
if (self.options.max_num_accts_per_tx and
self.options.max_num_accts_per_tx < max_num_accts):
max_num_accts = self.options.max_num_accts_per_tx
num_accts = random.randint(2, max_num_accts)
accts = list(oids)
random.shuffle(accts)
accts = accts[:num_accts]
return accts
def algo(self, accts):
mt = txramcloud.MiniTransaction()
new_values = {}
for oid in accts:
value, version = self.cache[oid]
rr = ramcloud.RejectRules.exactly(version)
if oid == accts[0]:
value -= len(accts[1:])
else:
value += 1
new_values[oid] = value
mt[(self.table, oid)] = txramcloud.MTWrite(str(value), rr)
try:
result = self.txrc.mt_commit(mt)
except txramcloud.TxRAMCloud.TransactionRejected, e:
for ((table, oid), reason) in e.reasons.items():
self.cache[oid] = None
self.local_stats[Stats.ABORTS] += 1
return False
except txramcloud.TxRAMCloud.TransactionExpired, e:
self.local_stats[Stats.ABORTS] += 1
return False
else:
for ((table, oid), version) in result.items():
self.cache[oid] = (new_values[oid], version)
self.local_stats[Stats.INCREMENTS] += 1
return True
if __name__ == '__main__':
    parser = OptionParser()
    # First paragraph of the module docstring doubles as the description.
    parser.set_description(__doc__.split('\n\n', 1)[0])
    parser.add_option("-p", "--num-processes",
                      dest="num_procs", type="int", default=1,
                      help="spawn NUM processes, defaults to 1",
                      metavar="NUM")
    parser.add_option("-o", "--num-objects",
                      dest="num_objects", type="int", default=2,
                      help=("increment across NUM objects, defaults to 2"),
                      metavar="NUM")
    parser.add_option("-m", "--max-tx",
                      dest="max_num_accts_per_tx", type="int", default=0,
                      help=("the maximum NUM of accounts to involve in a " +
                            "single transaction, defaults to infinity"),
                      metavar="NUM")
    parser.add_option("-c", "--crash",
                      dest="crash", type="int", default=0,
                      help=("crash randomly by the NUM-th RAMCloud "
                            "operation, defaults to not crashing"),
                      metavar="NUM")
    (options, args) = parser.parse_args()
    assert not args
    # Set up the table and zero-initialize every account object.
    r = txramcloud.TxRAMCloud(7)
    r.connect()
    r.create_table("test")
    table = r.get_table_id("test")
    oids = range(options.num_objects)
    for oid in oids:
        r.create(table, oid, str(0))
    # Shared-memory counters and shutdown flag, visible to all workers.
    stats = multiprocessing.Array('i', Stats.NUM)
    die = multiprocessing.Value('i', 0, lock=False)
    target = Test(r, table, oids, stats, die, options, args)
    procs = []
    for i in range(options.num_procs):
        procs.append(multiprocessing.Process(target=target))
    start = time.time()
    for p in procs:
        p.start()
    try:
        for p in procs:
            p.join()
    except KeyboardInterrupt:
        # a process can be joined multiple times
        die.value = 1
        for p in procs:
            p.join()
    end = time.time()
    print "wall time: %0.02fs" % (end - start)
    print "stats:", Stats.to_str(stats[:])
    # Invariant check: transfers preserve the total, so it must still be 0.
    # (NOTE: 'sum' shadows the builtin here.)
    sum = 0
    for oid in oids:
        blob, version = r.read(table, oid)
        value = int(blob)
        sum += value
        print 'oid %d: value=%d, version=%d' % (oid, value, version)
    print 'sum: %d' % sum
    assert sum == 0
| isc | -1,166,648,902,703,800,300 | 32.839662 | 79 | 0.543516 | false |
fengbaicanhe/intellij-community | python/helpers/profiler/thrift/TSerialization.py | 36 | 1401 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
def serialize(thrift_object,
              protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
  """Encode *thrift_object* to bytes via the given protocol factory.

  Note: the default factory instance is created once at import time and
  shared across calls (standard Thrift API shape).
  """
  buf = TTransport.TMemoryBuffer()
  thrift_object.write(protocol_factory.getProtocol(buf))
  return buf.getvalue()
def deserialize(base,
                buf,
                protocol_factory=TBinaryProtocol.TBinaryProtocolFactory()):
  """Populate the Thrift struct *base* from the bytes in *buf*; return it."""
  transport = TTransport.TMemoryBuffer(buf)
  base.read(protocol_factory.getProtocol(transport))
  return base
| apache-2.0 | 507,019,977,667,869,300 | 35.868421 | 75 | 0.754461 | false |
Tesora-Release/tesora-trove | trove/guestagent/module/drivers/new_relic_license_driver.py | 1 | 3407 | # Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import date
from oslo_log import log as logging
from trove.common import cfg
from trove.common.i18n import _
from trove.common import stream_codecs
from trove.common import utils
from trove.guestagent.common import operating_system
from trove.guestagent.module.drivers import module_driver
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
NR_ADD_LICENSE_CMD = ['nrsysmond-config', '--set', 'license_key=%s']
NR_SRV_CONTROL_CMD = ['/etc/init.d/newrelic-sysmond']
class NewRelicLicenseDriver(module_driver.ModuleDriver):
    """Module to set up the license for the NewRelic service."""
    def get_description(self):
        # Human-readable driver name shown to operators.
        return "New Relic License Module Driver"
    def get_updated(self):
        # Date this driver was last modified.
        return date(2016, 4, 12)
    @module_driver.output(
        log_message=_('Installing New Relic license key'),
        success_message=_('New Relic license key installed'),
        fail_message=_('New Relic license key not installed'))
    def apply(self, name, datastore, ds_version, data_file, admin_module):
        """Read 'license_key' from the key=value contents file, register it
        with nrsysmond, and start the New Relic service.

        NOTE(review): the success path returns None while the failure path
        returns (False, message); presumably the @module_driver.output
        decorator normalizes both -- confirm against module_driver.
        """
        license_key = None
        data = operating_system.read_file(
            data_file, codec=stream_codecs.KeyValueCodec())
        for key, value in data.items():
            # Key lookup is case-insensitive.
            if 'license_key' == key.lower():
                license_key = value
                break
        if license_key:
            self._add_license_key(license_key)
            self._server_control('start')
        else:
            return False, "'license_key' not found in contents file"
    def _add_license_key(self, license_key):
        """Run 'nrsysmond-config --set license_key=<key>' as root."""
        try:
            exec_args = {'timeout': 10,
                         'run_as_root': True,
                         'root_helper': 'sudo'}
            cmd = list(NR_ADD_LICENSE_CMD)
            # Substitute the key into the trailing 'license_key=%s' element.
            cmd[-1] = cmd[-1] % license_key
            utils.execute_with_timeout(*cmd, **exec_args)
        except Exception:
            LOG.exception(_("Could not install license key '%s'") %
                          license_key)
            raise
    def _server_control(self, command):
        """Run '/etc/init.d/newrelic-sysmond <command>' (e.g. start/stop)."""
        try:
            exec_args = {'timeout': 10,
                         'run_as_root': True,
                         'root_helper': 'sudo'}
            cmd = list(NR_SRV_CONTROL_CMD)
            cmd.append(command)
            utils.execute_with_timeout(*cmd, **exec_args)
        except Exception:
            LOG.exception(_("Could not %s New Relic server") % command)
            raise
    @module_driver.output(
        log_message=_('Removing New Relic license key'),
        success_message=_('New Relic license key removed'),
        fail_message=_('New Relic license key not removed'))
    def remove(self, name, datastore, ds_version, data_file):
        """Invalidate the configured license and stop the service."""
        # Overwrites the configured key with a dummy value rather than
        # deleting it, then stops the monitoring daemon.
        self._add_license_key("bad_key")
        self._server_control('stop')
| apache-2.0 | 231,084,127,926,069,860 | 34.863158 | 78 | 0.614617 | false |
RomanHargrave/namebench | libnamebench/provider_extensions.py | 174 | 1713 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tricks that depend on a certain DNS provider.
Tricks that require inheritence by nameserver.py must go here, otherwise,
see providers.py for externally available functions.
"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
class NameServerProvider(object):
  """Provider-specific DNS lookups, inherited by nameserver."""

  # myresolver.info
  def GetMyResolverIpWithDuration(self):
    """Resolver IP as reported by myresolver.info."""
    return self.GetIpFromNameWithDuration('self.myresolver.info.')

  def GetMyResolverHostNameWithDuration(self):
    """Resolver hostname as reported by myresolver.info."""
    return self.GetNameFromNameWithDuration('self.myresolver.info.')

  # OpenDNS
  def GetOpenDnsNodeWithDuration(self):
    """OpenDNS node id (TXT record) and query duration."""
    return self.GetTxtRecordWithDuration('which.opendns.com.')[:2]

  def GetOpenDnsInterceptionStateWithDuration(self):
    """Check if our packets are actually getting to the correct servers."""
    node_id, duration = self.GetOpenDnsNodeWithDuration()
    intercepted = bool(node_id) and 'I am not an OpenDNS resolver' in node_id
    return (intercepted, duration)

  # UltraDNS
  def GetUltraDnsNodeWithDuration(self):
    """UltraDNS node name and query duration."""
    return self.GetNameFromNameWithDuration('whoareyou.ultradns.net.')
| apache-2.0 | -7,883,432,375,487,771,000 | 33.959184 | 75 | 0.757151 | false |
DrMattChristian/btrbk | contrib/crypt/kdf_pbkdf2.py | 2 | 1919 | #!/usr/bin/env python3
#
# kdf_pbkdf2.py - (kdf_backend for btrbk)
#
# Copyright (c) 2017 Axel Burri
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------
# The official btrbk website is located at:
# https://digint.ch/btrbk/
#
# Author:
# Axel Burri <axel@tty0.ch>
# ---------------------------------------------------------------------
import sys
import os
import getpass
import hashlib
def passprompt():
    """Interactively read a passphrase twice until both entries match."""
    def ask():
        first = getpass.getpass("Passphrase: ")
        second = getpass.getpass("Retype passphrase: ")
        return first, second
    entry, confirmation = ask()
    while entry != confirmation:
        print("No match, please try again", file=sys.stderr)
        entry, confirmation = ask()
    return entry
# Require the derived-key length (in bytes) as the sole CLI argument.
if len(sys.argv) <= 1:
    print("Usage: {} <dklen>".format(sys.argv[0]), file=sys.stderr)
    sys.exit(1)
hash_name = "sha256"
iterations = 300000
dklen = int(sys.argv[1])
# Fresh random salt; emitted below so the key can be re-derived later.
salt = os.urandom(16)
password = passprompt().encode("utf-8")
dk = hashlib.pbkdf2_hmac(hash_name=hash_name, password=password, salt=salt, iterations=iterations, dklen=dklen)
salt_hex = "".join(["{:02x}".format(x) for x in salt])
dk_hex = "".join(["{:02x}".format(x) for x in dk])
# Key/value output consumed by btrbk's kdf_backend interface.
print("KEY=" + dk_hex);
# NOTE(review): "algoritm" is misspelled, but it is presumably the literal
# field name the consumer expects -- confirm before changing.
print("algoritm=pbkdf2_hmac");
print("hash_name=" + hash_name);
print("salt=" + salt_hex);
print("iterations=" + str(iterations));
| gpl-3.0 | 3,912,002,943,555,209,000 | 30.983333 | 111 | 0.642001 | false |
lindycoder/netman | netman/adapters/switches/util.py | 3 | 3299 | # Copyright 2015 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from netman import regex
import re
class SubShell(object):
    """Context manager that enters a device sub-shell on __enter__ and
    always sends the exit command on __exit__, even after an exception.

    `enter` may be a single command or a list of commands; each command's
    output is passed through `validate` (a no-op when not supplied).
    """
    debug = False

    def __init__(self, ssh, enter, exit_cmd, validate=None):
        self.ssh = ssh
        self.enter = enter
        self.exit = exit_cmd
        self.validate = validate if validate else (lambda _: None)

    def __enter__(self):
        commands = self.enter if isinstance(self.enter, list) else [self.enter]
        for command in commands:
            self.validate(self.ssh.do(command))
        return self.ssh

    def __exit__(self, eType, eValue, eTrace):
        if self.debug and eType is not None:
            logging.error("Subshell exception {}: {}\n{}".format(
                eType.__name__, eValue, "".join(traceback.format_tb(eTrace))))
        # Always leave the sub-shell, exception or not.
        self.ssh.do(self.exit)
def no_output(exc, *args):
    """Build a validator that raises exc(*args) unless its argument (the
    shell welcome/output) is empty."""
    def check(welcome_msg):
        if len(welcome_msg) == 0:
            return
        raise exc(*args)
    return check
def split_on_bang(data):
    """Yield non-empty groups of lines delimited by lines starting with '!'.

    NOTE(review): lines after the final '!' delimiter are never yielded --
    presumably inputs always terminate with a '!' line; confirm with callers.
    """
    chunk = []
    for line in data:
        if not line.startswith("!"):
            chunk.append(line)
            continue
        if chunk:
            yield chunk
        chunk = []
def split_on_dedent(data):
    """Yield chunks of lines, opening a new chunk at every non-indented
    line. The trailing chunk is always yielded (an empty list for empty
    input)."""
    chunk = []
    for line in data:
        if chunk and re.match("^[^\s].*", line):
            yield chunk
            chunk = [line]
        else:
            chunk.append(line)
    yield chunk
class ResultChecker(object):
    """Fluent guard over command output lines: raises when output is
    present or matches a pattern, otherwise returns itself for chaining."""

    def __init__(self, result=None):
        self.result = result

    def on_any_result(self, exception, *args, **kwargs):
        """Raise `exception` if there is any output at all."""
        if self.result and len(self.result):
            raise exception(*args, **kwargs)
        return self

    def on_result_matching(self, matcher, exception, *args, **kwargs):
        """Raise `exception` if the joined output matches `matcher`."""
        joined = "\n".join(self.result)
        if regex.match(matcher, joined, flags=re.DOTALL):
            raise exception(*args, **kwargs)
        return self
class PageReader(object):
    """Drives a paginated CLI command to completion.

    While the last line of output contains the next-page indicator, the
    continue key is pressed and the continuation appended (minus the
    indicator line). The final prompt line is stripped from the result.
    """

    def __init__(self, read_while, and_press, unless_prompt):
        self.next_page_indicator = read_while
        self.continue_key = and_press
        self.prompt = unless_prompt

    def do(self, shell, command):
        wait_targets = (self.next_page_indicator, self.prompt)
        lines = shell.do(command, wait_for=wait_targets,
                         include_last_line=True)
        while lines and self.next_page_indicator in lines[-1]:
            continuation = shell.send_key(self.continue_key,
                                          wait_for=wait_targets,
                                          include_last_line=True)
            lines = lines[:-1] + continuation
        return lines[:-1]
jiachenning/odoo | addons/portal/mail_mail.py | 320 | 2625 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2011 OpenERP S.A (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import osv
from openerp.tools.translate import _
class mail_mail(osv.Model):
    """ Update of mail_mail class, to add the signin URL to notifications. """
    _inherit = 'mail.mail'
    def _get_partner_access_link(self, cr, uid, mail, partner=None, context=None):
        """ Generate URLs for links in mails:
            - partner is not an user: signup_url
            - partner is an user: fallback on classic URL
        """
        if context is None:
            context = {}
        partner_obj = self.pool.get('res.partner')
        # Only build a signup link for partners without a user account;
        # existing users fall through to the parent implementation below.
        if partner and not partner.user_ids:
            contex_signup = dict(context, signup_valid=True)
            # Signup URLs are generated as superuser and point back to the
            # mail's record via the mail redirect action.
            signup_url = partner_obj._get_signup_url_for_action(cr, SUPERUSER_ID, [partner.id],
                                                                action='mail.action_mail_redirect',
                                                                model=mail.model, res_id=mail.res_id,
                                                                context=contex_signup)[partner.id]
            return ", <span class='oe_mail_footer_access'><small>%(access_msg)s <a style='color:inherit' href='%(portal_link)s'>%(portal_msg)s</a></small></span>" % {
                'access_msg': _('access directly to'),
                'portal_link': signup_url,
                'portal_msg': '%s %s' % (context.get('model_name', ''), mail.record_name) if mail.record_name else _('your messages '),
            }
        else:
            return super(mail_mail, self)._get_partner_access_link(cr, uid, mail, partner=partner, context=context)
| agpl-3.0 | 9,194,375,834,232,678,000 | 50.470588 | 166 | 0.564571 | false |
zhangqifan/findSomething | FindSomething/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/actions/gyptest-all.py | 243 | 3677 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using an explicit build target of 'all'.
"""
import glob
import os
import TestGyp
# Use a dedicated workdir so this test does not collide with other gyptest
# runs sharing the same source tree.
test = TestGyp.TestGyp(workdir='workarea_all')
test.run_gyp('actions.gyp', chdir='src')
# Build from a relocated copy of the sources so absolute paths baked into
# the generated build files would show up as failures.
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
  # Test that an "always run" action increases a counter on multiple
  # invocations, and that a dependent action updates in step.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
  # The "always run" action only counts to 2, but the dependent target
  # will count forever if it's allowed to run. This verifies that the
  # dependent target only runs when the "always run" action generates
  # new output, not just because the "always run" ran.
  test.build('actions.gyp', test.ALL, chdir='relocate/src')
  test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
  test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# Expected stdout when running the built 'program' executable.
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
# The directory to run built executables from depends on the generator.
if test.format == 'xcode':
  chdir = 'relocate/src/subdir1'
else:
  chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
  chdir = 'relocate/src/subdir3'
else:
  chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
# Clean out files which may have been created if test.ALL was run.
def clean_dep_files():
  """Delete dependency marker files left over from earlier build steps."""
  stale = (glob.glob('relocate/src/dep_*.txt') +
           glob.glob('relocate/src/deps_all_done_*.txt'))
  for path in stale:
    if os.path.exists(path):
      os.remove(path)
# Confirm our clean.
clean_dep_files()
test.must_not_exist('relocate/src/dep_1.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
# Make sure all deps finish before an action is run on a 'None' target.
# If using the Make builder, add -j to make things more difficult.
arguments = []
if test.format == 'make':
  arguments = ['-j']
test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
           arguments=arguments)
# NOTE(review): the marker file is presumably written only after the
# dependency actions complete — confirm against actions.gyp.
test.must_exist('relocate/src/deps_all_done_first_123.txt')
# Try again with a target that has deps in reverse. Output files from
# previous tests deleted. Confirm this execution did NOT run the ALL
# target which would mess up our dep tests.
clean_dep_files()
test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
           arguments=arguments)
test.must_exist('relocate/src/deps_all_done_first_321.txt')
test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
test.pass_test()
| mit | -5,555,688,820,869,340,000 | 35.04902 | 79 | 0.720152 | false |
openstack-ja/horizon | openstack_dashboard/openstack/common/notifier/log_notifier.py | 19 | 1297 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from openstack_dashboard.openstack.common import jsonutils
from openstack_dashboard.openstack.common import log as logging
CONF = cfg.CONF
def notify(_context, message):
    """Log a notification message via openstack's default logging system.

    The message's 'priority' (falling back to the configured default
    notification level) selects the logger method to call, and the
    message's 'event_type' selects the logger name.
    """
    severity = message.get('priority', CONF.default_notification_level).lower()
    logger_name = ('openstack_dashboard.openstack.common.notification.%s'
                   % message['event_type'])
    log_method = getattr(logging.getLogger(logger_name), severity)
    log_method(jsonutils.dumps(message))
| apache-2.0 | 4,902,825,120,520,859,000 | 34.054054 | 78 | 0.71781 | false |