# repo: tangyiyong/odoo, path: openerp/addons/base/res/res_users.py
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import itertools
import logging
from functools import partial
from itertools import repeat
from lxml import etree
from lxml.builder import E
import openerp
from openerp import SUPERUSER_ID, models
from openerp import tools
import openerp.exceptions
from openerp.osv import fields, osv, expression
from openerp.service.security import check_super
from openerp.tools.translate import _
from openerp.http import request
_logger = logging.getLogger(__name__)
# Only users who can modify the user (incl. the user herself) see the real contents of these fields
USER_PRIVATE_FIELDS = ['password']
#----------------------------------------------------------
# Basic res.groups and res.users
#----------------------------------------------------------
class res_groups(osv.osv):
_name = "res.groups"
_description = "Access Groups"
_rec_name = 'full_name'
_order = 'name'
def _get_full_name(self, cr, uid, ids, field, arg, context=None):
res = {}
for g in self.browse(cr, uid, ids, context):
if g.category_id:
res[g.id] = '%s / %s' % (g.category_id.name, g.name)
else:
res[g.id] = g.name
return res
def _search_group(self, cr, uid, obj, name, args, context=None):
operand = args[0][2]
operator = args[0][1]
lst = True
if isinstance(operand, bool):
domains = [[('name', operator, operand)], [('category_id.name', operator, operand)]]
            if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not operand):
return expression.AND(domains)
else:
return expression.OR(domains)
if isinstance(operand, basestring):
lst = False
operand = [operand]
where = []
for group in operand:
values = filter(bool, group.split('/'))
group_name = values.pop().strip()
category_name = values and '/'.join(values).strip() or group_name
group_domain = [('name', operator, lst and [group_name] or group_name)]
category_domain = [('category_id.name', operator, lst and [category_name] or category_name)]
if operator in expression.NEGATIVE_TERM_OPERATORS and not values:
category_domain = expression.OR([category_domain, [('category_id', '=', False)]])
if (operator in expression.NEGATIVE_TERM_OPERATORS) == (not values):
sub_where = expression.AND([group_domain, category_domain])
else:
sub_where = expression.OR([group_domain, category_domain])
if operator in expression.NEGATIVE_TERM_OPERATORS:
where = expression.AND([where, sub_where])
else:
where = expression.OR([where, sub_where])
return where
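    # Illustrative note (not part of the original source): with the logic
    # above, a search such as
    #     groups_obj.search(cr, uid, [('full_name', 'ilike', 'Sales / Manager')])
    # splits the operand on '/', matching 'Manager' against the group name and
    # 'Sales' against the category name, then combines the two domains with
    # expression.AND (expression.OR when the operator is negative).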
_columns = {
'name': fields.char('Name', required=True, translate=True),
'users': fields.many2many('res.users', 'res_groups_users_rel', 'gid', 'uid', 'Users'),
'model_access': fields.one2many('ir.model.access', 'group_id', 'Access Controls', copy=True),
'rule_groups': fields.many2many('ir.rule', 'rule_group_rel',
'group_id', 'rule_group_id', 'Rules', domain=[('global', '=', False)]),
'menu_access': fields.many2many('ir.ui.menu', 'ir_ui_menu_group_rel', 'gid', 'menu_id', 'Access Menu'),
'view_access': fields.many2many('ir.ui.view', 'ir_ui_view_group_rel', 'group_id', 'view_id', 'Views'),
'comment' : fields.text('Comment', size=250, translate=True),
'category_id': fields.many2one('ir.module.category', 'Application', select=True),
'full_name': fields.function(_get_full_name, type='char', string='Group Name', fnct_search=_search_group),
}
_sql_constraints = [
('name_uniq', 'unique (category_id, name)', 'The name of the group must be unique within an application!')
]
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
# add explicit ordering if search is sorted on full_name
if order and order.startswith('full_name'):
ids = super(res_groups, self).search(cr, uid, args, context=context)
gs = self.browse(cr, uid, ids, context)
gs.sort(key=lambda g: g.full_name, reverse=order.endswith('DESC'))
gs = gs[offset:offset+limit] if limit else gs[offset:]
return map(int, gs)
return super(res_groups, self).search(cr, uid, args, offset, limit, order, context, count)
    def copy(self, cr, uid, id, default=None, context=None):
        group_name = self.read(cr, uid, [id], ['name'])[0]['name']
        default = dict(default or {})
        default.update({'name': _('%s (copy)') % group_name})
return super(res_groups, self).copy(cr, uid, id, default, context)
def write(self, cr, uid, ids, vals, context=None):
if 'name' in vals:
if vals['name'].startswith('-'):
raise osv.except_osv(_('Error'),
_('The name of the group can not start with "-"'))
res = super(res_groups, self).write(cr, uid, ids, vals, context=context)
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
self.pool['res.users'].has_group.clear_cache(self.pool['res.users'])
return res
class res_users(osv.osv):
""" User class. A res.users record models an OpenERP user and is different
from an employee.
res.users class now inherits from res.partner. The partner model is
used to store the data related to the partner: lang, name, address,
avatar, ... The user model is now dedicated to technical data.
"""
__admin_ids = {}
__uid_cache = {}
_inherits = {
'res.partner': 'partner_id',
}
_name = "res.users"
_description = 'Users'
_order = 'name, login'
def _set_new_password(self, cr, uid, id, name, value, args, context=None):
if value is False:
# Do not update the password if no value is provided, ignore silently.
# For example web client submits False values for all empty fields.
return
if uid == id:
# To change their own password users must use the client-specific change password wizard,
# so that the new password is immediately used for further RPC requests, otherwise the user
# will face unexpected 'Access Denied' exceptions.
raise osv.except_osv(_('Operation Canceled'), _('Please use the change password wizard (in User Preferences or User menu) to change your own password.'))
self.write(cr, uid, id, {'password': value})
def _get_password(self, cr, uid, ids, arg, karg, context=None):
return dict.fromkeys(ids, '')
_columns = {
'id': fields.integer('ID'),
'login_date': fields.date('Latest connection', select=1, copy=False),
'partner_id': fields.many2one('res.partner', required=True,
string='Related Partner', ondelete='restrict',
help='Partner-related data of the user', auto_join=True),
'login': fields.char('Login', size=64, required=True,
help="Used to log into the system"),
'password': fields.char('Password', size=64, invisible=True, copy=False,
help="Keep empty if you don't want the user to be able to connect on the system."),
'new_password': fields.function(_get_password, type='char', size=64,
fnct_inv=_set_new_password, string='Set Password',
help="Specify a value only when creating a user or if you're "\
"changing the user's password, otherwise leave empty. After "\
"a change of password, the user has to login again."),
'signature': fields.html('Signature'),
'active': fields.boolean('Active'),
'action_id': fields.many2one('ir.actions.actions', 'Home Action', help="If specified, this action will be opened at log on for this user, in addition to the standard menu."),
'groups_id': fields.many2many('res.groups', 'res_groups_users_rel', 'uid', 'gid', 'Groups'),
# Special behavior for this field: res.company.search() will only return the companies
# available to the current user (should be the user's companies?), when the user_preference
# context is set.
'company_id': fields.many2one('res.company', 'Company', required=True,
help='The company this user is currently working for.', context={'user_preference': True}),
'company_ids':fields.many2many('res.company','res_company_users_rel','user_id','cid','Companies'),
}
# overridden inherited fields to bypass access rights, in case you have
# access to the user but not its corresponding partner
name = openerp.fields.Char(related='partner_id.name', inherited=True)
email = openerp.fields.Char(related='partner_id.email', inherited=True)
def on_change_login(self, cr, uid, ids, login, context=None):
if login and tools.single_email_re.match(login):
return {'value': {'email': login}}
return {}
def onchange_state(self, cr, uid, ids, state_id, context=None):
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool.get('res.partner').onchange_state(cr, uid, partner_ids, state_id, context=context)
def onchange_type(self, cr, uid, ids, is_company, context=None):
""" Wrapper on the user.partner onchange_type, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_type(cr, uid, partner_ids, is_company, context=context)
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
""" Wrapper on the user.partner onchange_address, because some calls to the
partner form view applied to the user may trigger the
partner.onchange_type method, but applied to the user object.
"""
partner_ids = [user.partner_id.id for user in self.browse(cr, uid, ids, context=context)]
return self.pool['res.partner'].onchange_address(cr, uid, partner_ids, use_parent_address, parent_id, context=context)
def _check_company(self, cr, uid, ids, context=None):
return all(((this.company_id in this.company_ids) or not this.company_ids) for this in self.browse(cr, uid, ids, context))
_constraints = [
(_check_company, 'The chosen company is not in the allowed companies for this user', ['company_id', 'company_ids']),
]
_sql_constraints = [
('login_key', 'UNIQUE (login)', 'You can not have two users with the same login !')
]
def _get_company(self,cr, uid, context=None, uid2=False):
if not uid2:
uid2 = uid
# Use read() to compute default company, and pass load=_classic_write to
# avoid useless name_get() calls. This will avoid prefetching fields
# while computing default values for new db columns, as the
# db backend may not be fully initialized yet.
user_data = self.pool['res.users'].read(cr, uid, uid2, ['company_id'],
context=context, load='_classic_write')
comp_id = user_data['company_id']
return comp_id or False
def _get_companies(self, cr, uid, context=None):
c = self._get_company(cr, uid, context)
if c:
return [c]
return False
def _get_group(self,cr, uid, context=None):
dataobj = self.pool.get('ir.model.data')
result = []
try:
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_user')
result.append(group_id)
dummy,group_id = dataobj.get_object_reference(cr, SUPERUSER_ID, 'base', 'group_partner_manager')
result.append(group_id)
except ValueError:
            # If these groups do not exist anymore
pass
return result
def _get_default_image(self, cr, uid, context=None):
return self.pool['res.partner']._get_default_image(cr, uid, False, colorize=True, context=context)
_defaults = {
'password': '',
'active': True,
'customer': False,
'company_id': _get_company,
'company_ids': _get_companies,
'groups_id': _get_group,
'image': _get_default_image,
}
# User can write on a few of his own fields (but not his groups for example)
SELF_WRITEABLE_FIELDS = ['password', 'signature', 'action_id', 'company_id', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz']
# User can read a few of his own fields
SELF_READABLE_FIELDS = ['signature', 'company_id', 'login', 'email', 'name', 'image', 'image_medium', 'image_small', 'lang', 'tz', 'tz_offset', 'groups_id', 'partner_id', '__last_update', 'action_id']
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
def override_password(o):
if ('id' not in o or o['id'] != uid):
for f in USER_PRIVATE_FIELDS:
if f in o:
o[f] = '********'
return o
if fields and (ids == [uid] or ids == uid):
for key in fields:
if not (key in self.SELF_READABLE_FIELDS or key.startswith('context_')):
break
else:
# safe fields only, so we read as super-user to bypass access rights
uid = SUPERUSER_ID
result = super(res_users, self).read(cr, uid, ids, fields=fields, context=context, load=load)
canwrite = self.pool['ir.model.access'].check(cr, uid, 'res.users', 'write', False)
if not canwrite:
if isinstance(ids, (int, long)):
result = override_password(result)
else:
result = map(override_password, result)
return result
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
if uid != SUPERUSER_ID:
groupby_fields = set([groupby] if isinstance(groupby, basestring) else groupby)
if groupby_fields.intersection(USER_PRIVATE_FIELDS):
raise openerp.exceptions.AccessError('Invalid groupby')
return super(res_users, self).read_group(
cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
if user != SUPERUSER_ID and args:
domain_terms = [term for term in args if isinstance(term, (tuple, list))]
domain_fields = set(left for (left, op, right) in domain_terms)
if domain_fields.intersection(USER_PRIVATE_FIELDS):
raise openerp.exceptions.AccessError('Invalid search criterion')
return super(res_users, self)._search(
cr, user, args, offset=offset, limit=limit, order=order, context=context, count=count,
access_rights_uid=access_rights_uid)
def create(self, cr, uid, vals, context=None):
user_id = super(res_users, self).create(cr, uid, vals, context=context)
user = self.browse(cr, uid, user_id, context=context)
if user.partner_id.company_id:
user.partner_id.write({'company_id': user.company_id.id})
return user_id
def write(self, cr, uid, ids, values, context=None):
if not hasattr(ids, '__iter__'):
ids = [ids]
if ids == [uid]:
for key in values.keys():
if not (key in self.SELF_WRITEABLE_FIELDS or key.startswith('context_')):
break
else:
if 'company_id' in values:
user = self.browse(cr, SUPERUSER_ID, uid, context=context)
if not (values['company_id'] in user.company_ids.ids):
del values['company_id']
uid = 1 # safe fields only, so we write as super-user to bypass access rights
res = super(res_users, self).write(cr, uid, ids, values, context=context)
if 'company_id' in values:
for user in self.browse(cr, uid, ids, context=context):
# if partner is global we keep it that way
if user.partner_id.company_id and user.partner_id.company_id.id != values['company_id']:
user.partner_id.write({'company_id': user.company_id.id})
# clear default ir values when company changes
self.pool['ir.values'].get_defaults_dict.clear_cache(self.pool['ir.values'])
# clear caches linked to the users
self.pool['ir.model.access'].call_cache_clearing_methods(cr)
clear = partial(self.pool['ir.rule'].clear_cache, cr)
map(clear, ids)
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
self._context_get.clear_cache(self)
self.has_group.clear_cache(self)
return res
def unlink(self, cr, uid, ids, context=None):
if 1 in ids:
raise osv.except_osv(_('Can not remove root user!'), _('You can not remove the admin user as it is used internally for resources created by Odoo (updates, module installation, ...)'))
db = cr.dbname
if db in self.__uid_cache:
for id in ids:
if id in self.__uid_cache[db]:
del self.__uid_cache[db][id]
return super(res_users, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args=[]
if not context:
context={}
ids = []
if name and operator in ['=', 'ilike']:
ids = self.search(cr, user, [('login','=',name)]+ args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)]+ args, limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
user2copy = self.read(cr, uid, [id], ['login','name'])[0]
default = dict(default or {})
if ('name' not in default) and ('partner_id' not in default):
default['name'] = _("%s (copy)") % user2copy['name']
if 'login' not in default:
default['login'] = _("%s (copy)") % user2copy['login']
return super(res_users, self).copy(cr, uid, id, default, context)
@tools.ormcache(skiparg=2)
def _context_get(self, cr, uid):
user = self.browse(cr, SUPERUSER_ID, uid)
result = {}
for k in self._fields:
if k.startswith('context_'):
context_key = k[8:]
elif k in ['lang', 'tz']:
context_key = k
else:
context_key = False
if context_key:
res = getattr(user, k) or False
if isinstance(res, models.BaseModel):
res = res.id
result[context_key] = res or False
return result
def context_get(self, cr, uid, context=None):
return self._context_get(cr, uid)
def action_get(self, cr, uid, context=None):
dataobj = self.pool['ir.model.data']
data_id = dataobj._get_id(cr, SUPERUSER_ID, 'base', 'action_res_users_my')
return dataobj.browse(cr, uid, data_id, context=context).res_id
def check_super(self, passwd):
return check_super(passwd)
def check_credentials(self, cr, uid, password):
""" Override this method to plug additional authentication methods"""
res = self.search(cr, SUPERUSER_ID, [('id','=',uid),('password','=',password)])
if not res:
raise openerp.exceptions.AccessDenied()
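    # A minimal sketch (assumption, not part of the original file) of plugging
    # in another authentication backend by overriding check_credentials:
    #
    #     class res_users_custom_auth(osv.osv):
    #         _inherit = 'res.users'
    #
    #         def check_credentials(self, cr, uid, password):
    #             if self._verify_token(cr, uid, password):  # hypothetical helper
    #                 return
    #             super(res_users_custom_auth, self).check_credentials(cr, uid, password)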
def _login(self, db, login, password):
if not password:
return False
user_id = False
cr = self.pool.cursor()
try:
# autocommit: our single update request will be performed atomically.
# (In this way, there is no opportunity to have two transactions
# interleaving their cr.execute()..cr.commit() calls and have one
# of them rolled back due to a concurrent access.)
cr.autocommit(True)
# check if user exists
res = self.search(cr, SUPERUSER_ID, [('login','=',login)])
if res:
user_id = res[0]
# check credentials
self.check_credentials(cr, user_id, password)
# We effectively unconditionally write the res_users line.
# Even w/ autocommit there's a chance the user row will be locked,
# in which case we can't delay the login just for the purpose of
                # updating the last login date - hence we use FOR UPDATE NOWAIT to
# try to get the lock - fail-fast
# Failing to acquire the lock on the res_users row probably means
# another request is holding it. No big deal, we don't want to
# prevent/delay login in that case. It will also have been logged
# as a SQL error, if anyone cares.
try:
# NO KEY introduced in PostgreSQL 9.3 http://www.postgresql.org/docs/9.3/static/release-9-3.html#AEN115299
update_clause = 'NO KEY UPDATE' if cr._cnx.server_version >= 90300 else 'UPDATE'
cr.execute("SELECT id FROM res_users WHERE id=%%s FOR %s NOWAIT" % update_clause, (user_id,), log_exceptions=False)
cr.execute("UPDATE res_users SET login_date = now() AT TIME ZONE 'UTC' WHERE id=%s", (user_id,))
self.invalidate_cache(cr, user_id, ['login_date'], [user_id])
except Exception:
_logger.debug("Failed to update last_login for db:%s login:%s", db, login, exc_info=True)
except openerp.exceptions.AccessDenied:
_logger.info("Login failed for db:%s login:%s", db, login)
user_id = False
finally:
cr.close()
return user_id
def authenticate(self, db, login, password, user_agent_env):
"""Verifies and returns the user ID corresponding to the given
``login`` and ``password`` combination, or False if there was
no matching user.
:param str db: the database on which user is trying to authenticate
:param str login: username
:param str password: user password
:param dict user_agent_env: environment dictionary describing any
relevant environment attributes
"""
uid = self._login(db, login, password)
if uid == openerp.SUPERUSER_ID:
# Successfully logged in as admin!
# Attempt to guess the web base url...
if user_agent_env and user_agent_env.get('base_location'):
cr = self.pool.cursor()
try:
base = user_agent_env['base_location']
ICP = self.pool['ir.config_parameter']
if not ICP.get_param(cr, uid, 'web.base.url.freeze'):
ICP.set_param(cr, uid, 'web.base.url', base)
cr.commit()
except Exception:
_logger.exception("Failed to update web.base.url configuration parameter")
finally:
cr.close()
return uid
def check(self, db, uid, passwd):
"""Verifies that the given (uid, password) is authorized for the database ``db`` and
raise an exception if it is not."""
if not passwd:
# empty passwords disallowed for obvious security reasons
raise openerp.exceptions.AccessDenied()
if self.__uid_cache.setdefault(db, {}).get(uid) == passwd:
return
cr = self.pool.cursor()
try:
self.check_credentials(cr, uid, passwd)
self.__uid_cache[db][uid] = passwd
finally:
cr.close()
def change_password(self, cr, uid, old_passwd, new_passwd, context=None):
"""Change current user password. Old password must be provided explicitly
to prevent hijacking an existing user session, or for cases where the cleartext
password is not used to authenticate requests.
:return: True
:raise: openerp.exceptions.AccessDenied when old password is wrong
:raise: except_osv when new password is not set or empty
"""
self.check(cr.dbname, uid, old_passwd)
if new_passwd:
return self.write(cr, uid, uid, {'password': new_passwd})
raise osv.except_osv(_('Warning!'), _("Setting empty passwords is not allowed for security reasons!"))
def preference_save(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'reload_context',
}
def preference_change_password(self, cr, uid, ids, context=None):
return {
'type': 'ir.actions.client',
'tag': 'change_password',
'target': 'new',
}
@tools.ormcache(skiparg=2)
def has_group(self, cr, uid, group_ext_id):
"""Checks whether user belongs to given group.
:param str group_ext_id: external ID (XML ID) of the group.
Must be provided in fully-qualified form (``module.ext_id``), as there
           is no implicit module to use.
:return: True if the current user is a member of the group with the
given external ID (XML ID), else False.
"""
assert group_ext_id and '.' in group_ext_id, "External ID must be fully qualified"
module, ext_id = group_ext_id.split('.')
cr.execute("""SELECT 1 FROM res_groups_users_rel WHERE uid=%s AND gid IN
(SELECT res_id FROM ir_model_data WHERE module=%s AND name=%s)""",
(uid, module, ext_id))
return bool(cr.fetchone())
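    # Usage sketch (illustrative): feature code typically guards on a
    # fully-qualified XML ID, e.g.
    #     if self.pool['res.users'].has_group(cr, uid, 'base.group_user'):
    #         ...  # member-only code path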
#----------------------------------------------------------
# Implied groups
#
# Extension of res.groups and res.users with a relation for "implied"
# or "inherited" groups. Once a user belongs to a group, it
# automatically belongs to the implied groups (transitively).
#----------------------------------------------------------
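# For example (illustrative, not from the original file): if the group
# "Sales / Manager" lists "Sales / User" in its implied_ids, adding a user to
# the manager group also puts her in the user group, and transitively in any
# group that one implies in turn.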
class cset(object):
""" A cset (constrained set) is a set of elements that may be constrained to
be a subset of other csets. Elements added to a cset are automatically
added to its supersets. Cycles in the subset constraints are supported.
"""
def __init__(self, xs):
self.supersets = set()
self.elements = set(xs)
def subsetof(self, other):
if other is not self:
self.supersets.add(other)
other.update(self.elements)
def update(self, xs):
xs = set(xs) - self.elements
if xs: # xs will eventually be empty in case of a cycle
self.elements.update(xs)
for s in self.supersets:
s.update(xs)
def __iter__(self):
return iter(self.elements)
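# Small illustration (not in the original source) of how a cset propagates
# elements through subset constraints, even across cycles:
#     a, b = cset([1]), cset([2])
#     a.subsetof(b); b.subsetof(a)   # cycle: a and b must end up equal
#     a.update([3])
#     sorted(b)                      # -> [1, 2, 3]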
concat = itertools.chain.from_iterable
class groups_implied(osv.osv):
_inherit = 'res.groups'
def _get_trans_implied(self, cr, uid, ids, field, arg, context=None):
"computes the transitive closure of relation implied_ids"
memo = {} # use a memo for performance and cycle avoidance
def computed_set(g):
if g not in memo:
memo[g] = cset(g.implied_ids)
for h in g.implied_ids:
computed_set(h).subsetof(memo[g])
return memo[g]
res = {}
for g in self.browse(cr, SUPERUSER_ID, ids, context):
res[g.id] = map(int, computed_set(g))
return res
_columns = {
'implied_ids': fields.many2many('res.groups', 'res_groups_implied_rel', 'gid', 'hid',
string='Inherits', help='Users of this group automatically inherit those groups'),
'trans_implied_ids': fields.function(_get_trans_implied,
type='many2many', relation='res.groups', string='Transitively inherits'),
}
def create(self, cr, uid, values, context=None):
users = values.pop('users', None)
gid = super(groups_implied, self).create(cr, uid, values, context)
if users:
# delegate addition of users to add implied groups
self.write(cr, uid, [gid], {'users': users}, context)
return gid
def write(self, cr, uid, ids, values, context=None):
res = super(groups_implied, self).write(cr, uid, ids, values, context)
if values.get('users') or values.get('implied_ids'):
# add all implied groups (to all users of each group)
for g in self.browse(cr, uid, ids, context=context):
gids = map(int, g.trans_implied_ids)
vals = {'users': [(4, u.id) for u in g.users]}
super(groups_implied, self).write(cr, uid, gids, vals, context)
return res
class users_implied(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
groups = values.pop('groups_id', None)
user_id = super(users_implied, self).create(cr, uid, values, context)
if groups:
# delegate addition of groups to add implied groups
self.write(cr, uid, [user_id], {'groups_id': groups}, context)
self.pool['ir.ui.view'].clear_cache()
return user_id
def write(self, cr, uid, ids, values, context=None):
if not isinstance(ids,list):
ids = [ids]
res = super(users_implied, self).write(cr, uid, ids, values, context)
if values.get('groups_id'):
# add implied groups for all users
for user in self.browse(cr, uid, ids):
gs = set(concat(g.trans_implied_ids for g in user.groups_id))
vals = {'groups_id': [(4, g.id) for g in gs]}
super(users_implied, self).write(cr, uid, [user.id], vals, context)
self.pool['ir.ui.view'].clear_cache()
return res
#----------------------------------------------------------
# Virtual checkbox and selection fields for the res.users form view
#
# Extension of res.groups and res.users for the special groups view in the users
# form. This extension presents groups with selection and boolean widgets:
# - Groups are shown by application, with boolean and/or selection fields.
# Selection fields typically define a role "Name" for the given application.
# - Uncategorized groups are presented as boolean fields and grouped in a
# section "Others".
#
# The user form view is modified by an inherited view (base.user_groups_view);
# the inherited view replaces the field 'groups_id' by a set of reified group
# fields (boolean or selection fields). The arch of that view is regenerated
# each time groups are changed.
#
# Naming conventions for reified groups fields:
# - boolean field 'in_group_ID' is True iff
# ID is in 'groups_id'
# - selection field 'sel_groups_ID1_..._IDk' is ID iff
# ID is in 'groups_id' and ID is maximal in the set {ID1, ..., IDk}
#----------------------------------------------------------
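# For instance (illustrative): the group with database id 10 is reified as the
# boolean field 'in_group_10', and an application whose groups have ids 3, 7
# and 9 becomes the selection field 'sel_groups_3_7_9'; its value is the last
# id in that encoded (implication) order that is present in 'groups_id'.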
def name_boolean_group(id):
return 'in_group_' + str(id)
def name_selection_groups(ids):
return 'sel_groups_' + '_'.join(map(str, ids))
def is_boolean_group(name):
return name.startswith('in_group_')
def is_selection_groups(name):
return name.startswith('sel_groups_')
def is_reified_group(name):
return is_boolean_group(name) or is_selection_groups(name)
def get_boolean_group(name):
return int(name[9:])
def get_selection_groups(name):
return map(int, name[11:].split('_'))
def partition(f, xs):
"return a pair equivalent to (filter(f, xs), filter(lambda x: not f(x), xs))"
yes, nos = [], []
for x in xs:
(yes if f(x) else nos).append(x)
return yes, nos
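# Example (not in the original file):
#     partition(lambda x: x % 2 == 0, [1, 2, 3, 4])   # -> ([2, 4], [1, 3])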
def parse_m2m(commands):
"return a list of ids corresponding to a many2many value"
ids = []
for command in commands:
if isinstance(command, (tuple, list)):
if command[0] in (1, 4):
ids.append(command[1])
elif command[0] == 5:
ids = []
elif command[0] == 6:
ids = list(command[2])
else:
ids.append(command)
return ids
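# Worked example (illustrative): m2m command tuples collapse to plain ids,
#     parse_m2m([(6, 0, [1, 2]), (4, 5), 7])   # -> [1, 2, 5, 7]
# since (6, 0, ids) replaces the whole set, (4, id) links one record, and a
# bare integer is taken as an id.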
class groups_view(osv.osv):
_inherit = 'res.groups'
def create(self, cr, uid, values, context=None):
res = super(groups_view, self).create(cr, uid, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(groups_view, self).write(cr, uid, ids, values, context)
self.update_user_groups_view(cr, uid, context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(groups_view, self).unlink(cr, uid, ids, context)
self.update_user_groups_view(cr, uid, context)
return res
def update_user_groups_view(self, cr, uid, context=None):
# the view with id 'base.user_groups_view' inherits the user form view,
# and introduces the reified group fields
# we have to try-catch this, because at first init the view does not exist
# but we are already creating some basic groups
if not context or context.get('install_mode'):
# use installation/admin language for translatable names in the view
context = dict(context or {})
context.update(self.pool['res.users'].context_get(cr, uid))
view = self.pool['ir.model.data'].xmlid_to_object(cr, SUPERUSER_ID, 'base.user_groups_view', context=context)
if view and view.exists() and view._name == 'ir.ui.view':
xml1, xml2 = [], []
xml1.append(E.separator(string=_('Application'), colspan="4"))
for app, kind, gs in self.get_groups_by_application(cr, uid, context):
# hide groups in category 'Hidden' (except to group_no_one)
attrs = {'groups': 'base.group_no_one'} if app and app.xml_id == 'base.module_category_hidden' else {}
if kind == 'selection':
# application name with a selection field
field_name = name_selection_groups(map(int, gs))
xml1.append(E.field(name=field_name, **attrs))
xml1.append(E.newline())
else:
# application separator with boolean fields
app_name = app and app.name or _('Other')
xml2.append(E.separator(string=app_name, colspan="4", **attrs))
for g in gs:
field_name = name_boolean_group(g.id)
xml2.append(E.field(name=field_name, **attrs))
xml = E.field(*(xml1 + xml2), name="groups_id", position="replace")
xml.addprevious(etree.Comment("GENERATED AUTOMATICALLY BY GROUPS"))
xml_content = etree.tostring(xml, pretty_print=True, xml_declaration=True, encoding="utf-8")
view.write({'arch': xml_content})
return True
def get_application_groups(self, cr, uid, domain=None, context=None):
return self.search(cr, uid, domain or [])
def get_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category), as a list of pairs:
[(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean' or 'selection'.
Applications are given in sequence order. If kind is 'selection', the groups are
given in reverse implication order.
"""
def linearized(gs):
gs = set(gs)
# determine sequence order: a group should appear after its implied groups
order = dict.fromkeys(gs, 0)
for g in gs:
for h in gs.intersection(g.trans_implied_ids):
order[h] -= 1
# check whether order is total, i.e., sequence orders are distinct
if len(set(order.itervalues())) == len(gs):
return sorted(gs, key=lambda g: order[g])
return None
# classify all groups by application
gids = self.get_application_groups(cr, uid, context=context)
by_app, others = {}, []
for g in self.browse(cr, uid, gids, context):
if g.category_id:
by_app.setdefault(g.category_id, []).append(g)
else:
others.append(g)
# build the result
res = []
apps = sorted(by_app.iterkeys(), key=lambda a: a.sequence or 0)
for app in apps:
gs = linearized(by_app[app])
if gs:
res.append((app, 'selection', gs))
else:
res.append((app, 'boolean', by_app[app]))
if others:
res.append((False, 'boolean', others))
return res
class users_view(osv.osv):
_inherit = 'res.users'
def create(self, cr, uid, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).create(cr, uid, values, context)
def write(self, cr, uid, ids, values, context=None):
values = self._remove_reified_groups(values)
return super(users_view, self).write(cr, uid, ids, values, context)
def _remove_reified_groups(self, values):
""" return `values` without reified group fields """
add, rem = [], []
values1 = {}
for key, val in values.iteritems():
if is_boolean_group(key):
(add if val else rem).append(get_boolean_group(key))
elif is_selection_groups(key):
rem += get_selection_groups(key)
if val:
add.append(val)
else:
values1[key] = val
if 'groups_id' not in values and (add or rem):
# remove group ids in `rem` and add group ids in `add`
values1['groups_id'] = zip(repeat(3), rem) + zip(repeat(4), add)
return values1
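    # Illustrative trace (not in the original source): given reified values
    # such as {'in_group_10': True, 'sel_groups_3_7_9': 7, 'name': 'x'}, the
    # result keeps 'name' and rebuilds 'groups_id' with unlink commands
    # (3, id) for the deselected ids 3, 7 and 9 and link commands (4, id)
    # for ids 10 and 7.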
def default_get(self, cr, uid, fields, context=None):
group_fields, fields = partition(is_reified_group, fields)
fields1 = (fields + ['groups_id']) if group_fields else fields
values = super(users_view, self).default_get(cr, uid, fields1, context)
self._add_reified_groups(group_fields, values)
# add "default_groups_ref" inside the context to set default value for group_id with xml values
if 'groups_id' in fields and isinstance(context.get("default_groups_ref"), list):
groups = []
ir_model_data = self.pool.get('ir.model.data')
for group_xml_id in context["default_groups_ref"]:
group_split = group_xml_id.split('.')
if len(group_split) != 2:
raise osv.except_osv(_('Invalid context value'), _('Invalid context default_groups_ref value (model.name_id) : "%s"') % group_xml_id)
try:
temp, group_id = ir_model_data.get_object_reference(cr, uid, group_split[0], group_split[1])
except ValueError:
group_id = False
groups += [group_id]
values['groups_id'] = groups
return values
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
# determine whether reified groups fields are required, and which ones
fields1 = fields or self.fields_get(cr, uid, context=context).keys()
group_fields, other_fields = partition(is_reified_group, fields1)
# read regular fields (other_fields); add 'groups_id' if necessary
drop_groups_id = False
if group_fields and fields:
if 'groups_id' not in other_fields:
other_fields.append('groups_id')
drop_groups_id = True
else:
other_fields = fields
res = super(users_view, self).read(cr, uid, ids, other_fields, context=context, load=load)
# post-process result to add reified group fields
if group_fields:
for values in (res if isinstance(res, list) else [res]):
self._add_reified_groups(group_fields, values)
if drop_groups_id:
values.pop('groups_id', None)
return res
def _add_reified_groups(self, fields, values):
""" add the given reified group fields into `values` """
gids = set(parse_m2m(values.get('groups_id') or []))
for f in fields:
if is_boolean_group(f):
values[f] = get_boolean_group(f) in gids
elif is_selection_groups(f):
selected = [gid for gid in get_selection_groups(f) if gid in gids]
values[f] = selected and selected[-1] or False
def fields_get(self, cr, uid, allfields=None, context=None, write_access=True, attributes=None):
res = super(users_view, self).fields_get(cr, uid, allfields, context, write_access, attributes)
# add reified groups fields
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
return res
for app, kind, gs in self.pool['res.groups'].get_groups_by_application(cr, uid, context):
if kind == 'selection':
# selection group field
tips = ['%s: %s' % (g.name, g.comment) for g in gs if g.comment]
res[name_selection_groups(map(int, gs))] = {
'type': 'selection',
'string': app and app.name or _('Other'),
'selection': [(False, '')] + [(g.id, g.name) for g in gs],
'help': '\n'.join(tips),
'exportable': False,
'selectable': False,
}
else:
# boolean group fields
for g in gs:
res[name_boolean_group(g.id)] = {
'type': 'boolean',
'string': g.name,
'help': g.comment,
'exportable': False,
'selectable': False,
}
return res
#----------------------------------------------------------
# change password wizard
#----------------------------------------------------------
class change_password_wizard(osv.TransientModel):
"""
A wizard to manage the change of users' passwords
"""
_name = "change.password.wizard"
_description = "Change Password Wizard"
_columns = {
'user_ids': fields.one2many('change.password.user', 'wizard_id', string='Users'),
}
def _default_user_ids(self, cr, uid, context=None):
if context is None:
context = {}
user_model = self.pool['res.users']
user_ids = context.get('active_model') == 'res.users' and context.get('active_ids') or []
return [
(0, 0, {'user_id': user.id, 'user_login': user.login})
for user in user_model.browse(cr, uid, user_ids, context=context)
]
_defaults = {
'user_ids': _default_user_ids,
}
def change_password_button(self, cr, uid, ids, context=None):
wizard = self.browse(cr, uid, ids, context=context)[0]
need_reload = any(uid == user.user_id.id for user in wizard.user_ids)
line_ids = [user.id for user in wizard.user_ids]
self.pool.get('change.password.user').change_password_button(cr, uid, line_ids, context=context)
if need_reload:
return {
'type': 'ir.actions.client',
'tag': 'reload'
}
return {'type': 'ir.actions.act_window_close'}
class change_password_user(osv.TransientModel):
"""
A model to configure users in the change password wizard
"""
_name = 'change.password.user'
_description = 'Change Password Wizard User'
_columns = {
'wizard_id': fields.many2one('change.password.wizard', string='Wizard', required=True),
'user_id': fields.many2one('res.users', string='User', required=True),
'user_login': fields.char('User Login', readonly=True),
'new_passwd': fields.char('New Password'),
}
_defaults = {
'new_passwd': '',
}
def change_password_button(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
line.user_id.write({'password': line.new_passwd})
# don't keep temporary passwords in the database longer than necessary
self.write(cr, uid, ids, {'new_passwd': False}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# repo: zorojean/scikit-learn, path: sklearn/preprocessing/data.py
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
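# Quick illustration (not part of the original module): constant features get
# a scale of 1.0, so the later division is a no-op instead of producing NaNs:
#     _handle_zeros_in_scale(np.array([2.0, 0.0]))   # -> array([ 2.,  1.])
#     _handle_zeros_in_scale(0.0)                    # -> 1.0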
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
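# Usage sketch (illustrative, not from the original source):
#     X = np.array([[1., -1.], [2., 0.], [0., 1.]])
#     Xs = scale(X)   # each column now has zero mean and unit variance
#     Xs.mean(axis=0), Xs.std(axis=0)   # -> approx. [0. 0.] and [1. 1.]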
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
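# Worked example (illustrative): with the default feature_range=(0, 1) and a
# training column [1, 5, 9], data_min is 1 and data_range is 8, so
# scale_ = 1/8 and min_ = -1/8; transform then maps 1 -> 0.0, 5 -> 0.5,
# 9 -> 1.0:
#     MinMaxScaler().fit_transform(np.array([[1.], [5.], [9.]]))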
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
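# Usage sketch (illustrative): the Transformer-API counterpart of scale(); it
# stores mean_ and std_ at fit time so the same shift/scale can be reapplied:
#     scaler = StandardScaler().fit(X_train)     # X_train: hypothetical array
#     X_test_scaled = scaler.transform(X_test)   # reuses training statistics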
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
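# Illustrative example (not in the original file): each column is divided by
# its maximum absolute value, preserving sparsity and sign:
#     MaxAbsScaler().fit_transform(np.array([[2.], [-4.]]))   # -> [[0.5], [-1.]]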
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
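    Examples
    --------
    A minimal sketch; the toy column below contains one large outlier
    that barely affects the fitted statistics:

    >>> import numpy as np
    >>> from sklearn.preprocessing import RobustScaler
    >>> X = np.array([[1.], [2.], [3.], [4.], [100.]])
    >>> scaler = RobustScaler().fit(X)  # center_ = median = 3.0, scale_ = IQR = 2.0
    >>> Xt = scaler.transform(X)        # (X - 3.0) / 2.0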
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
            The data that should be scaled.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
            The data that should be transformed back.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
    Center to the median and scale component-wise
    according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
    `with_centering=False` (in that case, only scaling to the
    interquartile range will be performed on the features of the CSR
    matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
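    Examples
    --------
    A quick functional sketch (toy data for illustration):

    >>> import numpy as np
    >>> from sklearn.preprocessing import robust_scale
    >>> Xt = robust_scale(np.array([[1.], [2.], [3.], [4.], [100.]]))
    >>> # centered on the median (3.0) and divided by the IQR (2.0);
    >>> # the outlier 100.0 does not distort these statistics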
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
        unnecessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non-zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
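    Examples
    --------
    A minimal sketch (toy data for illustration):

    >>> import numpy as np
    >>> from sklearn.preprocessing import normalize
    >>> Xn = normalize(np.array([[3., 4.], [1., 0.]]), norm='l2')
    >>> # rows are rescaled to unit l2 norm: [[0.6, 0.8], [1., 0.]]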
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
    non-zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
    scipy.sparse matrices (use CSR format if you want to avoid the burden of
a copy / conversion).
    Scaling inputs to unit norms is a common operation for text
    classification or clustering. For instance, the dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
        The norm to use to normalize each non-zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
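    Examples
    --------
    A minimal sketch (toy data for illustration):

    >>> import numpy as np
    >>> from sklearn.preprocessing import Normalizer
    >>> normalizer = Normalizer(norm='l1')
    >>> Xn = normalizer.fit_transform(np.array([[1., 3.], [2., 2.]]))
    >>> # each row now sums to 1 in absolute value: [[0.25, 0.75], [0.5, 0.5]]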
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
            in CSR format to avoid an unnecessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
        unnecessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
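    Examples
    --------
    A minimal sketch (toy data for illustration):

    >>> import numpy as np
    >>> from sklearn.preprocessing import binarize
    >>> Xb = binarize(np.array([[0.4, 0.6], [1.2, -3.0]]), threshold=0.5)
    >>> # values above 0.5 map to 1, the rest to 0: [[0., 1.], [1., 0.]]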
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
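    Examples
    --------
    A minimal sketch (toy data for illustration):

    >>> import numpy as np
    >>> from sklearn.preprocessing import Binarizer
    >>> binarizer = Binarizer(threshold=0.0)
    >>> Xb = binarizer.fit_transform(np.array([[1., -1., 2.]]))
    >>> # only strictly positive values map to 1: [[1., 0., 1.]]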
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
            unnecessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
    Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
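    Examples
    --------
    A minimal sketch with a linear kernel (phi is the identity, so the
    centered kernel equals the kernel of the column-centered data):

    >>> import numpy as np
    >>> from sklearn.preprocessing import KernelCenterer
    >>> X = np.array([[1., 2.], [2., 0.], [0., 1.]])
    >>> K = X.dot(X.T)                       # linear kernel matrix
    >>> Kc = KernelCenterer().fit_transform(K)
    >>> # Kc == Xc.dot(Xc.T) where Xc = X - X.mean(axis=0), but computed
    >>> # without ever forming the centered features explicitly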
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
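
    For instance (a hypothetical call), ``_transform_selected(X, binarize,
    selected=[0])`` would binarize only the first column and stack the
    remaining columns, untouched, to its right.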
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
        - int : number of values for all features; every feature value
          must lie in [0, n_values).
        - array : number of values per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
        X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those less than n_values_, using mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
        X_out : sparse matrix if sparse=True else a 2-d array, with the
            dtype given by the ``dtype`` parameter.
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause | 7,715,055,491,758,691,000 | 7,966,574,425,173,805,000 | 34.961343 | 79 | 0.596278 | false |
chugle/myapp | applications/examples/models/markmin.py | 13 | 1087 | import gluon.template
markmin_dict = dict(
code_python=lambda code: str(CODE(code)),
template=lambda
code: gluon.template.render(code, context=globals()),
sup=lambda
code: '<sup style="font-size:0.5em;">%s</sup>' % code,
br=lambda n: '<br>' * int(n),
groupdates=lambda group: group_feed_reader(group),
)
def get_content(b=None,
c=request.controller,
f=request.function,
l='en',
format='markmin'):
"""Gets and renders the file in
<app>/private/content/<lang>/<controller>/<function>/<block>.<format>
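
    For instance, ``get_content('about')`` (with the defaults) renders
    <app>/private/content/en/<controller>/<function>/about.markmin,
    falling back to 'en' when the requested language is missing.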
"""
def openfile():
import os
path = os.path.join(
request.folder, 'private', 'content', l, c, f, b + '.' + format)
return open(path)
try:
openedfile = openfile()
    except IOError:
        # fall back to English if the localized file is missing
        l = 'en'
openedfile = openfile()
if format == 'markmin':
html = MARKMIN(str(T(openedfile.read())), markmin_dict)
else:
html = str(T(openedfile.read()))
openedfile.close()
return html
| gpl-2.0 | -5,680,040,558,546,312,000 | -3,486,229,028,740,129,000 | 25.512195 | 76 | 0.563017 | false |
Pluto-tv/chromium-crosswalk | tools/telemetry/third_party/gsutilz/third_party/boto/boto/ec2/autoscale/launchconfig.py | 135 | 10807 | # Copyright (c) 2009 Reza Lotun http://reza.lotun.name/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.ec2.elb.listelement import ListElement
# Namespacing issue with deprecated local class
from boto.ec2.blockdevicemapping import BlockDeviceMapping as BDM
from boto.resultset import ResultSet
import boto.utils
import base64
# this should use the corresponding object from boto.ec2
# Currently in use by deprecated local BlockDeviceMapping class
class Ebs(object):
def __init__(self, connection=None, snapshot_id=None, volume_size=None):
self.connection = connection
self.snapshot_id = snapshot_id
self.volume_size = volume_size
def __repr__(self):
return 'Ebs(%s, %s)' % (self.snapshot_id, self.volume_size)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'SnapshotId':
self.snapshot_id = value
elif name == 'VolumeSize':
self.volume_size = value
class InstanceMonitoring(object):
def __init__(self, connection=None, enabled='false'):
self.connection = connection
self.enabled = enabled
def __repr__(self):
return 'InstanceMonitoring(%s)' % self.enabled
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Enabled':
self.enabled = value
# this should use the BlockDeviceMapping from boto.ec2.blockdevicemapping
# Currently in use by deprecated code for backwards compatibility.
# Removing this class would also allow removing the Ebs class in this same file.
class BlockDeviceMapping(object):
def __init__(self, connection=None, device_name=None, virtual_name=None,
ebs=None, no_device=None):
self.connection = connection
self.device_name = device_name
self.virtual_name = virtual_name
self.ebs = ebs
self.no_device = no_device
def __repr__(self):
return 'BlockDeviceMapping(%s, %s)' % (self.device_name,
self.virtual_name)
def startElement(self, name, attrs, connection):
if name == 'Ebs':
self.ebs = Ebs(self)
return self.ebs
def endElement(self, name, value, connection):
if name == 'DeviceName':
self.device_name = value
elif name == 'VirtualName':
self.virtual_name = value
elif name == 'NoDevice':
self.no_device = bool(value)
class LaunchConfiguration(object):
def __init__(self, connection=None, name=None, image_id=None,
key_name=None, security_groups=None, user_data=None,
instance_type='m1.small', kernel_id=None,
ramdisk_id=None, block_device_mappings=None,
instance_monitoring=False, spot_price=None,
instance_profile_name=None, ebs_optimized=False,
associate_public_ip_address=None, volume_type=None,
delete_on_termination=True, iops=None,
use_block_device_types=False, classic_link_vpc_id=None,
classic_link_vpc_security_groups=None):
"""
A launch configuration.
:type name: str
:param name: Name of the launch configuration to create.
:type image_id: str
:param image_id: Unique ID of the Amazon Machine Image (AMI) which was
assigned during registration.
:type key_name: str
:param key_name: The name of the EC2 key pair.
:type security_groups: list
:param security_groups: Names or security group id's of the security
groups with which to associate the EC2 instances or VPC instances,
respectively.
:type user_data: str
:param user_data: The user data available to launched EC2 instances.
:type instance_type: str
:param instance_type: The instance type
:type kernel_id: str
:param kernel_id: Kernel id for instance
:type ramdisk_id: str
:param ramdisk_id: RAM disk id for instance
:type block_device_mappings: list
:param block_device_mappings: Specifies how block devices are exposed
for instances
:type instance_monitoring: bool
:param instance_monitoring: Whether instances in group are launched
with detailed monitoring.
:type spot_price: float
:param spot_price: The spot price you are bidding. Only applies
if you are building an autoscaling group with spot instances.
:type instance_profile_name: string
:param instance_profile_name: The name or the Amazon Resource
Name (ARN) of the instance profile associated with the IAM
role for the instance.
:type ebs_optimized: bool
:param ebs_optimized: Specifies whether the instance is optimized
for EBS I/O (true) or not (false).
:type associate_public_ip_address: bool
:param associate_public_ip_address: Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud.
Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
:type classic_link_vpc_id: str
:param classic_link_vpc_id: ID of ClassicLink enabled VPC.
:type classic_link_vpc_security_groups: list
:param classic_link_vpc_security_groups: Security group
id's of the security groups with which to associate the
ClassicLink VPC instances.
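
        Example (an illustrative sketch; the AMI id, key pair and
        security group names below are placeholders, not real
        resources)::

            lc = LaunchConfiguration(
                name='my-launch-config',
                image_id='ami-12345678',
                key_name='my-keypair',
                security_groups=['webserver'],
                instance_type='m1.small')

        The resulting object is typically passed to an autoscale
        connection's ``create_launch_configuration`` method.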
"""
self.connection = connection
self.name = name
self.instance_type = instance_type
self.block_device_mappings = block_device_mappings
self.key_name = key_name
sec_groups = security_groups or []
self.security_groups = ListElement(sec_groups)
self.image_id = image_id
self.ramdisk_id = ramdisk_id
self.created_time = None
self.kernel_id = kernel_id
self.user_data = user_data
self.created_time = None
self.instance_monitoring = instance_monitoring
self.spot_price = spot_price
self.instance_profile_name = instance_profile_name
self.launch_configuration_arn = None
self.ebs_optimized = ebs_optimized
self.associate_public_ip_address = associate_public_ip_address
self.volume_type = volume_type
self.delete_on_termination = delete_on_termination
self.iops = iops
self.use_block_device_types = use_block_device_types
self.classic_link_vpc_id = classic_link_vpc_id
classic_link_vpc_sec_groups = classic_link_vpc_security_groups or []
self.classic_link_vpc_security_groups = \
ListElement(classic_link_vpc_sec_groups)
if connection is not None:
self.use_block_device_types = connection.use_block_device_types
def __repr__(self):
return 'LaunchConfiguration:%s' % self.name
def startElement(self, name, attrs, connection):
if name == 'SecurityGroups':
return self.security_groups
elif name == 'ClassicLinkVPCSecurityGroups':
return self.classic_link_vpc_security_groups
elif name == 'BlockDeviceMappings':
if self.use_block_device_types:
self.block_device_mappings = BDM()
else:
self.block_device_mappings = ResultSet([('member', BlockDeviceMapping)])
return self.block_device_mappings
elif name == 'InstanceMonitoring':
self.instance_monitoring = InstanceMonitoring(self)
return self.instance_monitoring
def endElement(self, name, value, connection):
if name == 'InstanceType':
self.instance_type = value
elif name == 'LaunchConfigurationName':
self.name = value
elif name == 'KeyName':
self.key_name = value
elif name == 'ImageId':
self.image_id = value
elif name == 'CreatedTime':
self.created_time = boto.utils.parse_ts(value)
elif name == 'KernelId':
self.kernel_id = value
elif name == 'RamdiskId':
self.ramdisk_id = value
elif name == 'UserData':
try:
self.user_data = base64.b64decode(value)
except TypeError:
self.user_data = value
elif name == 'LaunchConfigurationARN':
self.launch_configuration_arn = value
elif name == 'InstanceMonitoring':
self.instance_monitoring = value
elif name == 'SpotPrice':
self.spot_price = float(value)
elif name == 'IamInstanceProfile':
self.instance_profile_name = value
elif name == 'EbsOptimized':
self.ebs_optimized = True if value.lower() == 'true' else False
elif name == 'AssociatePublicIpAddress':
self.associate_public_ip_address = True if value.lower() == 'true' else False
elif name == 'VolumeType':
self.volume_type = value
elif name == 'DeleteOnTermination':
if value.lower() == 'true':
self.delete_on_termination = True
else:
self.delete_on_termination = False
elif name == 'Iops':
self.iops = int(value)
elif name == 'ClassicLinkVPCId':
self.classic_link_vpc_id = value
else:
setattr(self, name, value)
def delete(self):
""" Delete this launch configuration. """
return self.connection.delete_launch_configuration(self.name)
| bsd-3-clause | 3,725,252,507,359,023,000 | 6,724,451,411,679,085,000 | 39.025926 | 132 | 0.635236 | false |
wasiqmukhtar/tcp-eval.wasiq | src/antenna/bindings/modulegen__gcc_ILP32.py | 68 | 90458 | from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.antenna', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## angles.h (module 'antenna'): ns3::Angles [struct]
module.add_class('Angles')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## antenna-model.h (module 'antenna'): ns3::AntennaModel [class]
module.add_class('AntennaModel', parent=root_module['ns3::Object'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel [class]
module.add_class('CosineAntennaModel', parent=root_module['ns3::AntennaModel'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel [class]
module.add_class('IsotropicAntennaModel', parent=root_module['ns3::AntennaModel'])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel [class]
module.add_class('ParabolicAntennaModel', parent=root_module['ns3::AntennaModel'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
def register_methods(root_module):
register_Ns3Angles_methods(root_module, root_module['ns3::Angles'])
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3AntennaModel_methods(root_module, root_module['ns3::AntennaModel'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3CosineAntennaModel_methods(root_module, root_module['ns3::CosineAntennaModel'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3IsotropicAntennaModel_methods(root_module, root_module['ns3::IsotropicAntennaModel'])
register_Ns3ParabolicAntennaModel_methods(root_module, root_module['ns3::ParabolicAntennaModel'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3Angles_methods(root_module, cls):
cls.add_output_stream_operator()
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Angles const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Angles const &', 'arg0')])
## angles.h (module 'antenna'): ns3::Angles::Angles() [constructor]
cls.add_constructor([])
## angles.h (module 'antenna'): ns3::Angles::Angles(double phi, double theta) [constructor]
cls.add_constructor([param('double', 'phi'), param('double', 'theta')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v) [constructor]
cls.add_constructor([param('ns3::Vector', 'v')])
## angles.h (module 'antenna'): ns3::Angles::Angles(ns3::Vector v, ns3::Vector o) [constructor]
cls.add_constructor([param('ns3::Vector', 'v'), param('ns3::Vector', 'o')])
## angles.h (module 'antenna'): ns3::Angles::phi [variable]
cls.add_instance_attribute('phi', 'double', is_const=False)
## angles.h (module 'antenna'): ns3::Angles::theta [variable]
cls.add_instance_attribute('theta', 'double', is_const=False)
return
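# Illustration only (not part of the generated module): once these bindings
# are compiled, the wrapped class is usable from Python roughly as
#   import ns.antenna
#   a = ns.antenna.Angles(0.5, 1.0)   # phi, theta in radians
#   a.phi, a.theta                    # plain read/write attributes
# The exact import path depends on how the ns-3 Python bindings are
# packaged in a given ns-3 version, so treat the module name as an
# assumption.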
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')],
deprecated=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor, std::string callback) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor'), param('std::string', 'callback')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): std::size_t ns3::TypeId::GetSize() const [member function]
cls.add_method('GetSize',
'std::size_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetSize(std::size_t size) [member function]
cls.add_method('SetSize',
'ns3::TypeId',
[param('std::size_t', 'size')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::callback [variable]
cls.add_instance_attribute('callback', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AntennaModel_methods(root_module, cls):
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel(ns3::AntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AntennaModel const &', 'arg0')])
## antenna-model.h (module 'antenna'): ns3::AntennaModel::AntennaModel() [constructor]
cls.add_constructor([])
## antenna-model.h (module 'antenna'): double ns3::AntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_pure_virtual=True, is_virtual=True)
## antenna-model.h (module 'antenna'): static ns3::TypeId ns3::AntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): std::string ns3::CallbackImplBase::GetTypeid() const [member function]
cls.add_method('GetTypeid',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## callback.h (module 'core'): static std::string ns3::CallbackImplBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3CosineAntennaModel_methods(root_module, cls):
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel() [constructor]
cls.add_constructor([])
## cosine-antenna-model.h (module 'antenna'): ns3::CosineAntennaModel::CosineAntennaModel(ns3::CosineAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CosineAntennaModel const &', 'arg0')])
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## cosine-antenna-model.h (module 'antenna'): double ns3::CosineAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## cosine-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::CosineAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## cosine-antenna-model.h (module 'antenna'): void ns3::CosineAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
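# Illustrative use of the wrapped class from Python, assuming the standard
# ns-3 Python bindings are importable; the method names follow exactly the
# wrappers registered above, while the 'ns.antenna' module path is an
# assumption:
#   m = ns.antenna.CosineAntennaModel()
#   m.SetBeamwidth(60.0)              # degrees
#   m.SetOrientation(0.0)             # degrees
#   g = m.GetGainDb(ns.antenna.Angles(0.0, 1.57))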
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3IsotropicAntennaModel_methods(root_module, cls):
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel(ns3::IsotropicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::IsotropicAntennaModel const &', 'arg0')])
## isotropic-antenna-model.h (module 'antenna'): ns3::IsotropicAntennaModel::IsotropicAntennaModel() [constructor]
cls.add_constructor([])
## isotropic-antenna-model.h (module 'antenna'): double ns3::IsotropicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## isotropic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::IsotropicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
return
def register_Ns3ParabolicAntennaModel_methods(root_module, cls):
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel() [constructor]
cls.add_constructor([])
## parabolic-antenna-model.h (module 'antenna'): ns3::ParabolicAntennaModel::ParabolicAntennaModel(ns3::ParabolicAntennaModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ParabolicAntennaModel const &', 'arg0')])
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetBeamwidth() const [member function]
cls.add_method('GetBeamwidth',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetGainDb(ns3::Angles a) [member function]
cls.add_method('GetGainDb',
'double',
[param('ns3::Angles', 'a')],
is_virtual=True)
## parabolic-antenna-model.h (module 'antenna'): double ns3::ParabolicAntennaModel::GetOrientation() const [member function]
cls.add_method('GetOrientation',
'double',
[],
is_const=True)
## parabolic-antenna-model.h (module 'antenna'): static ns3::TypeId ns3::ParabolicAntennaModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetBeamwidth(double beamwidthDegrees) [member function]
cls.add_method('SetBeamwidth',
'void',
[param('double', 'beamwidthDegrees')])
## parabolic-antenna-model.h (module 'antenna'): void ns3::ParabolicAntennaModel::SetOrientation(double orientationDegrees) [member function]
cls.add_method('SetOrientation',
'void',
[param('double', 'orientationDegrees')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
## angles.h (module 'antenna'): extern double ns3::DegreesToRadians(double degrees) [free function]
module.add_function('DegreesToRadians',
'double',
[param('double', 'degrees')])
## angles.h (module 'antenna'): extern double ns3::RadiansToDegrees(double radians) [free function]
module.add_function('RadiansToDegrees',
'double',
[param('double', 'radians')])
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
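# Hedged usage note, inferred from main() above rather than from ns-3 docs:
# running this generated module as a script writes the C++ binding code to
# stdout, e.g. python <this_module>.py > bindings.cc (names illustrative).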
| gpl-2.0 | 5,153,383,364,505,956,000 | -6,545,621,648,853,416,000 | 66.606876 | 383 | 0.643989 | false |
molotof/infernal-twin | build/reportlab/src/reportlab/platypus/__init__.py | 29 | 1203 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/__init__.py
__version__=''' $Id$ '''
__doc__='''"Page Layout and Typography Using Scripts" - higher-level framework for flowing documents'''
from reportlab.platypus.flowables import Flowable, Image, Macro, PageBreak, Preformatted, Spacer, XBox, \
CondPageBreak, KeepTogether, TraceInfo, FailOnWrap, FailOnDraw, PTOContainer, \
KeepInFrame, ParagraphAndImage, ImageAndFlowables, ListFlowable, ListItem, FrameBG, \
PageBreakIfNotEmpty
from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, ParaLines
from reportlab.platypus.paraparser import ParaFrag
from reportlab.platypus.tables import Table, TableStyle, CellStyle, LongTable
from reportlab.platypus.frames import Frame
from reportlab.platypus.doctemplate import BaseDocTemplate, NextPageTemplate, PageTemplate, ActionFlowable, \
SimpleDocTemplate, FrameBreak, PageBegin, Indenter, NotAtTopPageBreak
from reportlab.platypus.xpreformatted import XPreformatted
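# A minimal, hedged usage sketch of the flowable API re-exported above
# (the output filename is illustrative):
# from reportlab.lib.styles import getSampleStyleSheet
# from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
# style = getSampleStyleSheet()['Normal']
# SimpleDocTemplate('hello.pdf').build(
#     [Paragraph('Hello, platypus', style), Spacer(1, 12)])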
| gpl-3.0 | 9,025,484,507,914,436,000 | 5,615,596,751,038,547,000 | 69.764706 | 109 | 0.757273 | false |
jorsea/odoo-addons | product_force_create_variants/__openerp__.py | 8 | 1477 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'author': 'ADHOC SA.',
'category': 'base.module_category_knowledge_management',
'demo_xml': [],
'depends': ['product'],
'description': """
Product Force Create Variants
=============================
This module adds a wizard to force the creation of a product template's
variants (see ``wizard/product_force_create_variant_view.xml``).
""",
'installable': True,
'license': 'AGPL-3',
'name': u'Product Force Create Variants',
'test': [],
'data': [
'wizard/product_force_create_variant_view.xml',
],
'website': 'www.adhoc.com.ar'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,299,713,120,136,771,600 | 3,770,673,059,241,588,700 | 36.871795 | 78 | 0.584292 | false |
ikargis/horizon_fod | openstack_dashboard/dashboards/project/routers/ports/forms.py | 12 | 7736 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon.utils import fields
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class AddInterface(forms.SelfHandlingForm):
subnet_id = forms.ChoiceField(label=_("Subnet"))
ip_address = fields.IPField(
label=_("IP Address (optional)"), required=False, initial="",
help_text=_("You can specify an IP address of the interface "
"created if you want (e.g. 192.168.0.254)."),
version=fields.IPv4 | fields.IPv6, mask=False)
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:detail'
def __init__(self, request, *args, **kwargs):
super(AddInterface, self).__init__(request, *args, **kwargs)
c = self.populate_subnet_id_choices(request)
self.fields['subnet_id'].choices = c
def populate_subnet_id_choices(self, request):
tenant_id = self.request.user.tenant_id
networks = []
try:
networks = api.neutron.network_list_for_tenant(request, tenant_id)
except Exception as e:
msg = _('Failed to get network list %s') % e.message
LOG.info(msg)
messages.error(request, msg)
redirect = reverse(self.failure_url,
args=[request.REQUEST['router_id']])
exceptions.handle(request, msg, redirect=redirect)
return
choices = []
for n in networks:
net_name = n.name + ': ' if n.name else ''
choices += [(subnet.id,
'%s%s (%s)' % (net_name, subnet.cidr,
subnet.name or subnet.id))
for subnet in n['subnets']]
if choices:
choices.insert(0, ("", _("Select Subnet")))
else:
choices.insert(0, ("", _("No subnets available.")))
return choices
def handle(self, request, data):
if data['ip_address']:
port = self._add_interface_by_port(request, data)
else:
port = self._add_interface_by_subnet(request, data)
msg = _('Interface added')
if port:
msg += ' ' + port.fixed_ips[0]['ip_address']
LOG.debug(msg)
messages.success(request, msg)
return True
def _add_interface_by_subnet(self, request, data):
router_id = data['router_id']
try:
router_inf = api.neutron.router_add_interface(
request, router_id, subnet_id=data['subnet_id'])
except Exception as e:
self._handle_error(request, router_id, e)
try:
port = api.neutron.port_get(request, router_inf['port_id'])
except Exception:
# Ignore an error when port_get() since it is just
# to get an IP address for the interface.
port = None
return port
def _add_interface_by_port(self, request, data):
router_id = data['router_id']
subnet_id = data['subnet_id']
try:
subnet = api.neutron.subnet_get(request, subnet_id)
except Exception:
msg = _('Unable to get subnet "%s"') % subnet_id
self._handle_error(request, router_id, msg)
try:
ip_address = data['ip_address']
body = {'network_id': subnet.network_id,
'fixed_ips': [{'subnet_id': subnet.id,
'ip_address': ip_address}]}
port = api.neutron.port_create(request, **body)
except Exception as e:
self._handle_error(request, router_id, e)
try:
api.neutron.router_add_interface(request, router_id,
port_id=port.id)
except Exception as e:
self._delete_port(request, port)
self._handle_error(request, router_id, e)
return port
def _handle_error(self, request, router_id, reason):
msg = _('Failed to add_interface: %s') % reason
LOG.info(msg)
redirect = reverse(self.failure_url, args=[router_id])
exceptions.handle(request, msg, redirect=redirect)
def _delete_port(self, request, port):
try:
api.neutron.port_delete(request, port.id)
except Exception:
msg = _('Failed to delete port %s') % port.id
LOG.info(msg)
exceptions.handle(request, msg)
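# Hedged recap of the two attach paths implemented above (values are
# illustrative): with no IP address the form calls
# api.neutron.router_add_interface(request, router_id, subnet_id=subnet_id)
# and Neutron picks the interface address; with an IP address it first
# creates a port fixed to that address, then calls
# api.neutron.router_add_interface(request, router_id, port_id=port.id).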
class SetGatewayForm(forms.SelfHandlingForm):
network_id = forms.ChoiceField(label=_("External Network"))
router_name = forms.CharField(label=_("Router Name"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
router_id = forms.CharField(label=_("Router ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
failure_url = 'horizon:project:routers:index'
def __init__(self, request, *args, **kwargs):
super(SetGatewayForm, self).__init__(request, *args, **kwargs)
c = self.populate_network_id_choices(request)
self.fields['network_id'].choices = c
def populate_network_id_choices(self, request):
search_opts = {'router:external': True}
try:
networks = api.neutron.network_list(request, **search_opts)
except Exception as e:
msg = _('Failed to get network list %s') % e.message
LOG.info(msg)
messages.error(request, msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return
choices = [(network.id, network.name or network.id)
for network in networks]
if choices:
choices.insert(0, ("", _("Select network")))
else:
choices.insert(0, ("", _("No networks available.")))
return choices
def handle(self, request, data):
try:
api.neutron.router_add_gateway(request,
data['router_id'],
data['network_id'])
msg = _('Gateway interface is added')
LOG.debug(msg)
messages.success(request, msg)
return True
except Exception as e:
msg = _('Failed to set gateway %s') % e.message
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
| apache-2.0 | -6,757,672,786,645,593,000 | -228,679,028,528,022,750 | 39.502618 | 78 | 0.558299 | false |
HuimingCheng/AutoGrading | learning/number recognization/network.py | 1 | 12668 | # # Written for Theano 0.6 and 0.7, needs some changes for more recent
# # versions of Theano.
#
#
# #### Libraries
# # Standard library
# import cPickle
# import gzip
#
# # Third-party libraries
# import numpy as np
# import theano
# import theano.tensor as T
# from theano.tensor.nnet import conv
# from theano.tensor.nnet import softmax
# from theano.tensor import shared_randomstreams
# from theano.tensor.signal import downsample
#
# # Activation functions for neurons
# def linear(z): return z
# def ReLU(z): return T.maximum(0.0, z)
# from theano.tensor.nnet import sigmoid
# from theano.tensor import tanh
#
#
# #### Constants
# GPU = True
# if GPU:
# print "Trying to run under a GPU. If this is not desired, then modify "+\
# "network3.py\nto set the GPU flag to False."
# try: theano.config.device = 'gpu'
# except: pass # it's already set
# theano.config.floatX = 'float32'
# else:
# print "Running with a CPU. If this is not desired, then the modify "+\
# "network3.py to set\nthe GPU flag to True."
#
# #### Load the MNIST data
# def load_data_shared(filename="../data/mnist.pkl.gz"):
# f = gzip.open(filename, 'rb')
# training_data, validation_data, test_data = cPickle.load(f)
# f.close()
# def shared(data):
# """Place the data into shared variables. This allows Theano to copy
# the data to the GPU, if one is available.
# """
# shared_x = theano.shared(
# np.asarray(data[0], dtype=theano.config.floatX), borrow=True)
# shared_y = theano.shared(
# np.asarray(data[1], dtype=theano.config.floatX), borrow=True)
# return shared_x, T.cast(shared_y, "int32")
# return [shared(training_data), shared(validation_data), shared(test_data)]
#
# #### Main class used to construct and train networks
# class Network(object):
#
# def __init__(self, layers, mini_batch_size):
# """Takes a list of `layers`, describing the network architecture, and
# a value for the `mini_batch_size` to be used during training
# by stochastic gradient descent.
# """
# self.layers = layers
# self.mini_batch_size = mini_batch_size
# self.params = [param for layer in self.layers for param in layer.params]
# self.x = T.matrix("x")
# self.y = T.ivector("y")
# init_layer = self.layers[0]
# init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
# for j in xrange(1, len(self.layers)):
# prev_layer, layer = self.layers[j-1], self.layers[j]
# layer.set_inpt(
# prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
# self.output = self.layers[-1].output
# self.output_dropout = self.layers[-1].output_dropout
#
# def SGD(self, training_data, epochs, mini_batch_size, eta,
# validation_data, test_data, lmbda=0.0):
# """Train the network using mini-batch stochastic gradient descent."""
# training_x, training_y = training_data
# validation_x, validation_y = validation_data
# test_x, test_y = test_data
#
# # compute number of minibatches for training, validation and testing
# num_training_batches = size(training_data)/mini_batch_size
# num_validation_batches = size(validation_data)/mini_batch_size
# num_test_batches = size(test_data)/mini_batch_size
#
# # define the (regularized) cost function, symbolic gradients, and updates
# l2_norm_squared = sum([(layer.w**2).sum() for layer in self.layers])
# cost = self.layers[-1].cost(self)+\
# 0.5*lmbda*l2_norm_squared/num_training_batches
# grads = T.grad(cost, self.params)
# updates = [(param, param-eta*grad)
# for param, grad in zip(self.params, grads)]
#
# # define functions to train a mini-batch, and to compute the
# # accuracy in validation and test mini-batches.
# i = T.lscalar() # mini-batch index
# train_mb = theano.function(
# [i], cost, updates=updates,
# givens={
# self.x:
# training_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
# self.y:
# training_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
# })
# validate_mb_accuracy = theano.function(
# [i], self.layers[-1].accuracy(self.y),
# givens={
# self.x:
# validation_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
# self.y:
# validation_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
# })
# test_mb_accuracy = theano.function(
# [i], self.layers[-1].accuracy(self.y),
# givens={
# self.x:
# test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size],
# self.y:
# test_y[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
# })
# self.test_mb_predictions = theano.function(
# [i], self.layers[-1].y_out,
# givens={
# self.x:
# test_x[i*self.mini_batch_size: (i+1)*self.mini_batch_size]
# })
# # Do the actual training
# best_validation_accuracy = 0.0
# for epoch in xrange(epochs):
# for minibatch_index in xrange(num_training_batches):
# iteration = num_training_batches*epoch+minibatch_index
# if iteration % 1000 == 0:
# print("Training mini-batch number {0}".format(iteration))
# cost_ij = train_mb(minibatch_index)
# if (iteration+1) % num_training_batches == 0:
# validation_accuracy = np.mean(
# [validate_mb_accuracy(j) for j in xrange(num_validation_batches)])
# print("Epoch {0}: validation accuracy {1:.2%}".format(
# epoch, validation_accuracy))
# if validation_accuracy >= best_validation_accuracy:
# print("This is the best validation accuracy to date.")
# best_validation_accuracy = validation_accuracy
# best_iteration = iteration
# if test_data:
# test_accuracy = np.mean(
# [test_mb_accuracy(j) for j in xrange(num_test_batches)])
# print('The corresponding test accuracy is {0:.2%}'.format(
# test_accuracy))
# print("Finished training network.")
# print("Best validation accuracy of {0:.2%} obtained at iteration {1}".format(
# best_validation_accuracy, best_iteration))
# print("Corresponding test accuracy of {0:.2%}".format(test_accuracy))
#
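# Hedged usage sketch (mirrors Nielsen's network3.py, which the GPU message
# above references; the MNIST shapes below are illustrative):
# training_data, validation_data, test_data = load_data_shared()
# mini_batch_size = 10
# net = Network([
# ConvPoolLayer(image_shape=(mini_batch_size, 1, 28, 28),
# filter_shape=(20, 1, 5, 5), poolsize=(2, 2)),
# FullyConnectedLayer(n_in=20*12*12, n_out=100),
# SoftmaxLayer(n_in=100, n_out=10)], mini_batch_size)
# net.SGD(training_data, 60, mini_batch_size, 0.1, validation_data, test_data)
#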
# #### Define layer types
#
# class ConvPoolLayer(object):
# """Used to create a combination of a convolutional and a max-pooling
# layer. A more sophisticated implementation would separate the
# two, but for our purposes we'll always use them together, and it
# simplifies the code, so it makes sense to combine them.
# """
#
# def __init__(self, filter_shape, image_shape, poolsize=(2, 2),
# activation_fn=sigmoid):
# """`filter_shape` is a tuple of length 4, whose entries are the number
# of filters, the number of input feature maps, the filter height, and the
# filter width.
# `image_shape` is a tuple of length 4, whose entries are the
# mini-batch size, the number of input feature maps, the image
# height, and the image width.
# `poolsize` is a tuple of length 2, whose entries are the y and
# x pooling sizes.
# """
# self.filter_shape = filter_shape
# self.image_shape = image_shape
# self.poolsize = poolsize
# self.activation_fn=activation_fn
# # initialize weights and biases
# n_out = (filter_shape[0]*np.prod(filter_shape[2:])/np.prod(poolsize))
# self.w = theano.shared(
# np.asarray(
# np.random.normal(loc=0, scale=np.sqrt(1.0/n_out), size=filter_shape),
# dtype=theano.config.floatX),
# borrow=True)
# self.b = theano.shared(
# np.asarray(
# np.random.normal(loc=0, scale=1.0, size=(filter_shape[0],)),
# dtype=theano.config.floatX),
# borrow=True)
# self.params = [self.w, self.b]
#
# def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
# self.inpt = inpt.reshape(self.image_shape)
# conv_out = conv.conv2d(
# input=self.inpt, filters=self.w, filter_shape=self.filter_shape,
# image_shape=self.image_shape)
# pooled_out = downsample.max_pool_2d(
# input=conv_out, ds=self.poolsize, ignore_border=True)
# self.output = self.activation_fn(
# pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
# self.output_dropout = self.output # no dropout in the convolutional layers
#
# class FullyConnectedLayer(object):
#
# def __init__(self, n_in, n_out, activation_fn=sigmoid, p_dropout=0.0):
# self.n_in = n_in
# self.n_out = n_out
# self.activation_fn = activation_fn
# self.p_dropout = p_dropout
# # Initialize weights and biases
# self.w = theano.shared(
# np.asarray(
# np.random.normal(
# loc=0.0, scale=np.sqrt(1.0/n_out), size=(n_in, n_out)),
# dtype=theano.config.floatX),
# name='w', borrow=True)
# self.b = theano.shared(
# np.asarray(np.random.normal(loc=0.0, scale=1.0, size=(n_out,)),
# dtype=theano.config.floatX),
# name='b', borrow=True)
# self.params = [self.w, self.b]
#
# def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
# self.inpt = inpt.reshape((mini_batch_size, self.n_in))
# self.output = self.activation_fn(
# (1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
# self.y_out = T.argmax(self.output, axis=1)
# self.inpt_dropout = dropout_layer(
# inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
# self.output_dropout = self.activation_fn(
# T.dot(self.inpt_dropout, self.w) + self.b)
#
# def accuracy(self, y):
# "Return the accuracy for the mini-batch."
# return T.mean(T.eq(y, self.y_out))
#
# class SoftmaxLayer(object):
#
# def __init__(self, n_in, n_out, p_dropout=0.0):
# self.n_in = n_in
# self.n_out = n_out
# self.p_dropout = p_dropout
# # Initialize weights and biases
# self.w = theano.shared(
# np.zeros((n_in, n_out), dtype=theano.config.floatX),
# name='w', borrow=True)
# self.b = theano.shared(
# np.zeros((n_out,), dtype=theano.config.floatX),
# name='b', borrow=True)
# self.params = [self.w, self.b]
#
# def set_inpt(self, inpt, inpt_dropout, mini_batch_size):
# self.inpt = inpt.reshape((mini_batch_size, self.n_in))
# self.output = softmax((1-self.p_dropout)*T.dot(self.inpt, self.w) + self.b)
# self.y_out = T.argmax(self.output, axis=1)
# self.inpt_dropout = dropout_layer(
# inpt_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
# self.output_dropout = softmax(T.dot(self.inpt_dropout, self.w) + self.b)
#
# def cost(self, net):
# "Return the log-likelihood cost."
# return -T.mean(T.log(self.output_dropout)[T.arange(net.y.shape[0]), net.y])
#
# def accuracy(self, y):
# "Return the accuracy for the mini-batch."
# return T.mean(T.eq(y, self.y_out))
#
#
# #### Miscellanea
# def size(data):
# "Return the size of the dataset `data`."
# return data[0].get_value(borrow=True).shape[0]
#
# def dropout_layer(layer, p_dropout):
# srng = shared_randomstreams.RandomStreams(
# np.random.RandomState(0).randint(999999))
# mask = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
# return layer*T.cast(mask, theano.config.floatX)
| mit | 8,246,930,248,080,784,000 | -5,170,010,686,072,394,000 | 43.25 | 92 | 0.558573 | false |
yanchen036/tensorflow | tensorflow/contrib/autograph/impl/naming_test.py | 4 | 2899 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for naming module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.autograph.impl import naming
from tensorflow.python.platform import test
class NamerTest(test.TestCase):
def test_compiled_function_name_tracks_names(self):
def bar():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name('foo'))
self.assertEqual(('tf__bar', True), namer.compiled_function_name(
'bar', bar))
self.assertEqual({bar: 'tf__bar'}, namer.renamed_calls)
self.assertItemsEqual(('tf__bar', 'tf__foo'), namer.generated_names)
def test_compiled_function_name_consistent(self):
def foo():
pass
namer = naming.Namer({}, True, None, ())
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
self.assertEqual(('tf__foo', True), namer.compiled_function_name(
'foo', foo))
def test_compiled_function_name_avoids_global_conflicts(self):
def foo():
pass
namer = naming.Namer({'tf__foo': 1}, True, None, ())
self.assertEqual(('tf__foo_1', True),
namer.compiled_function_name('foo', foo))
def test_new_symbol_tracks_names(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp',), namer.generated_names)
def test_new_symbol_avoids_duplicates(self):
namer = naming.Namer({}, True, None, ())
self.assertEqual('temp', namer.new_symbol('temp', set()))
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
self.assertItemsEqual(('temp', 'temp_1'), namer.generated_names)
def test_new_symbol_avoids_conflicts(self):
namer = naming.Namer({'temp': 1}, True, None, ())
# temp is reserved in the global namespace
self.assertEqual('temp_1', namer.new_symbol('temp', set()))
# temp_2 is reserved in the local namespace
self.assertEqual('temp_3', namer.new_symbol('temp', set(('temp_2',))))
self.assertItemsEqual(('temp_1', 'temp_3'), namer.generated_names)
if __name__ == '__main__':
test.main()
| apache-2.0 | 1,777,085,365,575,135,200 | 8,145,280,370,751,971,000 | 36.649351 | 80 | 0.654364 | false |
TeamTwisted/external_chromium_org | tools/binary_size/binary_size_utils.py | 53 | 2234 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Common utilities for tools that deal with binary size information.
"""
import logging
import re
def ParseNm(nm_lines):
"""Parse nm output, returning data for all relevant (to binary size)
symbols and ignoring the rest.
Args:
nm_lines: an iterable over lines of nm output.
Yields:
(symbol name, symbol type, symbol size, source file path).
Path may be None if nm couldn't figure out the source file.
"""
# Match lines with size, symbol, optional location, optional discriminator
sym_re = re.compile(r'^[0-9a-f]{8,} ' # address (8+ hex digits)
'([0-9a-f]{8,}) ' # size (8+ hex digits)
'(.) ' # symbol type, one character
'([^\t]+)' # symbol name, separated from next by tab
'(?:\t(.*):[\d\?]+)?.*$') # location
# Match lines with addr but no size.
addr_re = re.compile(r'^[0-9a-f]{8,} (.) ([^\t]+)(?:\t.*)?$')
# Match lines that don't have an address at all -- typically external symbols.
noaddr_re = re.compile(r'^ {8,} (.) (.*)$')
# Match lines with no symbol name, only addr and type
addr_only_re = re.compile(r'^[0-9a-f]{8,} (.)$')
for line in nm_lines:
line = line.rstrip()
match = sym_re.match(line)
if match:
size, sym_type, sym = match.groups()[0:3]
size = int(size, 16)
if sym_type in ('B', 'b'):
continue # skip all BSS for now.
path = match.group(4)
yield sym, sym_type, size, path
continue
match = addr_re.match(line)
if match:
# sym_type, sym = match.groups()[0:2]
continue # No size == we don't care.
match = noaddr_re.match(line)
if match:
sym_type, sym = match.groups()
if sym_type in ('U', 'w'):
continue # external or weak symbol
match = addr_only_re.match(line)
if match:
continue # Nothing to do.
# If we reach this part of the loop, there was something in the
# line that we didn't expect or recognize.
logging.warning('nm output parser failed to parse: %s', repr(line))
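# For reference, a line this parser accepts looks like (symbol and path are
# illustrative; the whitespace before the path is a single tab):
# 002d5a00 00000020 t _ZN3fooD2Ev <tab>/src/foo.cc:12
# which ParseNm yields as ('_ZN3fooD2Ev', 't', 32, '/src/foo.cc').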
| bsd-3-clause | -1,602,696,550,793,238,800 | 3,115,354,686,275,310,000 | 33.369231 | 80 | 0.596688 | false |
frankvdp/django | django/contrib/gis/geos/prototypes/threadsafe.py | 85 | 2309 | import threading
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import (
CONTEXT_PTR, error_h, lgeos, notice_h,
)
class GEOSContextHandle(GEOSBase):
"""Represent a GEOS context handle."""
ptr_type = CONTEXT_PTR
destructor = lgeos.finishGEOS_r
def __init__(self):
# Initializing the context handler for this thread with
# the notice and error handler.
self.ptr = lgeos.initGEOS_r(notice_h, error_h)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
handle = None
thread_context = GEOSContext()
class GEOSFunc:
"""
Serve as a wrapper for GEOS C Functions. Use thread-safe function
variants when available.
"""
def __init__(self, func_name):
# GEOS thread-safe function signatures end with '_r' and take an
# additional context handle parameter.
self.cfunc = getattr(lgeos, func_name + '_r')
# Create a reference to thread_context so it's not garbage-collected
# before an attempt to call this object.
self.thread_context = thread_context
def __call__(self, *args):
# Create a context handle if one doesn't exist for this thread.
self.thread_context.handle = self.thread_context.handle or GEOSContextHandle()
# Call the threaded GEOS routine with the pointer of the context handle
# as the first argument.
return self.cfunc(self.thread_context.handle.ptr, *args)
def __str__(self):
return self.cfunc.__name__
# argtypes property
def _get_argtypes(self):
return self.cfunc.argtypes
def _set_argtypes(self, argtypes):
self.cfunc.argtypes = [CONTEXT_PTR, *argtypes]
argtypes = property(_get_argtypes, _set_argtypes)
# restype property
def _get_restype(self):
return self.cfunc.restype
def _set_restype(self, restype):
self.cfunc.restype = restype
restype = property(_get_restype, _set_restype)
# errcheck property
def _get_errcheck(self):
return self.cfunc.errcheck
def _set_errcheck(self, errcheck):
self.cfunc.errcheck = errcheck
errcheck = property(_get_errcheck, _set_errcheck)
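# Hedged illustration of the wrapper above (GEOSArea is a real GEOS C API
# symbol; the snippet is only a sketch):
# geos_area = GEOSFunc('GEOSArea')
# a later call geos_area(*args) resolves to
# lgeos.GEOSArea_r(thread_context.handle.ptr, *args), creating the
# per-thread context handle on first use.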
| bsd-3-clause | 157,944,812,134,777,760 | -7,240,735,115,103,628,000 | 28.987013 | 86 | 0.669554 | false |
the-adrian/demoFlask | venv/lib/python2.7/site-packages/flask/blueprints.py | 773 | 16320 | # -*- coding: utf-8 -*-
"""
flask.blueprints
~~~~~~~~~~~~~~~~
Blueprints are the recommended way to implement larger or more
pluggable applications in Flask 0.7 and later.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import update_wrapper
from .helpers import _PackageBoundObject, _endpoint_from_view_func
class BlueprintSetupState(object):
"""Temporary holder object for registering a blueprint with the
application. An instance of this class is created by the
:meth:`~flask.Blueprint.make_setup_state` method and later passed
to all register callback functions.
"""
def __init__(self, blueprint, app, options, first_registration):
#: a reference to the current application
self.app = app
#: a reference to the blueprint that created this setup state.
self.blueprint = blueprint
#: a dictionary with all options that were passed to the
#: :meth:`~flask.Flask.register_blueprint` method.
self.options = options
#: as blueprints can be registered multiple times with the
#: application and not everything wants to be registered
#: multiple times on it, this attribute can be used to figure
#: out if the blueprint was registered in the past already.
self.first_registration = first_registration
subdomain = self.options.get('subdomain')
if subdomain is None:
subdomain = self.blueprint.subdomain
#: The subdomain that the blueprint should be active for, `None`
#: otherwise.
self.subdomain = subdomain
url_prefix = self.options.get('url_prefix')
if url_prefix is None:
url_prefix = self.blueprint.url_prefix
#: The prefix that should be used for all URLs defined on the
#: blueprint.
self.url_prefix = url_prefix
#: A dictionary with URL defaults that is added to each and every
#: URL that was defined with the blueprint.
self.url_defaults = dict(self.blueprint.url_values_defaults)
self.url_defaults.update(self.options.get('url_defaults', ()))
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""A helper method to register a rule (and optionally a view function)
to the application. The endpoint is automatically prefixed with the
blueprint's name.
"""
if self.url_prefix:
rule = self.url_prefix + rule
options.setdefault('subdomain', self.subdomain)
if endpoint is None:
endpoint = _endpoint_from_view_func(view_func)
defaults = self.url_defaults
if 'defaults' in options:
defaults = dict(defaults, **options.pop('defaults'))
self.app.add_url_rule(rule, '%s.%s' % (self.blueprint.name, endpoint),
view_func, defaults=defaults, **options)
class Blueprint(_PackageBoundObject):
"""Represents a blueprint. A blueprint is an object that records
functions that will be called with the
:class:`~flask.blueprint.BlueprintSetupState` later to register functions
or other things on the main application. See :ref:`blueprints` for more
information.
.. versionadded:: 0.7
"""
warn_on_modifications = False
_got_registered_once = False
def __init__(self, name, import_name, static_folder=None,
static_url_path=None, template_folder=None,
url_prefix=None, subdomain=None, url_defaults=None):
_PackageBoundObject.__init__(self, import_name, template_folder)
self.name = name
self.url_prefix = url_prefix
self.subdomain = subdomain
self.static_folder = static_folder
self.static_url_path = static_url_path
self.deferred_functions = []
self.view_functions = {}
if url_defaults is None:
url_defaults = {}
self.url_values_defaults = url_defaults
def record(self, func):
"""Registers a function that is called when the blueprint is
registered on the application. This function is called with the
state as argument as returned by the :meth:`make_setup_state`
method.
"""
if self._got_registered_once and self.warn_on_modifications:
from warnings import warn
warn(Warning('The blueprint was already registered once '
'but is getting modified now. These changes '
'will not show up.'))
self.deferred_functions.append(func)
def record_once(self, func):
"""Works like :meth:`record` but wraps the function in another
function that will ensure the function is only called once. If the
blueprint is registered a second time on the application, the
function passed is not called.
"""
def wrapper(state):
if state.first_registration:
func(state)
return self.record(update_wrapper(wrapper, func))
def make_setup_state(self, app, options, first_registration=False):
"""Creates an instance of :meth:`~flask.blueprints.BlueprintSetupState`
object that is later passed to the register callback functions.
Subclasses can override this to return a subclass of the setup state.
"""
return BlueprintSetupState(self, app, options, first_registration)
def register(self, app, options, first_registration=False):
"""Called by :meth:`Flask.register_blueprint` to register a blueprint
on the application. This can be overridden to customize the register
behavior. Keyword arguments from
:func:`~flask.Flask.register_blueprint` are directly forwarded to this
method in the `options` dictionary.
"""
self._got_registered_once = True
state = self.make_setup_state(app, options, first_registration)
if self.has_static_folder:
state.add_url_rule(self.static_url_path + '/<path:filename>',
view_func=self.send_static_file,
endpoint='static')
for deferred in self.deferred_functions:
deferred(state)
def route(self, rule, **options):
"""Like :meth:`Flask.route` but for a blueprint. The endpoint for the
:func:`url_for` function is prefixed with the name of the blueprint.
"""
def decorator(f):
endpoint = options.pop("endpoint", f.__name__)
self.add_url_rule(rule, endpoint, f, **options)
return f
return decorator
def add_url_rule(self, rule, endpoint=None, view_func=None, **options):
"""Like :meth:`Flask.add_url_rule` but for a blueprint. The endpoint for
the :func:`url_for` function is prefixed with the name of the blueprint.
"""
if endpoint:
assert '.' not in endpoint, "Blueprint endpoints should not contain dots"
self.record(lambda s:
s.add_url_rule(rule, endpoint, view_func, **options))
def endpoint(self, endpoint):
"""Like :meth:`Flask.endpoint` but for a blueprint. This does not
prefix the endpoint with the blueprint name, this has to be done
explicitly by the user of this method. If the endpoint is prefixed
with a `.` it will be registered to the current blueprint, otherwise
it's an application independent endpoint.
"""
def decorator(f):
def register_endpoint(state):
state.app.view_functions[endpoint] = f
self.record_once(register_endpoint)
return f
return decorator
def app_template_filter(self, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.template_filter` but for a blueprint.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_filter(f, name=name)
return f
return decorator
def add_app_template_filter(self, f, name=None):
"""Register a custom template filter, available application wide. Like
:meth:`Flask.add_template_filter` but for a blueprint. Works exactly
like the :meth:`app_template_filter` decorator.
:param name: the optional name of the filter, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.filters[name or f.__name__] = f
self.record_once(register_template)
def app_template_test(self, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.template_test` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_test(f, name=name)
return f
return decorator
def add_app_template_test(self, f, name=None):
"""Register a custom template test, available application wide. Like
:meth:`Flask.add_template_test` but for a blueprint. Works exactly
like the :meth:`app_template_test` decorator.
.. versionadded:: 0.10
:param name: the optional name of the test, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.tests[name or f.__name__] = f
self.record_once(register_template)
def app_template_global(self, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.template_global` but for a blueprint.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def decorator(f):
self.add_app_template_global(f, name=name)
return f
return decorator
def add_app_template_global(self, f, name=None):
"""Register a custom template global, available application wide. Like
:meth:`Flask.add_template_global` but for a blueprint. Works exactly
like the :meth:`app_template_global` decorator.
.. versionadded:: 0.10
:param name: the optional name of the global, otherwise the
function name will be used.
"""
def register_template(state):
state.app.jinja_env.globals[name or f.__name__] = f
self.record_once(register_template)
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f
def before_app_request(self, f):
"""Like :meth:`Flask.before_request`. Such a function is executed
before each request, even if outside of a blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(None, []).append(f))
return f
def before_app_first_request(self, f):
"""Like :meth:`Flask.before_first_request`. Such a function is
executed before the first request to the application.
"""
self.record_once(lambda s: s.app.before_first_request_funcs.append(f))
return f
def after_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. This function
is only executed after each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(self.name, []).append(f))
return f
def after_app_request(self, f):
"""Like :meth:`Flask.after_request` but for a blueprint. Such a function
is executed after each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.after_request_funcs
.setdefault(None, []).append(f))
return f
def teardown_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. This
function is only executed when tearing down requests handled by a
function of that blueprint. Teardown request functions are executed
when the request context is popped, even when no actual request was
performed.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(self.name, []).append(f))
return f
def teardown_app_request(self, f):
"""Like :meth:`Flask.teardown_request` but for a blueprint. Such a
function is executed when tearing down each request, even if outside of
the blueprint.
"""
self.record_once(lambda s: s.app.teardown_request_funcs
.setdefault(None, []).append(f))
return f
def context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. This
function is only executed for requests handled by a blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(self.name, []).append(f))
return f
def app_context_processor(self, f):
"""Like :meth:`Flask.context_processor` but for a blueprint. Such a
function is executed each request, even if outside of the blueprint.
"""
self.record_once(lambda s: s.app.template_context_processors
.setdefault(None, []).append(f))
return f
def app_errorhandler(self, code):
"""Like :meth:`Flask.errorhandler` but for a blueprint. This
handler is used for all requests, even if outside of the blueprint.
"""
def decorator(f):
self.record_once(lambda s: s.app.errorhandler(code)(f))
return f
return decorator
def url_value_preprocessor(self, f):
"""Registers a function as URL value preprocessor for this
blueprint. It's called before the view functions are called and
can modify the url values provided.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(self.name, []).append(f))
return f
def url_defaults(self, f):
"""Callback function for URL defaults for this blueprint. It's called
with the endpoint and values and should update the values passed
in place.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(self.name, []).append(f))
return f
def app_url_value_preprocessor(self, f):
"""Same as :meth:`url_value_preprocessor` but application wide.
"""
self.record_once(lambda s: s.app.url_value_preprocessors
.setdefault(None, []).append(f))
return f
def app_url_defaults(self, f):
"""Same as :meth:`url_defaults` but application wide.
"""
self.record_once(lambda s: s.app.url_default_functions
.setdefault(None, []).append(f))
return f
def errorhandler(self, code_or_exception):
"""Registers an error handler that becomes active for this blueprint
only. Please be aware that routing does not happen local to a
blueprint so an error handler for 404 usually is not handled by
a blueprint unless it is caused inside a view function. Another
special case is the 500 internal server error which is always looked
up from the application.
Otherwise works as the :meth:`~flask.Flask.errorhandler` decorator
of the :class:`~flask.Flask` object.
"""
def decorator(f):
self.record_once(lambda s: s.app._register_error_handler(
self.name, code_or_exception, f))
return f
return decorator
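# Hedged usage sketch of the Blueprint API above (names are illustrative):
# from flask import Flask
# bp = Blueprint('admin', __name__, url_prefix='/admin')
# @bp.route('/')
# def index():
#     return 'admin index'
# app = Flask(__name__)
# app.register_blueprint(bp)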
| gpl-3.0 | -7,103,255,276,024,498,000 | 3,001,014,858,968,943,600 | 39.698254 | 87 | 0.624387 | false |
benob/icsisumm | tac09/unresolved/utils.py | 8 | 20846 | import re, traceback, sys
from lxml import etree
import treenode
# based on ~dilek/GALE-DIST/DISTILL-2008/LDC2008E39/scripts/english-preprocessing/text-cleanup.pl (from Wen)
def cleanup(text):
text = " " + text.replace(",", "") + " "
# negative number
text = re.sub(r' -([0-9])', r' minus \1', text)
# decimal point
text = re.sub(r' ([0-9]*)[.]([0-9][0-9]*) ', r' \1 point \2 ', text)
# height
text = re.sub(r"(\d)'(\d)",r"\1 \2", text)
# time
text = re.sub(r"(\d)\s*am ",r"\1 a. m. ", text)
text = re.sub(r"(\d)\s*pm ",r"\1 p. m. ", text)
text = re.sub(r"(\d)'o",r"\1 oh ", text)
text = re.sub(r"(\d?\d):00",r"\1 ", text)
text = re.sub(r"(\d?\d):(\d\d)",r"\1 \2", text)
# Dates
#
text = re.sub(r" (\d{1,2})(jan|feb|mar|apr|may|jun|jul|aug|sep|oct|nov|dec)(\d{2,4}) ",r" \1 \2 \3 ", text)
text = re.sub(r" ufo's",r" u. f. o.'s ", text)
text = re.sub(r" www's",r" w. w. w.'s ", text)
text = re.sub(r" cnnfn's",r" c. n. n. f. n.'s", text)
text = re.sub(r" cnn's",r" c. n. n.'s", text)
text = re.sub(r" abc's",r" a. b. c.'s", text)
text = re.sub(r" nbc's",r" n. b. c.'s", text)
text = re.sub(r" voa's",r" v. o. a.'s", text)
text = re.sub(r" pri's",r" p. r. i.'s", text)
text = re.sub(r" msnbc's",r" m. s. n. b. c.'s", text)
text = re.sub(r" msn's",r" m. s. n.'s", text)
text = re.sub(r" fbi's",r" f. b. i.'s", text)
text = re.sub(r" usa's",r" u. s. a.'s", text)
text = re.sub(r" cia's",r" c. i. a.'s", text)
text = re.sub(r" nhl's",r" n. h. l.'s", text)
text = re.sub(r" ftc's",r" f. t. c.'s", text)
text = re.sub(r" fcc's",r" f. c. c.'s", text)
text = re.sub(r" 3com's ",r" three com's ", text)
text = re.sub(r" aka's ",r" a. k. a.'s ", text)
text = re.sub(r" ceo's ",r" c. e. o.'s ", text)
text = re.sub(r" ufo ",r" u. f. o. ", text)
text = re.sub(r" www",r" w. w. w. ", text)
text = re.sub(r" cnnfn",r" c. n. n. f. n. ", text)
text = re.sub(r" cnn",r" c. n. n. ", text)
text = re.sub(r" abc ",r" a. b. c. ", text)
text = re.sub(r" nbc",r" n. b. c. ", text)
text = re.sub(r" voa ",r" v. o. a. ", text)
text = re.sub(r" pri ",r" p. r. i. ", text)
text = re.sub(r" msnbc",r" m. s. n. b. c. ", text)
text = re.sub(r" msn",r" m. s. n. ", text)
text = re.sub(r" fbi",r" f. b. i. ", text)
text = re.sub(r" usa ",r" u. s. a. ", text)
text = re.sub(r" cia ",r" c. i. a. ", text)
text = re.sub(r" nhl",r" n. h. l. ", text)
text = re.sub(r" ftc",r" f. t. c. ", text)
text = re.sub(r" fcc",r" f. c. c. ", text)
text = re.sub(r" 3com ",r" three com ", text)
text = re.sub(r" aka ",r" a. k. a. ", text)
text = re.sub(r" ceo ",r" c. e. o. ", text)
text = re.sub(r" sec ",r" s. e. c. ", text)
text = re.sub(r" 401k",r" four oh one k. ", text)
text = re.sub(r" 403b",r" four oh three b. ", text)
text = re.sub(r" 1040 ",r" ten forty ", text)
text = re.sub(r" 1040ez ",r" ten forty e. z. ", text)
text = re.sub(r" 1040nr ",r" ten forty n. r. ", text)
text = re.sub(r" 1040nrez ",r" ten forty n. r. e. z. ", text)
text = re.sub(r"([0-9])khz - ",r"\1 kilo hertz to ", text)
text = re.sub(r"([0-9])hz - ",r"\1 hertz to ", text)
text = re.sub(r"([0-9])khz ",r"\1 kilo hertz ", text)
text = re.sub(r"([0-9])hz ",r"\1 kilo hertz ", text)
# Split hyphenated words
#
text = re.sub(r"([^ ])-([^ ])",r"\1 \2", text)
# Separate letter sequences
#
text = re.sub(r" ([a-z])[.]([a-z])[.]",r" \1. \2. ", text)
text = re.sub(r" ([a-z])[.]([a-z])[.]",r" \1. \2. ", text)
# Handle some special abbrevs
#
text = re.sub(r" jr[.]? ",r" junior ", text)
text = re.sub(r" sr[.]? ",r" senior ", text)
text = re.sub(r" no[.] ",r" text ", text)
text = re.sub(r" nr[.]",r" text ", text)
text = re.sub(r" vs[.]? ",r" versus ", text)
text = re.sub(r" mt[.]? ",r" mount ", text)
text = re.sub(r" sgt[.]? ",r" sargent ", text)
text = re.sub(r" hz[.]? ",r" hertz ", text)
text = re.sub(r" khz[.]? ",r" kilo hertz ", text)
text = re.sub(r" pt[.]? ",r" part ", text)
text = re.sub(r" op[.] ",r" opus ", text)
text = re.sub(r" sec[.] ",r" section ", text)
text = re.sub(r" no ([0-9]+)",r" text \1 ", text)
# Preserve letter/abbrev markers and decimal points, delete other periods.
#
text = re.sub(r" ([a-z])[.]'s ",r" \1~'s ", text)
text = re.sub(r" ([a-z])[.]",r" \1~ ", text)
text = re.sub(r" mr[.] ",r" mr~ ", text)
text = re.sub(r" mrs[.] ",r" mrs~ ", text)
text = re.sub(r" ms[.] ",r" ms~ ", text)
text = re.sub(r" messrs[.] ",r" messrs~ ", text)
text = re.sub(r"[.]",r" ", text)
text = re.sub(r" ([a-z])~",r" \1.", text)
text = re.sub(r" mr~ ",r" mr. ", text)
text = re.sub(r" mrs~ ",r" mrs. ", text)
text = re.sub(r" ms~ ",r" ms. ", text)
text = re.sub(r" messrs~ ",r" messrs. ", text)
# Preserve fragment markers and remove other hyphens
#
text = re.sub(r" -([a-z]+)",r" ~\1", text)
text = re.sub(r"([a-z]+)- ",r"\1~ ", text)
text = re.sub(r"-+",r" ", text)
text = re.sub(r" ~([a-z]+)",r" -\1", text)
text = re.sub(r"([a-z]+)~ ",r"\1- ", text)
text = re.sub("\s+", " ", text)
return text.strip()
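# Hedged sanity check, traced by hand through the rules above (not taken
# from the original perl script):
# >>> cleanup("the fbi announced it at 3 pm")
# 'the f. b. i. announced it at 3 p. m.'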
def number_to_words(text):
text = " " + text + " "
# Get rid of empty fractions
#
text = re.sub(r"\s([0-9]+)[.]\s",r" \1 ", text)
# Other fractionals
#
text = re.sub(r" ([0-9])*[.]([0-9][0-9]*)((nd|rd|st|th|s|\'s|s\')?) ",r" \1 point \2\3 ", text)
# Phone numbers
#
text = re.sub("1-(\d)00-(\d)(\d)(\d)-(\d)(\d)(\d)(\d)", r"one \1 hundred \2 \3 \4 \5 \6 \7 \8 ", text)
text = re.sub("1-(\d)(\d)(\d)-(\d)(\d)(\d)-(\d)(\d)(\d)(\d)", r"one \1 \2 \3 \4 \5 \6 \7 \8 \9 \10", text)
text = re.sub("(\d)(\d)(\d)-(\d)(\d)(\d)-(\d)(\d)(\d)(\d)", r"\1 \2 \3 \4 \5 \6 \7 \8 \9 \10", text)
text = re.sub("(\d)(\d)(\d)-(\d)(\d)(\d)(\d)", r"\1 \2 \3 \4 \5 \6 \7", text)
# Sports scores.
#
text = re.sub(r" ([0-9]+)-([0-9]+) ",r" \1 \2 ", text)
# Wordize years. This needs doing up front, before we convert, say, 1872 into
# one thousand eight hundred and seventy two.
#
text = re.sub(r" '(0[0-9])((s|\'s|s\')?) ",r" o. \1\2 ", text)
text = re.sub(r" '([1-9][0-9])((s|\'s|s\')?) ",r" \1\2 ", text)
text = re.sub(r" (1[1-9])00((s|\'s|s\')?) ",r" \1 hundred\2 ", text)
text = re.sub(r" 19([0-9][0-9])((s|\'s|s\')?) ",r" nineteen \1\2 ", text)
text = re.sub(r" 18([0-9][0-9])((s|\'s|s\')?) ",r" eighteen \1\2 ", text)
text = re.sub(r" 17([0-9][0-9])((s|\'s|s\')?) ",r" seventeen \1\2 ", text)
text = re.sub(r" 16([0-9][0-9])((s|\'s|s\')?) ",r" sixteen \1\2 ", text)
text = re.sub(r" 15([0-9][0-9])((s|\'s|s\')?) ",r" fifteen \1\2 ", text)
text = re.sub(r" 14([0-9][0-9])((s|\'s|s\')?) ",r" fourteen \1\2 ", text)
text = re.sub(r" 13([0-9][0-9])((s|\'s|s\')?) ",r" thirteen \1\2 ", text)
text = re.sub(r" 12([0-9][0-9])((s|\'s|s\')?) ",r" twelve \1\2 ", text)
text = re.sub(r" 11([0-9][0-9])((s|\'s|s\')?) ",r" eleven \1\2 ", text)
# Mils
#
text = re.sub(r" ([1-9][0-9][0-9])000000((th|s|\'s|s\')?) ",r" \1 million\2 ", text)
text = re.sub(r" ([1-9][0-9])000000((th|s|\'s|s\')?) ",r" \1 million\2 ", text)
text = re.sub(r" ([1-9])000000((th|s|\'s|s\')?) ",r" \1 million\2 ", text)
# 1 2 3 4 5 6 7 8 9
text = re.sub(r" ([1-9][0-9][0-9])([0-9][0-9][0-9][0-9][0-9][0-9])((nd|rd|st|th|s|\'s|s\')?) ",r" \1 million and \2\3 ", text)
text = re.sub(r" ([1-9][0-9])([0-9][0-9][0-9][0-9][0-9][0-9])((nd|rd|st|th|s|\'s|s\')?) ",r" \1 million and \2\3 ", text)
text = re.sub(r" ([1-9])([0-9][0-9][0-9][0-9][0-9][0-9])((nd|rd|st|th|s|\'s|s\')?) ",r" \1 million and \2\3 ", text)
# Thousands
#
text = re.sub(r" ([1-9][0-9][0-9])000((th|s|\'s|s\')?) ",r" \1 thousand\2 ", text)
text = re.sub(r" ([0-9][0-9])000((th|s|\'s|s\')?) ",r" \1 thousand\2 ", text)
text = re.sub(r" ([1-9])000((th|s|\'s|s\')?) ",r" \1 thousand\2 ", text)
text = re.sub(r" ([1-9][0-9][0-9])([0-9][0-9][0-9])((nd|rd|th|s|\'s|s\')?) ",r" \1 thousand and \2\3 ", text)
text = re.sub(r" ([0-9][0-9])([0-9][0-9][0-9])((nd|rd|th|s|\'s|s\')?) ",r" \1 thousand and \2\3 ", text)
text = re.sub(r" ([1-9])([0-9][0-9][0-9])((st|nd|rd|th|s|\'s|s\')?) ",r" \1 thousand and \2\3 ", text)
# Hundreds
#
text = re.sub(r" 0?([0-9][0-9])((th|st|rd|nd|s|\'s|s\')?) ",r" \1\2 ", text)
text = re.sub(r" 0?([0-9])((th|st|rd|nd|s|\'s|s\')?) ",r" \1\2 ", text)
text = re.sub(r" ([1-9])00((th|s|\'s|s\')?) ",r" \1 hundred\2 ", text)
text = re.sub(r" ([1-9])([0-9][0-9])((th|st|rd|nd|s|\'s|s\')?) ",r" \1 hundred and \2\3 ", text)
# Tens
#
text = re.sub(r" 10((th|\'s|s\'|s)?) ",r" ten\1 ", text)
text = re.sub(r" 11((th|\'s|s\'|s)?) ",r" eleven\1 ", text)
text = re.sub(r" 12th ",r" twelfth ", text)
text = re.sub(r" 12((\'s|s\')?) ",r" twelve\1 ", text)
text = re.sub(r" 12s ",r" twelves ", text)
text = re.sub(r" 13((th|\'s|s\'|s)?) ",r" thirteen\1 ", text)
text = re.sub(r" 14((th|\'s|s\'|s)?) ",r" fourteen\1 ", text)
text = re.sub(r" 15((th|\'s|s\'|s)?) ",r" fifteen\1 ", text)
text = re.sub(r" 16((th|\'s|s\'|s)?) ",r" sixteen\1 ", text)
text = re.sub(r" 17((th|\'s|s\'|s)?) ",r" seventeen\1 ", text)
text = re.sub(r" 18((th|\'s|s\'|s)?) ",r" eighteen\1 ", text)
text = re.sub(r" 19((th|\'s|s\'|s)?) ",r" nineteen\1 ", text)
text = re.sub(r" 20(th|s) ",r" twentie\1 ", text)
text = re.sub(r" 30(th|s) ",r" thirtie\1 ", text)
text = re.sub(r" 40(th|s) ",r" fortie\1 ", text)
text = re.sub(r" 50(th|s) ",r" fiftie\1 ", text)
text = re.sub(r" 60(th|s) ",r" sixtie\1 ", text)
text = re.sub(r" 70(th|s) ",r" seventie\1 ", text)
text = re.sub(r" 80(th|s) ",r" eightie\1 ", text)
text = re.sub(r" 90(th|s) ",r" ninetie\1 ", text)
text = re.sub(r" 20((\'s)?) ",r" twenty\1 ", text)
text = re.sub(r" 30((\'s)?) ",r" thirty\1 ", text)
text = re.sub(r" 40((\'s)?) ",r" forty\1 ", text)
text = re.sub(r" 50((\'s)?) ",r" fifty\1 ", text)
text = re.sub(r" 60((\'s)?) ",r" sixty\1 ", text)
text = re.sub(r" 70((\'s)?) ",r" seventy\1 ", text)
text = re.sub(r" 80((\'s)?) ",r" eighty\1 ", text)
text = re.sub(r" 90((\'s)?) ",r" ninety\1 ", text)
text = re.sub(r" 2([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" twenty \1\2\4 ", text)
text = re.sub(r" 3([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" thirty \1\2\4 ", text)
text = re.sub(r" 4([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" forty \1\2\4 ", text)
text = re.sub(r" 5([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" fifty \1\2\4 ", text)
text = re.sub(r" 6([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" sixty \1\2\4 ", text)
text = re.sub(r" 7([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" seventy \1\2\4 ", text)
text = re.sub(r" 8([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" eighty \1\2\4 ", text)
text = re.sub(r" 9([1-9])((er)?)((nd|rd|st|th|s|\'s|s\')?) ",r" ninety \1\2\4 ", text)
text = re.sub(r" 1er((s|s\'|\'s)?) ",r" oner\1 ", text)
text = re.sub(r" 2er((s|s\'|\'s)?) ",r" twoer\1 ", text)
text = re.sub(r" 3er((s|s\'|\'s)?) ",r" threeer\1 ", text)
text = re.sub(r" 4er((s|s\'|\'s)?) ",r" fourer\1 ", text)
text = re.sub(r" 5er((s|s\'|\'s)?) ",r" fiver\1 ", text)
text = re.sub(r" 6er((s|s\'|\'s)?) ",r" sixer\1 ", text)
text = re.sub(r" 7er((s|s\'|\'s)?) ",r" sevener\1 ", text)
text = re.sub(r" 8er((s|s\'|\'s)?) ",r" eighter\1 ", text)
text = re.sub(r" 9er((s|s\'|\'s)?) ",r" niner\1 ", text)
text = re.sub(r" 0*1st ",r" first ", text)
text = re.sub(r" 0*2nd ",r" second ", text)
text = re.sub(r" 0*3rd ",r" third ", text)
text = re.sub(r" 0*4th ",r" fourth ", text)
text = re.sub(r" 0*5th ",r" fifth ", text)
text = re.sub(r" 0*6th ",r" sixth ", text)
text = re.sub(r" 0*7th ",r" seventh ", text)
text = re.sub(r" 0*8th ",r" eighth ", text)
text = re.sub(r" 0*9th ",r" ninth ", text)
text = re.sub(r" 0*0s ",r" zeroes ", text)
text = re.sub(r" 0*0((\'s|s\')?) ",r" zero\1 ", text)
text = re.sub(r" 0*1((\'s|s\'|s)?) ",r" one\1 ", text)
text = re.sub(r" 0*2((\'s|s\'|s)?) ",r" two\1 ", text)
text = re.sub(r" 0*3((\'s|s\'|s)?) ",r" three\1 ", text)
text = re.sub(r" 0*4((\'s|s\'|s)?) ",r" four\1 ", text)
text = re.sub(r" 0*5((\'s|s\'|s)?) ",r" five\1 ", text)
text = re.sub(r" 0*6 ",r" six ", text)
text = re.sub(r" 0*6s ",r" sixes ", text)
text = re.sub(r" 0*6(s\'|\'s) ",r" six\1 ", text)
text = re.sub(r" 0*7((\'s|s\'|s)?) ",r" seven\1 ", text)
text = re.sub(r" 0*8((\'s|s\'|s)?) ",r" eight\1 ", text)
text = re.sub(r" 0*9((\'s|s\'|s)?) ",r" nine\1 ", text)
# Common fractions
#
text = re.sub(r" 1/4 ",r" a quarter ", text)
text = re.sub(r" 1/2 ",r" a half ", text)
text = re.sub(r" 3/4 ",r" three quarters ", text)
text = re.sub(r" 000([0-9]) ",r" \1 ", text)
text = re.sub(r" 00([0-9]) ",r" \1 ", text)
text = re.sub(r" 0([0-9]) ",r" \1 ", text)
# Handle cases like 1234 processed by above.
#
text = re.sub(r" million and ([^ ][^ ]*) thousand and ",r" million \1 thousand and ", text)
text = re.sub(r" thousand and ([^ ][^ ]*) hundred and ",r" thousand \1 hundred and ", text)
# Corrections of some introduced errors
#
text = re.sub(r" one (hundreds|thousands|millions) ",r" \1 ", text)
# Condense and trim
#
text = re.sub(r" *",r" ", text)
text = text.strip()
return text
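# Hedged sanity check, traced by hand through the rules above:
# >>> number_to_words("in 1984 he scored 23")
# 'in nineteen eighty four he scored twenty three'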
def asrify(root):
for node in root:
if not re.search("[a-zA-Z]", node.label) or node.label == "CODE" or node.label == "-NONE-" or node.label == "META":
node.cut()
if node.isLeaf() and not re.search("[a-zA-Z0-9]", node.text):
node.cut()
if node.label.startswith("-") and node.label.endswith("-"):
node.cut()
if not node.label.endswith('-'):
node.label = re.sub(r'[-=].*', '', node.label)
if node.label == "TOP":
node.label = ""
for leaf in root.leaves:
leaf.text = leaf.text.upper()
for node in root.leaves:
if node.isLeaf():
# split A.B.C. in A. B. C.
found = re.search('^([A-Z](\.[A-Z])+\.?)', node.text)
if found:
parent = node.parent
index = node.index
for letter in ['(%s %s.)' % (node.label, x) for x in found.groups(0)[0].split(".") if len(x) > 0]:
parent.grow(letter, index)
index += 1
node.cut()
continue
# paste 'S 'LL N'T to the previous word
if (re.search("^'[A-Z]", node.text) or node.text == "N'T" or (node.text == "NA" and node.label == "TO")) and node.previousLeaf != None:
node.previousLeaf.text += node.text
#node.previousLeaf.label += "-" + node.label
node.cut()
continue
node.text = cleanup(node.text.lower()).upper()
# convert numbers to words
if re.search('[0-9]', node.text):
node.text = number_to_words(node.text.lower()).upper()
        if re.search('[^A-Z ]', node.text):
            sys.stderr.write('WARNING: %s %s\n' % (str(node), str(root)))
if " " in node.text:
parent = node.parent
index = node.index
for word in node.text.split():
parent.grow("(%s %s)" % (node.label, word), index)
index += 1
node.cut()
def _add_appos_annot(element, parse_tree, word_id):
text_start = word_id
if element.text:
word_id += len(element.text.strip().split())
for child in element:
word_id += _add_appos_annot(child, parse_tree, word_id)
text_end = word_id
if element.tag == 'COREF' and 'TYPE' in element.attrib and element.attrib["TYPE"] == "APPOS":
found = 0
node = parse_tree.leaves[text_start]
while node.parent:
if node.leaves[0].leaf_index < text_start or node.leaves[-1].leaf_index >= text_end - 1:
found = 1
node.appos_label = element.attrib['SUBTYPE']
if node.appos_label.endswith('ATTRIB'):
node.appos_label = node.appos_label.replace('ATTRIB', 'ATTRIBUTE')
break
node = node.parent
if found == 0:
sys.stderr.write('WARNING: not found [%s] in %s\n' % (" ".join([x.text for x in parse_tree.leaves[text_start:text_end]]), parse_tree) )
raise Exception()
if element.tail:
word_id += len(element.tail.strip().split())
return word_id - text_start
def annotate_tree_with_appos(tree, line):
for node in tree:
node.appos_label = "O"
    line = line.replace("&", "&amp;")
try:
root = etree.XML("<ROOT>%s</ROOT>" % line)
word_id = 0
if root.text:
word_id += len(root.text.strip().split())
for element in root:
word_id += _add_appos_annot(element, tree, word_id)
except:
traceback.print_exc()
return False
return True
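def _demo_annotate_appos(tree):
    # Illustrative sketch (the sentence and the SUBTYPE value are
    # hypothetical; the COREF/TYPE/SUBTYPE names match what
    # _add_appos_annot reads above).
    line = 'the chairman , <COREF TYPE="APPOS" SUBTYPE="ATTRIB">a lawyer</COREF> , said'
    return annotate_tree_with_appos(tree, line)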
name_mapping = {
'LOCATION':'LOC',
'ORGANIZATION':'ORG',
'ORGANISATION':'ORG',
}
name_allowed = ['PERSON', 'ORG', 'ORGANISATION', 'ORGANIZATION', 'GPE', 'TIME', 'DATE', 'LOC', 'LOCATION']
def _add_name_annot(element, parse_tree, word_id):
text_start = word_id
if element.text:
word_id += len(element.text.strip().split())
for child in element:
word_id += _add_name_annot(child, parse_tree, word_id)
text_end = word_id
if element.tag == "ENAMEX" and 'TYPE' in element.attrib and element.attrib['TYPE'] in name_allowed:
found = 0
node = parse_tree.leaves[text_start]
while node.parent:
if node.leaves[0].leaf_index < text_start or node.leaves[-1].leaf_index >= text_end - 1:
found = 1
if element.attrib['TYPE'] in name_mapping:
node.name_label = name_mapping[element.attrib['TYPE']]
else:
node.name_label = element.attrib['TYPE']
break
node = node.parent
if found == 0:
sys.stderr.write('WARNING: not found [%s] in %s\n' % (" ".join([x.text for x in parse_tree.leaves[text_start:text_end]]), parse_tree) )
raise Exception()
if element.tail:
word_id += len(element.tail.strip().split())
return word_id - text_start
def annotate_tree_with_names(tree, line):
for node in tree:
node.name_label = "O"
    line = line.replace("&", "&amp;")
try:
root = etree.XML("<ROOT>%s</ROOT>" % line)
word_id = 0
if root.text:
word_id += len(root.text.strip().split())
for element in root:
word_id += _add_name_annot(element, tree, word_id)
except:
traceback.print_exc()
return False
return True
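def _demo_annotate_names(tree):
    # Illustrative sketch (hypothetical sentence): the ENAMEX TYPE must be
    # one of name_allowed above for the span to be labeled.
    line = '<ENAMEX TYPE="PERSON">john smith</ENAMEX> visited <ENAMEX TYPE="GPE">france</ENAMEX>'
    return annotate_tree_with_names(tree, line)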
def set_bio_label(node, attribute):
label = getattr(node, attribute)
if label != "O":
setattr(node.leaves[0], attribute, "B-" + label)
for leaf in node.leaves[1:]:
setattr(leaf, attribute, "I-" + label)
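# Effect sketch for set_bio_label (hypothetical node): given a node whose
# name_label is "PERSON" spanning leaves ["john", "smith"], the first leaf
# ends up labeled "B-PERSON" and every following leaf "I-PERSON".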
def read_bio_labels(input, id_column, bio_column):
output = {}
last_label = "I-O"
begin_tokens = []
for line in input.xreadlines():
tokens = line.strip().split()
if len(tokens) < max(bio_column, id_column) + 1:
continue
if last_label != 'I-O' and tokens[bio_column] != last_label:
id = ":".join(begin_tokens[id_column].split(":")[:2])
if id not in output:
output[id] = []
begin_id = int(begin_tokens[id_column].split(":")[2])
end_id = int(end_tokens[id_column].split(":")[2])
label = last_label[2:]
output[id].append((begin_id, end_id + 1, label))
if tokens[bio_column].startswith('B-'):
begin_tokens = tokens
last_label = 'I-' + tokens[bio_column].split('-')[-1]
end_tokens = tokens
if last_label != 'I-O':
id = ":".join(begin_tokens[id_column].split(":")[:2])
if id not in output:
output[id] = []
begin_id = int(begin_tokens[id_column].split(":")[2])
end_id = int(end_tokens[id_column].split(":")[2])
label = last_label[2:]
output[id].append((begin_id, end_id + 1, label))
return output
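def _demo_read_bio_labels():
    # Minimal sketch (assumed column layout: "<doc>:<part>:<index>" ids in
    # column 0, BIO tags in column 1). StringIO lacks xreadlines(), which
    # read_bio_labels calls, so a small file-like shim is used here.
    import StringIO
    class _Shim(StringIO.StringIO):
        def xreadlines(self):
            return iter(self.readline, '')
    data = "d:0:0 B-PERSON\nd:0:1 I-PERSON\nd:0:2 O\n"
    print(read_bio_labels(_Shim(data), 0, 1))  # expected: {'d:0': [(0, 2, 'PERSON')]}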
| gpl-3.0 | -6,216,817,027,224,054,000 | 8,985,670,797,417,163,000 | 41.893004 | 147 | 0.495395 | false |
Just-D/chromium-1 | remoting/host/linux/linux_me2me_host.py | 12 | 50759 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Virtual Me2Me implementation. This script runs and manages the processes
# required for a Virtual Me2Me desktop, which are: X server, X desktop
# session, and Host process.
# This script is intended to run continuously as a background daemon
# process, running under an ordinary (non-root) user account.
import atexit
import errno
import fcntl
import getpass
import grp
import hashlib
import json
import logging
import optparse
import os
import pipes
import platform
import psutil
import pwd
import re
import signal
import socket
import subprocess
import sys
import tempfile
import time
import uuid
LOG_FILE_ENV_VAR = "CHROME_REMOTE_DESKTOP_LOG_FILE"
# This script has a sensible default for the initial and maximum desktop size,
# which can be overridden either on the command-line, or via a comma-separated
# list of sizes in this environment variable.
DEFAULT_SIZES_ENV_VAR = "CHROME_REMOTE_DESKTOP_DEFAULT_DESKTOP_SIZES"
# By default, provide a maximum size that is large enough to support clients
# with large or multiple monitors. This is a comma-separated list of
# resolutions that will be made available if the X server supports RANDR. These
# defaults can be overridden in ~/.profile.
DEFAULT_SIZES = "1600x1200,3840x2560"
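# For example (illustrative), in ~/.profile:
#   export CHROME_REMOTE_DESKTOP_DEFAULT_DESKTOP_SIZES="1280x800,1920x1080"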
# If RANDR is not available, use a smaller default size. Only a single
# resolution is supported in this case.
DEFAULT_SIZE_NO_RANDR = "1600x1200"
SCRIPT_PATH = os.path.abspath(sys.argv[0])
SCRIPT_DIR = os.path.dirname(SCRIPT_PATH)
IS_INSTALLED = (os.path.basename(sys.argv[0]) != 'linux_me2me_host.py')
if IS_INSTALLED:
HOST_BINARY_NAME = "chrome-remote-desktop-host"
else:
HOST_BINARY_NAME = "remoting_me2me_host"
CHROME_REMOTING_GROUP_NAME = "chrome-remote-desktop"
HOME_DIR = os.environ["HOME"]
CONFIG_DIR = os.path.join(HOME_DIR, ".config/chrome-remote-desktop")
SESSION_FILE_PATH = os.path.join(HOME_DIR, ".chrome-remote-desktop-session")
SYSTEM_SESSION_FILE_PATH = "/etc/chrome-remote-desktop-session"
X_LOCK_FILE_TEMPLATE = "/tmp/.X%d-lock"
FIRST_X_DISPLAY_NUMBER = 20
# Amount of time to wait between relaunching processes.
SHORT_BACKOFF_TIME = 5
LONG_BACKOFF_TIME = 60
# How long a process must run in order not to be counted against the restart
# thresholds.
MINIMUM_PROCESS_LIFETIME = 60
# Thresholds for switching from fast- to slow-restart and for giving up
# trying to restart entirely.
SHORT_BACKOFF_THRESHOLD = 5
MAX_LAUNCH_FAILURES = SHORT_BACKOFF_THRESHOLD + 10
# Globals needed by the atexit cleanup() handler.
g_desktops = []
g_host_hash = hashlib.md5(socket.gethostname()).hexdigest()
def is_supported_platform():
# Always assume that the system is supported if the config directory or
# session file exist.
if (os.path.isdir(CONFIG_DIR) or os.path.isfile(SESSION_FILE_PATH) or
os.path.isfile(SYSTEM_SESSION_FILE_PATH)):
return True
# The host has been tested only on Ubuntu.
distribution = platform.linux_distribution()
return (distribution[0]).lower() == 'ubuntu'
def get_randr_supporting_x_server():
"""Returns a path to an X server that supports the RANDR extension, if this
is found on the system. Otherwise returns None."""
try:
xvfb = "/usr/bin/Xvfb-randr"
if not os.path.exists(xvfb):
xvfb = locate_executable("Xvfb-randr")
return xvfb
except Exception:
return None
class Config:
def __init__(self, path):
self.path = path
self.data = {}
self.changed = False
def load(self):
"""Loads the config from file.
Raises:
IOError: Error reading data
ValueError: Error parsing JSON
"""
settings_file = open(self.path, 'r')
self.data = json.load(settings_file)
self.changed = False
settings_file.close()
def save(self):
"""Saves the config to file.
Raises:
IOError: Error writing data
TypeError: Error serialising JSON
"""
if not self.changed:
return
old_umask = os.umask(0066)
try:
settings_file = open(self.path, 'w')
settings_file.write(json.dumps(self.data, indent=2))
settings_file.close()
self.changed = False
finally:
os.umask(old_umask)
def save_and_log_errors(self):
"""Calls self.save(), trapping and logging any errors."""
try:
self.save()
except (IOError, TypeError) as e:
logging.error("Failed to save config: " + str(e))
def get(self, key):
return self.data.get(key)
def __getitem__(self, key):
return self.data[key]
def __setitem__(self, key, value):
self.data[key] = value
self.changed = True
def clear(self):
self.data = {}
self.changed = True
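def _demo_config_roundtrip():
  # Usage sketch for Config (the path is a hypothetical example): set a
  # value, persist it, then reload it. save() only writes to disk when a
  # setting has actually changed.
  cfg = Config("/tmp/example-config.json")
  cfg["host_name"] = "my-host"
  cfg.save_and_log_errors()
  cfg.load()
  print cfg.get("host_name")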
class Authentication:
"""Manage authentication tokens for Chromoting/xmpp"""
def __init__(self):
# Note: Initial values are never used.
self.login = None
self.oauth_refresh_token = None
def copy_from(self, config):
"""Loads the config and returns false if the config is invalid."""
try:
self.login = config["xmpp_login"]
self.oauth_refresh_token = config["oauth_refresh_token"]
except KeyError:
return False
return True
def copy_to(self, config):
config["xmpp_login"] = self.login
config["oauth_refresh_token"] = self.oauth_refresh_token
class Host:
"""This manages the configuration for a host."""
def __init__(self):
# Note: Initial values are never used.
self.host_id = None
self.gcd_device_id = None
self.host_name = None
self.host_secret_hash = None
self.private_key = None
def copy_from(self, config):
try:
self.host_id = config.get("host_id")
self.gcd_device_id = config.get("gcd_device_id")
self.host_name = config["host_name"]
self.host_secret_hash = config.get("host_secret_hash")
self.private_key = config["private_key"]
except KeyError:
return False
return bool(self.host_id or self.gcd_device_id)
def copy_to(self, config):
if self.host_id:
config["host_id"] = self.host_id
if self.gcd_device_id:
config["gcd_device_id"] = self.gcd_device_id
config["host_name"] = self.host_name
config["host_secret_hash"] = self.host_secret_hash
config["private_key"] = self.private_key
class Desktop:
"""Manage a single virtual desktop"""
def __init__(self, sizes):
self.x_proc = None
self.session_proc = None
self.host_proc = None
self.child_env = None
self.sizes = sizes
self.pulseaudio_pipe = None
self.server_supports_exact_resize = False
self.host_ready = False
self.ssh_auth_sockname = None
g_desktops.append(self)
@staticmethod
def get_unused_display_number():
"""Return a candidate display number for which there is currently no
X Server lock file"""
display = FIRST_X_DISPLAY_NUMBER
while os.path.exists(X_LOCK_FILE_TEMPLATE % display):
display += 1
return display
def _init_child_env(self):
# Create clean environment for new session, so it is cleanly separated from
# the user's console X session.
self.child_env = {}
for key in [
"HOME",
"LANG",
"LOGNAME",
"PATH",
"SHELL",
"USER",
"USERNAME",
LOG_FILE_ENV_VAR]:
if os.environ.has_key(key):
self.child_env[key] = os.environ[key]
# Ensure that the software-rendering GL drivers are loaded by the desktop
# session, instead of any hardware GL drivers installed on the system.
self.child_env["LD_LIBRARY_PATH"] = (
"/usr/lib/%(arch)s-linux-gnu/mesa:"
"/usr/lib/%(arch)s-linux-gnu/dri:"
"/usr/lib/%(arch)s-linux-gnu/gallium-pipe" %
{ "arch": platform.machine() })
# Read from /etc/environment if it exists, as it is a standard place to
# store system-wide environment settings. During a normal login, this would
# typically be done by the pam_env PAM module, depending on the local PAM
# configuration.
env_filename = "/etc/environment"
try:
with open(env_filename, "r") as env_file:
for line in env_file:
line = line.rstrip("\n")
# Split at the first "=", leaving any further instances in the value.
key_value_pair = line.split("=", 1)
if len(key_value_pair) == 2:
key, value = tuple(key_value_pair)
# The file stores key=value assignments, but the value may be
# quoted, so strip leading & trailing quotes from it.
value = value.strip("'\"")
self.child_env[key] = value
except IOError:
logging.info("Failed to read %s, skipping." % env_filename)
def _setup_pulseaudio(self):
self.pulseaudio_pipe = None
# pulseaudio uses UNIX sockets for communication. Length of UNIX socket
# name is limited to 108 characters, so audio will not work properly if
    # the path is too long. To work around this problem we use only the first
    # 10 characters of the host hash.
pulse_path = os.path.join(CONFIG_DIR,
"pulseaudio#%s" % g_host_hash[0:10])
if len(pulse_path) + len("/native") >= 108:
logging.error("Audio will not be enabled because pulseaudio UNIX " +
"socket path is too long.")
return False
sink_name = "chrome_remote_desktop_session"
pipe_name = os.path.join(pulse_path, "fifo_output")
try:
if not os.path.exists(pulse_path):
os.mkdir(pulse_path)
    except IOError as e:
logging.error("Failed to create pulseaudio pipe: " + str(e))
return False
try:
pulse_config = open(os.path.join(pulse_path, "daemon.conf"), "w")
pulse_config.write("default-sample-format = s16le\n")
pulse_config.write("default-sample-rate = 48000\n")
pulse_config.write("default-sample-channels = 2\n")
pulse_config.close()
pulse_script = open(os.path.join(pulse_path, "default.pa"), "w")
pulse_script.write("load-module module-native-protocol-unix\n")
pulse_script.write(
("load-module module-pipe-sink sink_name=%s file=\"%s\" " +
"rate=48000 channels=2 format=s16le\n") %
(sink_name, pipe_name))
pulse_script.close()
    except IOError as e:
logging.error("Failed to write pulseaudio config: " + str(e))
return False
self.child_env["PULSE_CONFIG_PATH"] = pulse_path
self.child_env["PULSE_RUNTIME_PATH"] = pulse_path
self.child_env["PULSE_STATE_PATH"] = pulse_path
self.child_env["PULSE_SINK"] = sink_name
self.pulseaudio_pipe = pipe_name
return True
def _setup_gnubby(self):
self.ssh_auth_sockname = ("/tmp/chromoting.%s.ssh_auth_sock" %
os.environ["USER"])
def _launch_x_server(self, extra_x_args):
x_auth_file = os.path.expanduser("~/.Xauthority")
self.child_env["XAUTHORITY"] = x_auth_file
devnull = open(os.devnull, "rw")
display = self.get_unused_display_number()
# Run "xauth add" with |child_env| so that it modifies the same XAUTHORITY
# file which will be used for the X session.
ret_code = subprocess.call("xauth add :%d . `mcookie`" % display,
env=self.child_env, shell=True)
if ret_code != 0:
raise Exception("xauth failed with code %d" % ret_code)
max_width = max([width for width, height in self.sizes])
max_height = max([height for width, height in self.sizes])
xvfb = get_randr_supporting_x_server()
if xvfb:
self.server_supports_exact_resize = True
else:
xvfb = "Xvfb"
self.server_supports_exact_resize = False
# Disable the Composite extension iff the X session is the default
# Unity-2D, since it uses Metacity which fails to generate DAMAGE
# notifications correctly. See crbug.com/166468.
x_session = choose_x_session()
if (len(x_session) == 2 and
x_session[1] == "/usr/bin/gnome-session --session=ubuntu-2d"):
extra_x_args.extend(["-extension", "Composite"])
logging.info("Starting %s on display :%d" % (xvfb, display))
screen_option = "%dx%dx24" % (max_width, max_height)
self.x_proc = subprocess.Popen(
[xvfb, ":%d" % display,
"-auth", x_auth_file,
"-nolisten", "tcp",
"-noreset",
"-screen", "0", screen_option
] + extra_x_args)
if not self.x_proc.pid:
raise Exception("Could not start Xvfb.")
self.child_env["DISPLAY"] = ":%d" % display
self.child_env["CHROME_REMOTE_DESKTOP_SESSION"] = "1"
# Use a separate profile for any instances of Chrome that are started in
# the virtual session. Chrome doesn't support sharing a profile between
# multiple DISPLAYs, but Chrome Sync allows for a reasonable compromise.
chrome_profile = os.path.join(CONFIG_DIR, "chrome-profile")
self.child_env["CHROME_USER_DATA_DIR"] = chrome_profile
# Set SSH_AUTH_SOCK to the file name to listen on.
if self.ssh_auth_sockname:
self.child_env["SSH_AUTH_SOCK"] = self.ssh_auth_sockname
# Wait for X to be active.
for _test in range(20):
retcode = subprocess.call("xdpyinfo", env=self.child_env, stdout=devnull)
if retcode == 0:
break
time.sleep(0.5)
if retcode != 0:
raise Exception("Could not connect to Xvfb.")
else:
logging.info("Xvfb is active.")
# The remoting host expects the server to use "evdev" keycodes, but Xvfb
# starts configured to use the "base" ruleset, resulting in XKB configuring
# for "xfree86" keycodes, and screwing up some keys. See crbug.com/119013.
# Reconfigure the X server to use "evdev" keymap rules. The X server must
# be started with -noreset otherwise it'll reset as soon as the command
# completes, since there are no other X clients running yet.
retcode = subprocess.call("setxkbmap -rules evdev", env=self.child_env,
shell=True)
if retcode != 0:
logging.error("Failed to set XKB to 'evdev'")
if not self.server_supports_exact_resize:
return
# Register the screen sizes if the X server's RANDR extension supports it.
# Errors here are non-fatal; the X server will continue to run with the
# dimensions from the "-screen" option.
for width, height in self.sizes:
label = "%dx%d" % (width, height)
args = ["xrandr", "--newmode", label, "0", str(width), "0", "0", "0",
str(height), "0", "0", "0"]
subprocess.call(args, env=self.child_env, stdout=devnull, stderr=devnull)
args = ["xrandr", "--addmode", "screen", label]
subprocess.call(args, env=self.child_env, stdout=devnull, stderr=devnull)
# Set the initial mode to the first size specified, otherwise the X server
# would default to (max_width, max_height), which might not even be in the
# list.
initial_size = self.sizes[0]
label = "%dx%d" % initial_size
args = ["xrandr", "-s", label]
subprocess.call(args, env=self.child_env, stdout=devnull, stderr=devnull)
# Set the physical size of the display so that the initial mode is running
# at approximately 96 DPI, since some desktops require the DPI to be set to
# something realistic.
args = ["xrandr", "--dpi", "96"]
subprocess.call(args, env=self.child_env, stdout=devnull, stderr=devnull)
# Monitor for any automatic resolution changes from the desktop environment.
args = [SCRIPT_PATH, "--watch-resolution", str(initial_size[0]),
str(initial_size[1])]
# It is not necessary to wait() on the process here, as this script's main
# loop will reap the exit-codes of all child processes.
subprocess.Popen(args, env=self.child_env, stdout=devnull, stderr=devnull)
devnull.close()
def _launch_x_session(self):
# Start desktop session.
# The /dev/null input redirection is necessary to prevent the X session
# reading from stdin. If this code runs as a shell background job in a
# terminal, any reading from stdin causes the job to be suspended.
# Daemonization would solve this problem by separating the process from the
# controlling terminal.
xsession_command = choose_x_session()
if xsession_command is None:
raise Exception("Unable to choose suitable X session command.")
logging.info("Launching X session: %s" % xsession_command)
self.session_proc = subprocess.Popen(xsession_command,
stdin=open(os.devnull, "r"),
cwd=HOME_DIR,
env=self.child_env)
if not self.session_proc.pid:
raise Exception("Could not start X session")
def launch_session(self, x_args):
self._init_child_env()
self._setup_pulseaudio()
self._setup_gnubby()
self._launch_x_server(x_args)
self._launch_x_session()
def launch_host(self, host_config):
# Start remoting host
args = [locate_executable(HOST_BINARY_NAME), "--host-config=-"]
if self.pulseaudio_pipe:
args.append("--audio-pipe-name=%s" % self.pulseaudio_pipe)
if self.server_supports_exact_resize:
args.append("--server-supports-exact-resize")
if self.ssh_auth_sockname:
args.append("--ssh-auth-sockname=%s" % self.ssh_auth_sockname)
# Have the host process use SIGUSR1 to signal a successful start.
def sigusr1_handler(signum, frame):
_ = signum, frame
logging.info("Host ready to receive connections.")
self.host_ready = True
if (ParentProcessLogger.instance() and
False not in [desktop.host_ready for desktop in g_desktops]):
ParentProcessLogger.instance().release_parent()
signal.signal(signal.SIGUSR1, sigusr1_handler)
args.append("--signal-parent")
self.host_proc = subprocess.Popen(args, env=self.child_env,
stdin=subprocess.PIPE)
logging.info(args)
if not self.host_proc.pid:
raise Exception("Could not start Chrome Remote Desktop host")
try:
self.host_proc.stdin.write(json.dumps(host_config.data))
self.host_proc.stdin.flush()
except IOError as e:
# This can occur in rare situations, for example, if the machine is
# heavily loaded and the host process dies quickly (maybe if the X
# connection failed), the host process might be gone before this code
# writes to the host's stdin. Catch and log the exception, allowing
# the process to be retried instead of exiting the script completely.
logging.error("Failed writing to host's stdin: " + str(e))
finally:
self.host_proc.stdin.close()
def get_daemon_proc():
"""Checks if there is already an instance of this script running, and returns
a psutil.Process instance for it.
Returns:
A Process instance for the existing daemon process, or None if the daemon
is not running.
"""
uid = os.getuid()
this_pid = os.getpid()
# Support new & old psutil API. This is the right way to check, according to
# http://grodola.blogspot.com/2014/01/psutil-20-porting.html
if psutil.version_info >= (2, 0):
psget = lambda x: x()
else:
psget = lambda x: x
for process in psutil.process_iter():
# Skip any processes that raise an exception, as processes may terminate
# during iteration over the list.
try:
# Skip other users' processes.
if psget(process.uids).real != uid:
continue
# Skip the process for this instance.
if process.pid == this_pid:
continue
# |cmdline| will be [python-interpreter, script-file, other arguments...]
cmdline = psget(process.cmdline)
if len(cmdline) < 2:
continue
if cmdline[0] == sys.executable and cmdline[1] == sys.argv[0]:
return process
except (psutil.NoSuchProcess, psutil.AccessDenied):
continue
return None
def choose_x_session():
"""Chooses the most appropriate X session command for this system.
Returns:
A string containing the command to run, or a list of strings containing
the executable program and its arguments, which is suitable for passing as
the first parameter of subprocess.Popen(). If a suitable session cannot
be found, returns None.
"""
XSESSION_FILES = [
SESSION_FILE_PATH,
SYSTEM_SESSION_FILE_PATH ]
for startup_file in XSESSION_FILES:
startup_file = os.path.expanduser(startup_file)
if os.path.exists(startup_file):
if os.access(startup_file, os.X_OK):
# "/bin/sh -c" is smart about how to execute the session script and
# works in cases where plain exec() fails (for example, if the file is
# marked executable, but is a plain script with no shebang line).
return ["/bin/sh", "-c", pipes.quote(startup_file)]
else:
# If this is a system-wide session script, it should be run using the
# system shell, ignoring any login shell that might be set for the
# current user.
return ["/bin/sh", startup_file]
# Choose a session wrapper script to run the session. On some systems,
# /etc/X11/Xsession fails to load the user's .profile, so look for an
# alternative wrapper that is more likely to match the script that the
# system actually uses for console desktop sessions.
SESSION_WRAPPERS = [
"/usr/sbin/lightdm-session",
"/etc/gdm/Xsession",
"/etc/X11/Xsession" ]
for session_wrapper in SESSION_WRAPPERS:
if os.path.exists(session_wrapper):
if os.path.exists("/usr/bin/unity-2d-panel"):
# On Ubuntu 12.04, the default session relies on 3D-accelerated
# hardware. Trying to run this with a virtual X display produces
# weird results on some systems (for example, upside-down and
# corrupt displays). So if the ubuntu-2d session is available,
# choose it explicitly.
return [session_wrapper, "/usr/bin/gnome-session --session=ubuntu-2d"]
else:
# Use the session wrapper by itself, and let the system choose a
# session.
return [session_wrapper]
return None
def locate_executable(exe_name):
if IS_INSTALLED:
# If the script is running from its installed location, search the host
# binary only in the same directory.
paths_to_try = [ SCRIPT_DIR ]
else:
paths_to_try = map(lambda p: os.path.join(SCRIPT_DIR, p),
[".", "../../../out/Debug", "../../../out/Release" ])
for path in paths_to_try:
exe_path = os.path.join(path, exe_name)
if os.path.exists(exe_path):
return exe_path
raise Exception("Could not locate executable '%s'" % exe_name)
class ParentProcessLogger(object):
"""Redirects logs to the parent process, until the host is ready or quits.
This class creates a pipe to allow logging from the daemon process to be
copied to the parent process. The daemon process adds a log-handler that
  directs logging output to the pipe. The parent process reads from this pipe
  and writes the content to stderr. When the pipe is no longer needed
  (for example, the host signals successful launch or permanent failure), the
  daemon removes the log-handler and closes the pipe, causing the parent
  process to reach end-of-file while reading the pipe and exit.
The (singleton) logger should be instantiated before forking. The parent
process should call wait_for_logs() before exiting. The (grand-)child process
should call start_logging() when it starts, and then use logging.* to issue
  log statements, as usual. When the child has either successfully started the
host or terminated, it must call release_parent() to allow the parent to exit.
"""
__instance = None
def __init__(self):
"""Constructor. Must be called before forking."""
read_pipe, write_pipe = os.pipe()
# Ensure write_pipe is closed on exec, otherwise it will be kept open by
# child processes (X, host), preventing the read pipe from EOF'ing.
old_flags = fcntl.fcntl(write_pipe, fcntl.F_GETFD)
fcntl.fcntl(write_pipe, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
self._read_file = os.fdopen(read_pipe, 'r')
self._write_file = os.fdopen(write_pipe, 'a')
self._logging_handler = None
ParentProcessLogger.__instance = self
def start_logging(self):
"""Installs a logging handler that sends log entries to a pipe.
Must be called by the child process.
"""
self._read_file.close()
self._logging_handler = logging.StreamHandler(self._write_file)
logging.getLogger().addHandler(self._logging_handler)
def release_parent(self):
"""Uninstalls logging handler and closes the pipe, releasing the parent.
Must be called by the child process.
"""
if self._logging_handler:
logging.getLogger().removeHandler(self._logging_handler)
self._logging_handler = None
if not self._write_file.closed:
self._write_file.close()
def wait_for_logs(self):
"""Waits and prints log lines from the daemon until the pipe is closed.
Must be called by the parent process.
"""
# If Ctrl-C is pressed, inform the user that the daemon is still running.
# This signal will cause the read loop below to stop with an EINTR IOError.
def sigint_handler(signum, frame):
_ = signum, frame
print >> sys.stderr, ("Interrupted. The daemon is still running in the "
"background.")
signal.signal(signal.SIGINT, sigint_handler)
# Install a fallback timeout to release the parent process, in case the
# daemon never responds (e.g. host crash-looping, daemon killed).
# This signal will cause the read loop below to stop with an EINTR IOError.
def sigalrm_handler(signum, frame):
_ = signum, frame
print >> sys.stderr, ("No response from daemon. It may have crashed, or "
"may still be running in the background.")
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(30)
self._write_file.close()
# Print lines as they're logged to the pipe until EOF is reached or readline
# is interrupted by one of the signal handlers above.
try:
for line in iter(self._read_file.readline, ''):
sys.stderr.write(line)
except IOError as e:
if e.errno != errno.EINTR:
raise
print >> sys.stderr, "Log file: %s" % os.environ[LOG_FILE_ENV_VAR]
@staticmethod
def instance():
"""Returns the singleton instance, if it exists."""
return ParentProcessLogger.__instance
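# Lifecycle sketch for ParentProcessLogger (illustrative only; daemonize()
# below is the real user of this protocol):
#   logger = ParentProcessLogger()     # create the pipe before forking
#   if os.fork() == 0:
#     logger.start_logging()           # daemon: logging.* now feeds the pipe
#     ...                              # launch host; on success or give-up:
#     logger.release_parent()          # daemon: close pipe, parent sees EOF
#   else:
#     logger.wait_for_logs()           # parent: echo daemon logs, then exit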
def daemonize():
"""Background this process and detach from controlling terminal, redirecting
stdout/stderr to a log file."""
# TODO(lambroslambrou): Having stdout/stderr redirected to a log file is not
# ideal - it could create a filesystem DoS if the daemon or a child process
# were to write excessive amounts to stdout/stderr. Ideally, stdout/stderr
# should be redirected to a pipe or socket, and a process at the other end
# should consume the data and write it to a logging facility which can do
# data-capping or log-rotation. The 'logger' command-line utility could be
# used for this, but it might cause too much syslog spam.
# Create new (temporary) file-descriptors before forking, so any errors get
# reported to the main process and set the correct exit-code.
# The mode is provided, since Python otherwise sets a default mode of 0777,
# which would result in the new file having permissions of 0777 & ~umask,
# possibly leaving the executable bits set.
if not os.environ.has_key(LOG_FILE_ENV_VAR):
log_file_prefix = "chrome_remote_desktop_%s_" % time.strftime(
'%Y%m%d_%H%M%S', time.localtime(time.time()))
log_file = tempfile.NamedTemporaryFile(prefix=log_file_prefix, delete=False)
os.environ[LOG_FILE_ENV_VAR] = log_file.name
log_fd = log_file.file.fileno()
else:
log_fd = os.open(os.environ[LOG_FILE_ENV_VAR],
os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
devnull_fd = os.open(os.devnull, os.O_RDONLY)
parent_logger = ParentProcessLogger()
pid = os.fork()
if pid == 0:
# Child process
os.setsid()
# The second fork ensures that the daemon isn't a session leader, so that
# it doesn't acquire a controlling terminal.
pid = os.fork()
if pid == 0:
# Grandchild process
pass
else:
# Child process
os._exit(0) # pylint: disable=W0212
else:
# Parent process
parent_logger.wait_for_logs()
os._exit(0) # pylint: disable=W0212
logging.info("Daemon process started in the background, logging to '%s'" %
os.environ[LOG_FILE_ENV_VAR])
os.chdir(HOME_DIR)
parent_logger.start_logging()
# Copy the file-descriptors to create new stdin, stdout and stderr. Note
# that dup2(oldfd, newfd) closes newfd first, so this will close the current
# stdin, stdout and stderr, detaching from the terminal.
os.dup2(devnull_fd, sys.stdin.fileno())
os.dup2(log_fd, sys.stdout.fileno())
os.dup2(log_fd, sys.stderr.fileno())
# Close the temporary file-descriptors.
os.close(devnull_fd)
os.close(log_fd)
def cleanup():
logging.info("Cleanup.")
global g_desktops
for desktop in g_desktops:
for proc, name in [(desktop.x_proc, "Xvfb"),
(desktop.session_proc, "session"),
(desktop.host_proc, "host")]:
if proc is not None:
logging.info("Terminating " + name)
try:
psutil_proc = psutil.Process(proc.pid)
psutil_proc.terminate()
# Use a short timeout, to avoid delaying service shutdown if the
# process refuses to die for some reason.
psutil_proc.wait(timeout=10)
except psutil.TimeoutExpired:
logging.error("Timed out - sending SIGKILL")
psutil_proc.kill()
except psutil.Error:
logging.error("Error terminating process")
g_desktops = []
if ParentProcessLogger.instance():
ParentProcessLogger.instance().release_parent()
class SignalHandler:
"""Reload the config file on SIGHUP. Since we pass the configuration to the
host processes via stdin, they can't reload it, so terminate them. They will
be relaunched automatically with the new config."""
def __init__(self, host_config):
self.host_config = host_config
def __call__(self, signum, _stackframe):
if signum == signal.SIGHUP:
logging.info("SIGHUP caught, restarting host.")
try:
self.host_config.load()
except (IOError, ValueError) as e:
logging.error("Failed to load config: " + str(e))
for desktop in g_desktops:
if desktop.host_proc:
desktop.host_proc.send_signal(signal.SIGTERM)
else:
# Exit cleanly so the atexit handler, cleanup(), gets called.
raise SystemExit
class RelaunchInhibitor:
"""Helper class for inhibiting launch of a child process before a timeout has
elapsed.
A managed process can be in one of these states:
running, not inhibited (running == True)
stopped and inhibited (running == False and is_inhibited() == True)
stopped but not inhibited (running == False and is_inhibited() == False)
Attributes:
label: Name of the tracked process. Only used for logging.
running: Whether the process is currently running.
earliest_relaunch_time: Time before which the process should not be
relaunched, or 0 if there is no limit.
failures: The number of times that the process ran for less than a
specified timeout, and had to be inhibited. This count is reset to 0
whenever the process has run for longer than the timeout.
"""
def __init__(self, label):
self.label = label
self.running = False
self.earliest_relaunch_time = 0
self.earliest_successful_termination = 0
self.failures = 0
def is_inhibited(self):
return (not self.running) and (time.time() < self.earliest_relaunch_time)
def record_started(self, minimum_lifetime, relaunch_delay):
"""Record that the process was launched, and set the inhibit time to
|timeout| seconds in the future."""
self.earliest_relaunch_time = time.time() + relaunch_delay
self.earliest_successful_termination = time.time() + minimum_lifetime
self.running = True
def record_stopped(self):
"""Record that the process was stopped, and adjust the failure count
depending on whether the process ran long enough."""
self.running = False
if time.time() < self.earliest_successful_termination:
self.failures += 1
else:
self.failures = 0
logging.info("Failure count for '%s' is now %d", self.label, self.failures)
def relaunch_self():
cleanup()
os.execvp(SCRIPT_PATH, sys.argv)
def waitpid_with_timeout(pid, deadline):
"""Wrapper around os.waitpid() which waits until either a child process dies
or the deadline elapses.
Args:
pid: Process ID to wait for, or -1 to wait for any child process.
deadline: Waiting stops when time.time() exceeds this value.
Returns:
(pid, status): Same as for os.waitpid(), except that |pid| is 0 if no child
changed state within the timeout.
Raises:
Same as for os.waitpid().
"""
while time.time() < deadline:
pid, status = os.waitpid(pid, os.WNOHANG)
if pid != 0:
return (pid, status)
time.sleep(1)
return (0, 0)
def waitpid_handle_exceptions(pid, deadline):
"""Wrapper around os.waitpid()/waitpid_with_timeout(), which waits until
either a child process exits or the deadline elapses, and retries if certain
exceptions occur.
Args:
pid: Process ID to wait for, or -1 to wait for any child process.
deadline: If non-zero, waiting stops when time.time() exceeds this value.
If zero, waiting stops when a child process exits.
Returns:
(pid, status): Same as for waitpid_with_timeout(). |pid| is non-zero if and
only if a child exited during the wait.
Raises:
Same as for os.waitpid(), except:
OSError with errno==EINTR causes the wait to be retried (this can happen,
for example, if this parent process receives SIGHUP).
OSError with errno==ECHILD means there are no child processes, and so
this function sleeps until |deadline|. If |deadline| is zero, this is an
error and the OSError exception is raised in this case.
"""
while True:
try:
if deadline == 0:
pid_result, status = os.waitpid(pid, 0)
else:
pid_result, status = waitpid_with_timeout(pid, deadline)
return (pid_result, status)
    except OSError as e:
if e.errno == errno.EINTR:
continue
elif e.errno == errno.ECHILD:
now = time.time()
if deadline == 0:
# No time-limit and no child processes. This is treated as an error
# (see docstring).
raise
elif deadline > now:
time.sleep(deadline - now)
return (0, 0)
else:
# Anything else is an unexpected error.
raise
def watch_for_resolution_changes(initial_size):
"""Watches for any resolution-changes which set the maximum screen resolution,
and resets the initial size if this happens.
The Ubuntu desktop has a component (the 'xrandr' plugin of
unity-settings-daemon) which often changes the screen resolution to the
first listed mode. This is the built-in mode for the maximum screen size,
which can trigger excessive CPU usage in some situations. So this is a hack
which waits for any such events, and undoes the change if it occurs.
Sometimes, the user might legitimately want to use the maximum available
resolution, so this monitoring is limited to a short time-period.
"""
for _ in range(30):
time.sleep(1)
xrandr_output = subprocess.Popen(["xrandr"],
stdout=subprocess.PIPE).communicate()[0]
matches = re.search(r'current (\d+) x (\d+), maximum (\d+) x (\d+)',
xrandr_output)
# No need to handle ValueError. If xrandr fails to give valid output,
# there's no point in continuing to monitor.
current_size = (int(matches.group(1)), int(matches.group(2)))
maximum_size = (int(matches.group(3)), int(matches.group(4)))
if current_size != initial_size:
# Resolution change detected.
if current_size == maximum_size:
# This was probably an automated change from unity-settings-daemon, so
# undo it.
label = "%dx%d" % initial_size
args = ["xrandr", "-s", label]
subprocess.call(args)
args = ["xrandr", "--dpi", "96"]
subprocess.call(args)
# Stop monitoring after any change was detected.
break
def main():
EPILOG = """This script is not intended for use by end-users. To configure
Chrome Remote Desktop, please install the app from the Chrome
Web Store: https://chrome.google.com/remotedesktop"""
parser = optparse.OptionParser(
usage="Usage: %prog [options] [ -- [ X server options ] ]",
epilog=EPILOG)
parser.add_option("-s", "--size", dest="size", action="append",
help="Dimensions of virtual desktop. This can be specified "
"multiple times to make multiple screen resolutions "
"available (if the Xvfb server supports this).")
parser.add_option("-f", "--foreground", dest="foreground", default=False,
action="store_true",
help="Don't run as a background daemon.")
parser.add_option("", "--start", dest="start", default=False,
action="store_true",
help="Start the host.")
parser.add_option("-k", "--stop", dest="stop", default=False,
action="store_true",
help="Stop the daemon currently running.")
parser.add_option("", "--get-status", dest="get_status", default=False,
action="store_true",
help="Prints host status")
parser.add_option("", "--check-running", dest="check_running", default=False,
action="store_true",
help="Return 0 if the daemon is running, or 1 otherwise.")
parser.add_option("", "--config", dest="config", action="store",
help="Use the specified configuration file.")
parser.add_option("", "--reload", dest="reload", default=False,
action="store_true",
help="Signal currently running host to reload the config.")
parser.add_option("", "--add-user", dest="add_user", default=False,
action="store_true",
help="Add current user to the chrome-remote-desktop group.")
parser.add_option("", "--add-user-as-root", dest="add_user_as_root",
action="store", metavar="USER",
help="Adds the specified user to the chrome-remote-desktop "
"group (must be run as root).")
parser.add_option("", "--host-version", dest="host_version", default=False,
action="store_true",
help="Prints version of the host.")
parser.add_option("", "--watch-resolution", dest="watch_resolution",
type="int", nargs=2, default=False, action="store",
help=optparse.SUPPRESS_HELP)
(options, args) = parser.parse_args()
# Determine the filename of the host configuration and PID files.
if not options.config:
options.config = os.path.join(CONFIG_DIR, "host#%s.json" % g_host_hash)
# Check for a modal command-line option (start, stop, etc.)
if options.get_status:
proc = get_daemon_proc()
if proc is not None:
print "STARTED"
elif is_supported_platform():
print "STOPPED"
else:
print "NOT_IMPLEMENTED"
return 0
# TODO(sergeyu): Remove --check-running once NPAPI plugin and NM host are
# updated to always use get-status flag instead.
if options.check_running:
proc = get_daemon_proc()
return 1 if proc is None else 0
if options.stop:
proc = get_daemon_proc()
if proc is None:
print "The daemon is not currently running"
else:
print "Killing process %s" % proc.pid
proc.terminate()
try:
proc.wait(timeout=30)
except psutil.TimeoutExpired:
print "Timed out trying to kill daemon process"
return 1
return 0
if options.reload:
proc = get_daemon_proc()
if proc is None:
return 1
proc.send_signal(signal.SIGHUP)
return 0
if options.add_user:
user = getpass.getuser()
try:
if user in grp.getgrnam(CHROME_REMOTING_GROUP_NAME).gr_mem:
logging.info("User '%s' is already a member of '%s'." %
(user, CHROME_REMOTING_GROUP_NAME))
return 0
except KeyError:
logging.info("Group '%s' not found." % CHROME_REMOTING_GROUP_NAME)
command = [SCRIPT_PATH, '--add-user-as-root', user]
if os.getenv("DISPLAY"):
# TODO(rickyz): Add a Polkit policy that includes a more friendly message
# about what this command does.
command = ["/usr/bin/pkexec"] + command
else:
command = ["/usr/bin/sudo", "-k", "--"] + command
# Run with an empty environment out of paranoia, though if an attacker
# controls the environment this script is run under, we're already screwed
# anyway.
os.execve(command[0], command, {})
return 1
if options.add_user_as_root is not None:
if os.getuid() != 0:
logging.error("--add-user-as-root can only be specified as root.")
      return 1
user = options.add_user_as_root
try:
pwd.getpwnam(user)
except KeyError:
logging.error("user '%s' does not exist." % user)
return 1
try:
subprocess.check_call(["/usr/sbin/groupadd", "-f",
CHROME_REMOTING_GROUP_NAME])
subprocess.check_call(["/usr/bin/gpasswd", "--add", user,
CHROME_REMOTING_GROUP_NAME])
except (ValueError, OSError, subprocess.CalledProcessError) as e:
logging.error("Command failed: " + str(e))
return 1
return 0
if options.host_version:
# TODO(sergeyu): Also check RPM package version once we add RPM package.
return os.system(locate_executable(HOST_BINARY_NAME) + " --version") >> 8
if options.watch_resolution:
watch_for_resolution_changes(options.watch_resolution)
return 0
if not options.start:
# If no modal command-line options specified, print an error and exit.
print >> sys.stderr, EPILOG
return 1
# If a RANDR-supporting Xvfb is not available, limit the default size to
# something more sensible.
if get_randr_supporting_x_server():
default_sizes = DEFAULT_SIZES
else:
default_sizes = DEFAULT_SIZE_NO_RANDR
# Collate the list of sizes that XRANDR should support.
if not options.size:
if os.environ.has_key(DEFAULT_SIZES_ENV_VAR):
default_sizes = os.environ[DEFAULT_SIZES_ENV_VAR]
options.size = default_sizes.split(",")
sizes = []
for size in options.size:
size_components = size.split("x")
if len(size_components) != 2:
parser.error("Incorrect size format '%s', should be WIDTHxHEIGHT" % size)
try:
width = int(size_components[0])
height = int(size_components[1])
# Enforce minimum desktop size, as a sanity-check. The limit of 100 will
# detect typos of 2 instead of 3 digits.
if width < 100 or height < 100:
raise ValueError
except ValueError:
parser.error("Width and height should be 100 pixels or greater")
sizes.append((width, height))
# Register an exit handler to clean up session process and the PID file.
atexit.register(cleanup)
# Load the initial host configuration.
host_config = Config(options.config)
try:
host_config.load()
except (IOError, ValueError) as e:
print >> sys.stderr, "Failed to load config: " + str(e)
return 1
# Register handler to re-load the configuration in response to signals.
for s in [signal.SIGHUP, signal.SIGINT, signal.SIGTERM]:
signal.signal(s, SignalHandler(host_config))
# Verify that the initial host configuration has the necessary fields.
auth = Authentication()
auth_config_valid = auth.copy_from(host_config)
host = Host()
host_config_valid = host.copy_from(host_config)
if not host_config_valid or not auth_config_valid:
logging.error("Failed to load host configuration.")
return 1
  # Determine whether a desktop is already active for the specified host
  # configuration.
proc = get_daemon_proc()
if proc is not None:
# Debian policy requires that services should "start" cleanly and return 0
# if they are already running.
print "Service already running."
return 0
# Detach a separate "daemon" process to run the session, unless specifically
# requested to run in the foreground.
if not options.foreground:
daemonize()
if host.host_id:
logging.info("Using host_id: " + host.host_id)
if host.gcd_device_id:
logging.info("Using gcd_device_id: " + host.gcd_device_id)
desktop = Desktop(sizes)
# Keep track of the number of consecutive failures of any child process to
# run for longer than a set period of time. The script will exit after a
# threshold is exceeded.
# There is no point in tracking the X session process separately, since it is
# launched at (roughly) the same time as the X server, and the termination of
# one of these triggers the termination of the other.
x_server_inhibitor = RelaunchInhibitor("X server")
host_inhibitor = RelaunchInhibitor("host")
all_inhibitors = [x_server_inhibitor, host_inhibitor]
# Don't allow relaunching the script on the first loop iteration.
allow_relaunch_self = False
while True:
# Set the backoff interval and exit if a process failed too many times.
backoff_time = SHORT_BACKOFF_TIME
for inhibitor in all_inhibitors:
if inhibitor.failures >= MAX_LAUNCH_FAILURES:
logging.error("Too many launch failures of '%s', exiting."
% inhibitor.label)
return 1
elif inhibitor.failures >= SHORT_BACKOFF_THRESHOLD:
backoff_time = LONG_BACKOFF_TIME
relaunch_times = []
# If the session process or X server stops running (e.g. because the user
# logged out), kill the other. This will trigger the next conditional block
# as soon as os.waitpid() reaps its exit-code.
if desktop.session_proc is None and desktop.x_proc is not None:
logging.info("Terminating X server")
desktop.x_proc.terminate()
elif desktop.x_proc is None and desktop.session_proc is not None:
logging.info("Terminating X session")
desktop.session_proc.terminate()
elif desktop.x_proc is None and desktop.session_proc is None:
# Both processes have terminated.
if (allow_relaunch_self and x_server_inhibitor.failures == 0 and
host_inhibitor.failures == 0):
# Since the user's desktop is already gone at this point, there's no
# state to lose and now is a good time to pick up any updates to this
# script that might have been installed.
logging.info("Relaunching self")
relaunch_self()
else:
# If there is a non-zero |failures| count, restarting the whole script
# would lose this information, so just launch the session as normal.
if x_server_inhibitor.is_inhibited():
logging.info("Waiting before launching X server")
relaunch_times.append(x_server_inhibitor.earliest_relaunch_time)
else:
logging.info("Launching X server and X session.")
desktop.launch_session(args)
x_server_inhibitor.record_started(MINIMUM_PROCESS_LIFETIME,
backoff_time)
allow_relaunch_self = True
if desktop.host_proc is None:
if host_inhibitor.is_inhibited():
logging.info("Waiting before launching host process")
relaunch_times.append(host_inhibitor.earliest_relaunch_time)
else:
logging.info("Launching host process")
desktop.launch_host(host_config)
host_inhibitor.record_started(MINIMUM_PROCESS_LIFETIME,
backoff_time)
deadline = min(relaunch_times) if relaunch_times else 0
pid, status = waitpid_handle_exceptions(-1, deadline)
if pid == 0:
continue
logging.info("wait() returned (%s,%s)" % (pid, status))
# When a process has terminated, and we've reaped its exit-code, any Popen
# instance for that process is no longer valid. Reset any affected instance
# to None.
if desktop.x_proc is not None and pid == desktop.x_proc.pid:
logging.info("X server process terminated")
desktop.x_proc = None
x_server_inhibitor.record_stopped()
if desktop.session_proc is not None and pid == desktop.session_proc.pid:
logging.info("Session process terminated")
desktop.session_proc = None
if desktop.host_proc is not None and pid == desktop.host_proc.pid:
logging.info("Host process terminated")
desktop.host_proc = None
desktop.host_ready = False
host_inhibitor.record_stopped()
# These exit-codes must match the ones used by the host.
# See remoting/host/host_error_codes.h.
# Delete the host or auth configuration depending on the returned error
# code, so the next time this script is run, a new configuration
# will be created and registered.
if os.WIFEXITED(status):
if os.WEXITSTATUS(status) == 100:
logging.info("Host configuration is invalid - exiting.")
return 0
elif os.WEXITSTATUS(status) == 101:
logging.info("Host ID has been deleted - exiting.")
host_config.clear()
host_config.save_and_log_errors()
return 0
elif os.WEXITSTATUS(status) == 102:
logging.info("OAuth credentials are invalid - exiting.")
return 0
elif os.WEXITSTATUS(status) == 103:
logging.info("Host domain is blocked by policy - exiting.")
return 0
# Nothing to do for Mac-only status 104 (login screen unsupported)
elif os.WEXITSTATUS(status) == 105:
logging.info("Username is blocked by policy - exiting.")
return 0
else:
logging.info("Host exited with status %s." % os.WEXITSTATUS(status))
elif os.WIFSIGNALED(status):
logging.info("Host terminated by signal %s." % os.WTERMSIG(status))
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format="%(asctime)s:%(levelname)s:%(message)s")
sys.exit(main())
| bsd-3-clause | -6,678,318,860,777,292,000 | 301,674,055,216,744,260 | 35.996356 | 80 | 0.657676 | false |
IAAAIAAIA/CS231n | thomas/assignment1/q3_sgd.py | 2 | 3751 | #!/usr/bin/env python
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 5000
import glob
import random
import numpy as np
import os.path as op
import pickle as pickle
def load_saved_params():
"""
A helper function that loads previously saved parameters and resets
iteration start.
"""
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
with open("saved_params_%d.npy" % st, "r") as f:
params = pickle.load(f)
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
with open("saved_params_%d.npy" % iter, "w") as f:
pickle.dump(params, f)
pickle.dump(random.getstate(), f)
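# Round-trip sketch (illustrative): persist parameters at some iteration,
# then resume from the newest saved_params_<iter>.npy file found on disk.
#   save_params(100, params)
#   start, params, state = load_saved_params()  # start == 100 here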
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
PRINT_EVERY=10):
""" Stochastic Gradient Descent
Implement the stochastic gradient descent method in this function.
Arguments:
f -- the function to optimize, it should take a single
argument and yield two outputs, a cost and the gradient
with respect to the arguments
x0 -- the initial point to start SGD from
step -- the step size for SGD
iterations -- total iterations to run SGD for
postprocessing -- postprocessing function for the parameters
if necessary. In the case of word2vec we will need to
normalize the word vectors to have unit length.
PRINT_EVERY -- specifies how many iterations to output loss
Return:
x -- the parameter value after SGD finishes
"""
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx
            step *= 0.5 ** (start_iter // ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocessing:
postprocessing = lambda x: x
expcost = None
for iter in range(start_iter + 1, iterations + 1):
# Don't forget to apply the postprocessing after every iteration!
# You might want to print the progress every few iterations.
        cost = None
        ### YOUR CODE HERE
        # A reference implementation (the released assignment leaves this
        # blank): evaluate cost and gradient, step downhill, postprocess.
        cost, grad = f(x)
        x = x - step * grad
        x = postprocessing(x)
        ### END YOUR CODE
if iter % PRINT_EVERY == 0:
if not expcost:
expcost = cost
else:
expcost = .95 * expcost + .05 * cost
print("iter %d: %f" % (iter, expcost))
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
def sanity_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print("Running sanity checks...")
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
print("test 1 result:", t1)
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
print("test 2 result:", t2)
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
print("test 3 result:", t3)
assert abs(t3) <= 1e-6
print("")
def your_sanity_checks():
"""
    Use this space to add any additional sanity checks by running:
python q3_sgd.py
This function will not be called by the autograder, nor will
your additional tests be graded.
"""
print("Running your sanity checks...")
### YOUR CODE HERE
    pass  # no additional checks by default; add your own tests here
### END YOUR CODE
if __name__ == "__main__":
sanity_check()
your_sanity_checks()
| mit | -169,772,691,434,996,380 | -3,068,206,373,933,899,300 | 25.602837 | 75 | 0.591576 | false |
lz1988/django-web2015 | django/contrib/grappelli/tests/test_related.py | 3 | 12627 | # coding: utf-8
# PYTHON IMPORTS
import datetime
# DJANGO IMPORTS
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils import six, translation, timezone
try:
import json
except ImportError:
from django.utils import simplejson as json
# GRAPPELLI IMPORTS
from grappelli.tests.models import Category, Entry
@override_settings(GRAPPELLI_AUTOCOMPLETE_LIMIT=10)
@override_settings(GRAPPELLI_AUTOCOMPLETE_SEARCH_FIELDS={})
class RelatedTests(TestCase):
urls = "grappelli.tests.urls"
def setUp(self):
"""
Create users, categories and entries
"""
self.superuser_1 = User.objects.create_superuser('Superuser001', 'superuser001@example.com', 'superuser001')
self.editor_1 = User.objects.create_user('Editor001', 'editor001@example.com', 'editor001')
self.editor_1.is_staff = True
self.editor_1.save()
self.user_1 = User.objects.create_user('User001', 'user001@example.com', 'user001')
self.user_1.is_staff = False
self.user_1.save()
# add categories
for i in range(100):
Category.objects.create(name="Category No %s" % (i))
# add entries
self.entry_superuser = Entry.objects.create(title="Entry Superuser",
date=timezone.now(),
user=self.superuser_1)
self.entry_editor = Entry.objects.create(title="Entry Editor",
date=timezone.now(),
user=self.editor_1)
# set to en to check error messages
translation.activate("en")
def test_setup(self):
"""
Test setup
"""
self.assertEqual(User.objects.all().count(), 3)
self.assertEqual(Category.objects.all().count(), 100)
self.assertEqual(Entry.objects.all().count(), 2)
def test_related_lookup(self):
"""
Test related lookup
"""
self.client.login(username="User001", password="user001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 403)
self.client.login(username="Superuser001", password="superuser001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": ""}])
# ok
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}])
# wrong object_id
response = self.client.get("%s?object_id=10000&app_label=%s&model_name=%s" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "10000", "label": "?"}])
# filtered queryset (single filter) fails
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s&query_string=id__gte=99" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "?"}])
# filtered queryset (single filter) works
response = self.client.get("%s?object_id=100&app_label=%s&model_name=%s&query_string=id__gte=99" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "100", "label": "Category No 99 (100)"}])
# filtered queryset (multiple filters) fails
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s&query_string=name__icontains=99:id__gte=99" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "?"}])
# filtered queryset (multiple filters) works
response = self.client.get("%s?object_id=100&app_label=%s&model_name=%s&query_string=name__icontains=99:id__gte=99" % (reverse("grp_related_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "100", "label": "Category No 99 (100)"}])
# custom queryset (Superuser)
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s" % (reverse("grp_related_lookup"), "grappelli", "entry"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Entry Superuser"}])
response = self.client.get("%s?object_id=2&app_label=%s&model_name=%s" % (reverse("grp_related_lookup"), "grappelli", "entry"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "2", "label": "Entry Editor"}])
# custom queryset (Editor)
# FIXME: this should fail, because the custom admin queryset
# limits the entry to the logged in user (but we currently do not make use
# of custom admin querysets)
self.client.login(username="Editor001", password="editor001")
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s" % (reverse("grp_related_lookup"), "grappelli", "entry"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Entry Superuser"}])
# wrong app_label/model_name
response = self.client.get("%s?object_id=1&app_label=false&model_name=false" % (reverse("grp_related_lookup")))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": ""}])
response = self.client.get("%s?object_id=&app_label=false&model_name=false" % (reverse("grp_related_lookup")))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": ""}])
def test_m2m_lookup(self):
"""
Test M2M lookup
"""
self.client.login(username="User001", password="user001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 403)
self.client.login(username="Superuser001", password="superuser001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": ""}])
# ok (single)
response = self.client.get("%s?object_id=1&app_label=%s&model_name=%s" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}])
# wrong object_id (single)
response = self.client.get("%s?object_id=10000&app_label=%s&model_name=%s" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "10000", "label": "?"}])
# ok (multiple)
response = self.client.get("%s?object_id=1,2,3&app_label=%s&model_name=%s" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}, {"value": "2", "label": "Category No 1 (2)"}, {"value": "3", "label": "Category No 2 (3)"}])
# wrong object_id (multiple)
response = self.client.get("%s?object_id=1,10000,3&app_label=%s&model_name=%s" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}, {"value": "10000", "label": "?"}, {"value": "3", "label": "Category No 2 (3)"}])
# filtered queryset (single filter) fails
response = self.client.get("%s?object_id=1,2,3&app_label=%s&model_name=%s&query_string=id__gte=99" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "?"}, {"value": "2", "label": "?"}, {"value": "3", "label": "?"}])
# filtered queryset (single filter) works
response = self.client.get("%s?object_id=1,2,3&app_label=%s&model_name=%s&query_string=id__lte=3" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}, {"value": "2", "label": "Category No 1 (2)"}, {"value": "3", "label": "Category No 2 (3)"}])
# filtered queryset (multiple filters) fails
response = self.client.get("%s?object_id=1,2,3&app_label=%s&model_name=%s&query_string=name__icontains=99:id__gte=99" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "?"}, {"value": "2", "label": "?"}, {"value": "3", "label": "?"}])
# filtered queryset (multiple filters) works
response = self.client.get("%s?object_id=1,2,3&app_label=%s&model_name=%s&query_string=name__icontains=Category:id__lte=3" % (reverse("grp_m2m_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": "1", "label": "Category No 0 (1)"}, {"value": "2", "label": "Category No 1 (2)"}, {"value": "3", "label": "Category No 2 (3)"}])
def test_autocomplete_lookup(self):
"""
Test autocomplete lookup
"""
self.client.login(username="User001", password="user001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 403)
self.client.login(username="Superuser001", password="superuser001")
response = self.client.get(reverse("grp_related_lookup"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": ""}])
# term not found
response = self.client.get("%s?term=XXXXXXXXXX&app_label=%s&model_name=%s" % (reverse("grp_autocomplete_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": None, "label": "0 results"}])
# ok (99 finds the id and the title, therefore 2 results)
response = self.client.get("%s?term=Category No 99&app_label=%s&model_name=%s" % (reverse("grp_autocomplete_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": 99, "label": "Category No 98 (99)"}, {"value": 100, "label": "Category No 99 (100)"}])
# filtered queryset (single filter)
response = self.client.get("%s?term=Category&app_label=%s&model_name=%s&query_string=id__gte=99" % (reverse("grp_autocomplete_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": 99, "label": "Category No 98 (99)"}, {"value": 100, "label": "Category No 99 (100)"}])
# filtered queryset (multiple filters)
response = self.client.get("%s?term=Category&app_label=%s&model_name=%s&query_string=name__icontains=99:id__gte=99" % (reverse("grp_autocomplete_lookup"), "grappelli", "category"))
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode('utf-8'), [{"value": 100, "label": "Category No 99 (100)"}])
| bsd-2-clause | 4,214,287,991,348,100,000 | -2,497,612,833,385,922,000 | 58.004673 | 202 | 0.630633 | false |
cfehring/slack-onnow | slack-onnow.py | 1 | 6863 | '''
This function handles a Slack slash command named "/onnow" and echoes the details back to the Slack user.
Follow these steps to configure the slash command in Slack:
1. Navigate to https://<your-team-domain>.slack.com/services/new
2. Search for and select "Slash Commands".
3. Enter a name for your command (/onnow) and click "Add Slash Command Integration".
4. Copy the token string from the integration settings and use it in the next section.
5. After you complete this blueprint, enter the provided API endpoint URL in the URL field.
6. Method: POST
7. Customize Name: Pac-12 API
8. Show this command in the autocomplete list (checked)
a. Description: returns live events currently airing on Pac-12.com
b. Usage hint: abbreviate networks: n a b l m o w
Follow these steps to encrypt your Slack token for use in this function:
1. Create a KMS key - http://docs.aws.amazon.com/kms/latest/developerguide/create-keys.html.
2. Encrypt the token using the AWS CLI.
$ aws kms encrypt --key-id alias/<KMS key name> --plaintext "<COMMAND_TOKEN>"
3. Copy the base-64 encoded, encrypted key (CiphertextBlob) to the kmsEncyptedToken variable.
4. Give your function's role permission for the kms:Decrypt action.
Example:
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"kms:Decrypt"
],
"Resource": [
"<your KMS key ARN>"
]
}
]
}
Follow these steps to complete the configuration of your command API endpoint
1. When completing the blueprint configuration select "POST" for method and
"Open" for security on the Endpoint Configuration page.
2. After completing the function creation, open the newly created API in the
API Gateway console.
3. Add a mapping template for the application/x-www-form-urlencoded content type with the
following body: { "body": $input.json("$") }
4. Deploy the API to the prod stage.
5. Update the URL for your Slack slash command with the invocation URL for the
created API resource in the prod stage.
'''
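# A minimal sketch of the request body this handler expects once the API
# Gateway mapping template ({ "body": $input.json("$") }) has wrapped the
# form-encoded Slack payload (field values below are made up):
#
#   {"body": "token=abc123&user_name=jdoe&command=%2Fonnow&text=n+b"}
#
# parse_qs() below turns that query string into a dict of lists, e.g.
# {'token': ['abc123'], 'text': ['n b'], ...}.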
import boto3
from base64 import b64decode
from urlparse import parse_qs
import logging
import requests  # Digital recommends the requests lib for interacting with APIs over HTTP: http://docs.python-requests.org/
import sys
ENCRYPTED_EXPECTED_TOKEN = "CiDwhvH0HpF1WdM/Qj2SO8kDmtA9FMF7ZIZN8ambj2J3gRKfAQEBAgB48Ibx9B6RdVnTP0I9kjvJA5rQPRTBe2SGTfGpm49id4EAAAB2MHQGCSqGSIb3DQEHBqBnMGUCAQAwYAYJKoZIhvcNAQcBMB4GCWCGSAFlAwQBLjARBAyjjtXp0tXAwOk3J9wCARCAMxstVfCsQUyB8g0ZnEuRs1rb6Fbor5JusgRbHuKJc6HsWDWr2vQGnBU05JowdV1mP0L5yQ==" # Enter the base-64 encoded, encrypted Slack command token (CiphertextBlob)
kms = boto3.client('kms')
expected_token = kms.decrypt(CiphertextBlob = b64decode(ENCRYPTED_EXPECTED_TOKEN))['Plaintext']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Define the Lambda handler, which returns a Python dict named 'ret'
# that gets serialized to a JSON object for Slack. 'ret' holds two keys:
# 'text' (a string) and 'attachments' (a list of {"text": ...} dicts).
def lambda_handler(event, context):
req_body = event['body']
params = parse_qs(req_body,keep_blank_values=1)
token = params['token'][0]
if token != expected_token:
logger.error("Request token (%s) does not match exptected", token)
raise Exception("Invalid request token")
# could get additional info from slack if needed in the future:
# user = params['user_name'][0]
# command = params['command'][0]
# channel = params['channel_name'][0]
command_text = params['text'][0]
# Split the arguments passed from the user into a list of networks named 'args'
if command_text:
args = command_text.split(' ')
# If no arguments are included, return data for all networks
else:
args = ["n", "a", "b", "l", "m", "o", "w"]
# initialize static variables for urls - note version 3
epg_url = 'http://api.pac-12.com/v3/epg' #production server
event_url = 'http://api.pac-12.com/v3/events' #production server
onnow_url = 'http://api.pac-12.com/v3/onnow'
# initialize the dictionary named 'ret', formatted the way slack likes the JSON
ret = {}
ret["text"] = "Currently airing on Pac-12.com:"
ret["attachments"] = []
# Iterate through the list of 'args' (networks)
# and set up the "networks" variable to pass to the Pac-12 API
# and the "net_name" variable to include as text back to the user
for i in range(len(args)):
net = "{}".format(args[i])
if net == 'n':
networks = 88 #Pac-12 Networks
net_name = "NAT"
elif net == 'b':
networks = 86 #Pac-12 Bay Area
net_name = "BAY"
elif net == 'a':
networks = 91 #Pac-12 Arizona
net_name = "ARZ"
elif net == 'l':
networks = 92 #Pac-12 Los Angeles
net_name = "LAX"
elif net == 'm':
networks = 87 #Pac-12 Mountain
net_name = "MTN"
elif net == 'o':
networks = 89 #Pac-12 Oregon
net_name = "ORG"
elif net == 'w':
networks = 90 #Pac-12 Washington
net_name = "WAS"
# elif net == 'help':
# If network letters are entered incorrectly, display help and exit
# Also works with /onnow help -or- /onnow ?
else:
help1 = "Separating with spaces, enter the first letter of each network after /onnow, as shown below:"
help2 = "/onnow n"
help3 = "/onnow b l"
help4 = "/onnow n a b l m o w"
message = "{}\n{}\n{}\n{}".format(help1,help2,help3,help4)
ret["attachments"].append({"text": message})
return ret
# limit results to just the next event
pagesize = 1
# request to p12 api for onnow data
onnow_get_params = {'pagesize': pagesize , 'networks': networks}
onnow_get_request = requests.get(onnow_url, params=onnow_get_params)
onnow_get_response = onnow_get_request.json()
# Pull out the override_url from the onnow call to the pac-12 api
override_url = onnow_get_response['programs'][0]['override_url']
# If the override_url doesn't exist, use the 'url'
if override_url is None:
return_url = onnow_get_response['programs'][0]['url']
else:
return_url = override_url
#Format the response returned to Slack
link = "{}: <{}>".format(net_name,return_url)
ret["attachments"].append({"text": link})
return ret
| gpl-3.0 | -8,764,494,107,190,408,000 | -8,947,171,999,495,796,000 | 35.897849 | 369 | 0.641556 | false |
IntelLabs/numba | numba/cuda/tests/cudapy/test_powi.py | 5 | 3276 | import math
import numpy as np
from numba import cuda, float64, int8, int32, void
from numba.cuda.testing import unittest, CUDATestCase
def cu_mat_power(A, power, power_A):
y, x = cuda.grid(2)
m, n = power_A.shape
if x >= n or y >= m:
return
power_A[y, x] = math.pow(A[y, x], int32(power))
def cu_mat_power_binop(A, power, power_A):
y, x = cuda.grid(2)
m, n = power_A.shape
if x >= n or y >= m:
return
power_A[y, x] = A[y, x] ** power
def vec_pow(r, x, y):
i = cuda.grid(1)
if i < len(r):
r[i] = pow(x[i], y[i])
def vec_pow_binop(r, x, y):
i = cuda.grid(1)
if i < len(r):
r[i] = x[i] ** y[i]
def vec_pow_inplace_binop(r, x):
i = cuda.grid(1)
if i < len(r):
r[i] **= x[i]
def random_complex(N):
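# NOTE: np.random.random(1) yields a single-element array, so this returns
# one complex value regardless of N; the bounds checks in the kernels above
# make the size-1 arrays safe to launch with larger grids.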
np.random.seed(123)
return (np.random.random(1) + np.random.random(1) * 1j)
class TestCudaPowi(CUDATestCase):
def test_powi(self):
dec = cuda.jit(void(float64[:, :], int8, float64[:, :]))
kernel = dec(cu_mat_power)
power = 2
A = np.arange(10, dtype=np.float64).reshape(2, 5)
Aout = np.empty_like(A)
kernel[1, A.shape](A, power, Aout)
self.assertTrue(np.allclose(Aout, A ** power))
def test_powi_binop(self):
dec = cuda.jit(void(float64[:, :], int8, float64[:, :]))
kernel = dec(cu_mat_power_binop)
power = 2
A = np.arange(10, dtype=np.float64).reshape(2, 5)
Aout = np.empty_like(A)
kernel[1, A.shape](A, power, Aout)
self.assertTrue(np.allclose(Aout, A ** power))
# Relative tolerance kwarg is provided because 1.0e-7 (the default for
# assert_allclose) is a bit tight for single precision.
def _test_cpow(self, dtype, func, rtol=1.0e-7):
N = 32
x = random_complex(N).astype(dtype)
y = random_complex(N).astype(dtype)
r = np.zeros_like(x)
cfunc = cuda.jit(func)
cfunc[1, N](r, x, y)
np.testing.assert_allclose(r, x ** y, rtol=rtol)
# Checks special cases
x = np.asarray([0.0j, 1.0j], dtype=dtype)
y = np.asarray([0.0j, 1.0], dtype=dtype)
r = np.zeros_like(x)
cfunc[1, 2](r, x, y)
np.testing.assert_allclose(r, x ** y, rtol=rtol)
def test_cpow_complex64_pow(self):
self._test_cpow(np.complex64, vec_pow, rtol=3.0e-7)
def test_cpow_complex64_binop(self):
self._test_cpow(np.complex64, vec_pow_binop, rtol=3.0e-7)
def test_cpow_complex128_pow(self):
self._test_cpow(np.complex128, vec_pow)
def test_cpow_complex128_binop(self):
self._test_cpow(np.complex128, vec_pow_binop)
def _test_cpow_inplace_binop(self, dtype, rtol=1.0e-7):
N = 32
x = random_complex(N).astype(dtype)
y = random_complex(N).astype(dtype)
r = x ** y
cfunc = cuda.jit(vec_pow_inplace_binop)
cfunc[1, N](x, y)
np.testing.assert_allclose(x, r, rtol=rtol)
def test_cpow_complex64_inplace_binop(self):
self._test_cpow_inplace_binop(np.complex64, rtol=3.0e-7)
def test_cpow_complex128_inplace_binop(self):
self._test_cpow_inplace_binop(np.complex128, rtol=3.0e-7)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -1,722,343,672,891,849,700 | -7,941,603,476,526,890,000 | 25.419355 | 74 | 0.570513 | false |
AlexHAHA/ardupilot | Tools/LogAnalyzer/LogAnalyzer.py | 86 | 12419 | #!/usr/bin/env python
#
# A module to analyze and identify any common problems which can be determined from log files
#
# Initial code by Andrew Chapman (amchapman@gmail.com), 16th Jan 2014
#
# some logging oddities noticed while doing this, to be followed up on:
# - tradheli MOT labels Mot1,Mot2,Mot3,Mot4,GGain
# - Pixhawk doesn't output one of the FMT labels... forget which one
# - MAG offsets seem to be constant (only seen data on Pixhawk)
# - MAG offsets seem to be cast to int before being output? (param is -84.67, logged as -84)
# - copter+plane use 'V' in their vehicle type/version/build line, rover uses lower case 'v'. Copter+Rover give a build number, plane does not
# - CTUN.ThrOut on copter is 0-1000, on plane+rover it is 0-100
# TODO: add test for noisy baro values
# TODO: support loading binary log files (use Tridge's mavlogdump?)
import DataflashLog
import pprint # temp
import imp
import glob
import inspect
import os, sys
import argparse
import datetime
import time
from xml.sax.saxutils import escape
class TestResult(object):
'''all tests return a standardized result type'''
class StatusType:
# NA means not applicable for this log (e.g. copter tests against a plane log), UNKNOWN means it is missing data required for the test
GOOD, FAIL, WARN, UNKNOWN, NA = range(5)
status = None
statusMessage = "" # can be multi-line
class Test(object):
'''base class to be inherited by log tests. Each test should be quite granular so we have lots of small tests with clear results'''
def __init__(self):
self.name = ""
self.result = None # will be an instance of TestResult after being run
self.execTime = None
self.enable = True
def run(self, logdata, verbose=False):
pass
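# A minimal sketch of a Test plugin, following the pattern the loader in
# TestSuite expects (illustrative only; real tests live in the 'tests' folder):
#
# class TestExample(Test):
#     def __init__(self):
#         Test.__init__(self)
#         self.name = "Example"
#     def run(self, logdata, verbose=False):
#         self.result = TestResult()
#         self.result.status = TestResult.StatusType.GOOD
#         self.result.statusMessage = "nothing to report"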
class TestSuite(object):
'''registers test classes, loading using a basic plugin architecture, and can run them all in one run() operation'''
def __init__(self):
self.tests = []
self.logfile = None
self.logdata = None
# dynamically load in Test subclasses from the 'tests' folder
# to prevent one being loaded, move it out of that folder, or set that test's .enable attribute to False
dirName = os.path.dirname(os.path.abspath(__file__))
testScripts = glob.glob(dirName + '/tests/*.py')
testClasses = []
for script in testScripts:
m = imp.load_source("m",script)
for name, obj in inspect.getmembers(m, inspect.isclass):
if name not in testClasses and inspect.getsourcefile(obj) == script:
testClasses.append(name)
self.tests.append(obj())
# and here's an example of explicitly loading a Test class if you wanted to do that
# m = imp.load_source("m", dirName + '/tests/TestBadParams.py')
# self.tests.append(m.TestBadParams())
def run(self, logdata, verbose):
'''run all registered tests in a single call, gathering execution timing info'''
self.logdata = logdata
if 'GPS' not in self.logdata.channels and 'GPS2' in self.logdata.channels:
# *cough*
self.logdata.channels['GPS'] = self.logdata.channels['GPS2']
self.logfile = logdata.filename
for test in self.tests:
# run each test in turn, gathering timing info
if test.enable:
startTime = time.time()
test.run(self.logdata, verbose) # RUN THE TEST
endTime = time.time()
test.execTime = 1000 * (endTime-startTime)
def outputPlainText(self, outputStats):
'''output test results in plain text'''
print 'Dataflash log analysis report for file: ' + self.logfile
print 'Log size: %.2fmb (%d lines)' % (self.logdata.filesizeKB / 1024.0, self.logdata.lineCount)
print 'Log duration: %s' % str(datetime.timedelta(seconds=self.logdata.durationSecs)) + '\n'
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print 'Vehicle Type: %s (%s)' % (self.logdata.vehicleType, self.logdata.getCopterType())
else:
print 'Vehicle Type: %s' % self.logdata.vehicleType
print 'Firmware Version: %s (%s)' % (self.logdata.firmwareVersion, self.logdata.firmwareHash)
print 'Hardware: %s' % self.logdata.hardwareType
print 'Free RAM: %s' % self.logdata.freeRAM
if self.logdata.skippedLines:
print "\nWARNING: %d malformed log lines skipped during read" % self.logdata.skippedLines
print '\n'
print "Test Results:"
for test in self.tests:
if not test.enable:
continue
statusMessageFirstLine = test.result.statusMessage.strip('\n\r').split('\n')[0]
statusMessageExtra = test.result.statusMessage.strip('\n\r').split('\n')[1:]
execTime = ""
if outputStats:
execTime = " (%6.2fms)" % (test.execTime)
if test.result.status == TestResult.StatusType.GOOD:
print " %20s: GOOD %-55s%s" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.FAIL:
print " %20s: FAIL %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.WARN:
print " %20s: WARN %-55s%s [GRAPH]" % (test.name, statusMessageFirstLine, execTime)
elif test.result.status == TestResult.StatusType.NA:
# skip any that aren't relevant for this vehicle/hardware/etc
continue
else:
print " %20s: UNKNOWN %-55s%s" % (test.name, statusMessageFirstLine, execTime)
#if statusMessageExtra:
for line in statusMessageExtra:
print " %29s %s" % ("",line)
print '\n'
print 'The Log Analyzer is currently BETA code.\nFor any support or feedback on the log analyzer please email Andrew Chapman (amchapman@gmail.com)'
print '\n'
def outputXML(self, xmlFile):
'''output test results to an XML file'''
# open the file for writing
xml = None
try:
if xmlFile == '-':
xml = sys.stdout
else:
xml = open(xmlFile, 'w')
except:
sys.stderr.write("Error opening output xml file: %s" % xmlFile)
sys.exit(1)
# output header info
print >>xml, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
print >>xml, "<loganalysis>"
print >>xml, "<header>"
print >>xml, " <logfile>" + escape(self.logfile) + "</logfile>"
print >>xml, " <sizekb>" + escape(`self.logdata.filesizeKB`) + "</sizekb>"
print >>xml, " <sizelines>" + escape(`self.logdata.lineCount`) + "</sizelines>"
print >>xml, " <duration>" + escape(str(datetime.timedelta(seconds=self.logdata.durationSecs))) + "</duration>"
print >>xml, " <vehicletype>" + escape(self.logdata.vehicleType) + "</vehicletype>"
if self.logdata.vehicleType == "ArduCopter" and self.logdata.getCopterType():
print >>xml, " <coptertype>" + escape(self.logdata.getCopterType()) + "</coptertype>"
print >>xml, " <firmwareversion>" + escape(self.logdata.firmwareVersion) + "</firmwareversion>"
print >>xml, " <firmwarehash>" + escape(self.logdata.firmwareHash) + "</firmwarehash>"
print >>xml, " <hardwaretype>" + escape(self.logdata.hardwareType) + "</hardwaretype>"
print >>xml, " <freemem>" + escape(`self.logdata.freeRAM`) + "</freemem>"
print >>xml, " <skippedlines>" + escape(`self.logdata.skippedLines`) + "</skippedlines>"
print >>xml, "</header>"
# output parameters
print >>xml, "<params>"
for param, value in self.logdata.parameters.items():
print >>xml, " <param name=\"%s\" value=\"%s\" />" % (param,escape(`value`))
print >>xml, "</params>"
# output test results
print >>xml, "<results>"
for test in self.tests:
if not test.enable:
continue
print >>xml, " <result>"
if test.result.status == TestResult.StatusType.GOOD:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>GOOD</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
elif test.result.status == TestResult.StatusType.FAIL:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>FAIL</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.WARN:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>WARN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " <data>(test data will be embeded here at some point)</data>"
elif test.result.status == TestResult.StatusType.NA:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>NA</status>"
else:
print >>xml, " <name>" + escape(test.name) + "</name>"
print >>xml, " <status>UNKNOWN</status>"
print >>xml, " <message>" + escape(test.result.statusMessage) + "</message>"
print >>xml, " </result>"
print >>xml, "</results>"
print >>xml, "</loganalysis>"
xml.close()
def main():
dirName = os.path.dirname(os.path.abspath(__file__))
# deal with command line arguments
parser = argparse.ArgumentParser(description='Analyze an APM Dataflash log for known issues')
parser.add_argument('logfile', type=argparse.FileType('r'), help='path to Dataflash log file (or - for stdin)')
parser.add_argument('-f', '--format', metavar='', type=str, action='store', choices=['bin','log','auto'], default='auto', help='log file format: \'bin\',\'log\' or \'auto\'')
parser.add_argument('-q', '--quiet', metavar='', action='store_const', const=True, help='quiet mode, do not print results')
parser.add_argument('-p', '--profile', metavar='', action='store_const', const=True, help='output performance profiling data')
parser.add_argument('-s', '--skip_bad', metavar='', action='store_const', const=True, help='skip over corrupt dataflash lines')
parser.add_argument('-e', '--empty', metavar='', action='store_const', const=True, help='run an initial check for an empty log')
parser.add_argument('-x', '--xml', type=str, metavar='XML file', nargs='?', const='', default='', help='write output to specified XML file (or - for stdout)')
parser.add_argument('-v', '--verbose', metavar='', action='store_const', const=True, help='verbose output')
args = parser.parse_args()
# load the log
startTime = time.time()
logdata = DataflashLog.DataflashLog(args.logfile.name, format=args.format, ignoreBadlines=args.skip_bad) # read log
endTime = time.time()
if args.profile:
print "Log file read time: %.2f seconds" % (endTime-startTime)
# check for empty log if requested
if args.empty:
emptyErr = DataflashLog.DataflashLogHelper.isLogEmpty(logdata)
if emptyErr:
sys.stderr.write("Empty log file: %s, %s" % (logdata.filename, emptyErr))
sys.exit(1)
#run the tests, and gather timings
testSuite = TestSuite()
startTime = time.time()
testSuite.run(logdata, args.verbose) # run tests
endTime = time.time()
if args.profile:
print "Test suite run time: %.2f seconds" % (endTime-startTime)
# deal with output
if not args.quiet:
testSuite.outputPlainText(args.profile)
if args.xml:
testSuite.outputXML(args.xml)
if not args.quiet:
print "XML output written to file: %s\n" % args.xml
if __name__ == "__main__":
main()
| gpl-3.0 | -1,330,286,227,001,090,800 | -1,831,769,078,817,156,400 | 46.765385 | 179 | 0.600531 | false |
Tong-Chen/scikit-learn | sklearn/cluster/bicluster/tests/test_utils.py | 10 | 1427 | """Tests for bicluster utilities."""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_true
from sklearn.cluster.bicluster.utils import get_indicators
from sklearn.cluster.bicluster.utils import get_shape
from sklearn.cluster.bicluster.utils import get_submatrix
def test_get_indicators():
rows = [2, 4, 5]
columns = [0, 1, 3]
shape = (6, 4)
row_ind, col_ind = get_indicators(rows, columns, shape)
assert_array_equal(row_ind, [False, False, True, False, True, True])
assert_array_equal(col_ind, [True, True, False, True])
def test_get_shape():
rows = [True, True, False, False]
cols = [True, False, True, True]
assert_equal(get_shape(rows, cols), (2, 3))
def test_get_submatrix():
data = np.arange(20).reshape(5, 4)
rows = [True, True, False, False, True]
cols = [False, False, True, True]
for X in (data, csr_matrix(data)):
submatrix = get_submatrix(rows, cols, X)
if issparse(submatrix):
submatrix = submatrix.todense()
assert_array_equal(submatrix, [[2, 3],
[6, 7],
[18, 19]])
submatrix[:] = -1
if issparse(X):
X = X.todense()
assert_true(np.all(X != -1))
| bsd-3-clause | -3,709,576,657,758,307,300 | 5,049,915,136,285,752,000 | 30.711111 | 72 | 0.614576 | false |
felipsmartins/namebench | libnamebench/util.py | 172 | 5175 | # Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Little utility functions."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import math
import os.path
import sys
import time
import tempfile
def CalculateListAverage(values):
"""Computes the arithmetic mean of a list of numbers."""
if not values:
return 0
return sum(values) / float(len(values))
def DrawTextBar(value, max_value, max_width=53):
"""Return a simple ASCII bar graph, making sure it fits within max_width.
Args:
value: integer or float representing the value of this bar.
max_value: integer or float representing the largest bar.
max_width: How many characters this graph can use (int)
Returns:
string
"""
hash_width = max_value / max_width
return int(math.ceil(value/hash_width)) * '#'
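# Example (illustrative): with max_width=53, DrawTextBar(50, 106) gives
# hash_width = 106/53 = 2, so the bar is ceil(50/2) = 25 '#' characters.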
def SecondsToMilliseconds(seconds):
return seconds * 1000
def SplitSequence(seq, size):
"""Split a list.
Args:
seq: sequence
size: int
Returns:
New list.
Recipe From http://code.activestate.com/recipes/425397/ (Modified to not return blank values)
"""
newseq = []
splitsize = 1.0/size*len(seq)
for i in range(size):
newseq.append(seq[int(round(i*splitsize)):int(round((i+1)*splitsize))])
return [x for x in newseq if x]
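# Example (illustrative): SplitSequence([1, 2, 3, 4, 5], 2) -> [[1, 2, 3], [4, 5]]
# (splitsize is 2.5, and Python 2's round() rounds 2.5 up to 3).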
def FindDataFile(filename):
"""Find a datafile, searching various relative and OS paths."""
filename = os.path.expanduser(filename)
if os.path.exists(filename):
return filename
# If it's not a relative path, we can't do anything useful.
if os.path.isabs(filename):
return filename
other_places = [os.getcwd(),
os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'Contents', 'Resources'),
os.path.join(os.getcwd(), 'namebench.app', 'Contents', 'Resources'),
os.path.join(os.getcwd(), '..'),
os.path.join(sys.prefix, 'namebench'),
'/usr/local/share/namebench',
'/usr/local/etc/namebench',
'/usr/local/namebench',
'/etc/namebench',
'/usr/share/namebench',
'/usr/namebench']
for directory in reversed(sys.path):
other_places.append(directory)
other_places.append(os.path.join(directory, 'namebench'))
for place in other_places:
path = os.path.join(place, filename)
if os.path.exists(path):
return path
print 'I could not find "%s". Tried:' % filename
for path in other_places:
print ' %s' % path
return filename
def GenerateOutputFilename(extension):
"""Generate a decent default output filename for a given extensio."""
# used for resolv.conf
if '.' in extension:
filename = extension
else:
output_base = 'namebench_%s' % datetime.datetime.strftime(datetime.datetime.now(),
'%Y-%m-%d %H%M')
output_base = output_base.replace(':', '').replace(' ', '_')
filename = '.'.join((output_base, extension))
output_dir = tempfile.gettempdir()
return os.path.join(output_dir, filename)
def GetLastExceptionString():
"""Get the last exception and return a good looking string for it."""
(exc, error) = sys.exc_info()[0:2]
exc_msg = str(exc)
if '<class' in exc_msg:
exc_msg = exc_msg.split("'")[1]
exc_msg = exc_msg.replace('dns.exception.', '')
error = '%s %s' % (exc_msg, error)
# We need to remove the trailing space at some point.
return error.rstrip()
def DoesClockGoBackwards():
"""Detect buggy Windows systems where time.clock goes backwards"""
reference = 0
print "Checking if time.clock() goes backwards (broken hardware)..."
for x in range(0, 200):
counter = time.clock()
if counter < reference:
print "Clock went backwards by %fms" % (counter - reference)
return True
reference = counter
time.sleep(random.random() / 500)
return False
def GetMostAccurateTimerFunction():
"""Pick the most accurate timer for a platform."""
if sys.platform[:3] == 'win' and not DoesClockGoBackwards():
return time.clock
else:
return time.time
def DistanceBetweenCoordinates(lat1, lon1, lat2, lon2):
"""Distance between two coordinate pairs (in km)
Based on:
http://cyberpython.wordpress.com/2010/03/31/python-calculate-the-distance-between-2-points-given-their-coordinates/
"""
lat1_r = math.radians(lat1)
lat2_r = math.radians(lat2)
lon_diff = math.radians(lon2 - lon1)
x = math.sin(lat1_r) * math.sin(lat2_r) + math.cos(lat1_r) * math.cos(lat2_r) * math.cos(lon_diff)
return math.degrees(math.acos(x)) * 60 * 1.852
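# Worked example (illustrative): one degree of longitude along the equator,
# DistanceBetweenCoordinates(0, 0, 0, 1): x = cos(1 deg), so acos(x) is
# 1 degree and the result is 1 * 60 * 1.852 = 111.12 km (60 nautical miles).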
| apache-2.0 | 3,125,243,132,701,335,000 | -7,493,456,882,107,181,000 | 29.621302 | 117 | 0.65971 | false |
Bismarrck/tensorflow | tensorflow/contrib/slim/python/slim/nets/inception_v2.py | 96 | 26999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for inception v2 classification network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def inception_v2_base(inputs,
final_endpoint='Mixed_5c',
min_depth=16,
depth_multiplier=1.0,
scope=None):
"""Inception v2 (6a2).
Constructs an Inception v2 network from inputs to the given final endpoint.
This method can construct the network up to the layer inception(5b) as
described in http://arxiv.org/abs/1502.03167.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'Mixed_4a',
'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a', 'Mixed_5b',
'Mixed_5c'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0
"""
# end_points will collect relevant activations for external use, for example
# summaries or losses.
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
depth = lambda d: max(int(d * depth_multiplier), min_depth)
with variable_scope.variable_scope(scope, 'InceptionV2', [inputs]):
with arg_scope(
[
layers.conv2d, layers_lib.max_pool2d, layers_lib.avg_pool2d,
layers.separable_conv2d
],
stride=1,
padding='SAME'):
# Note that sizes in the comments below assume an input spatial size of
# 224x224; however, the inputs can be of any size greater than 32x32.
# 224 x 224 x 3
end_point = 'Conv2d_1a_7x7'
# depthwise_multiplier here is different from depth_multiplier.
# depthwise_multiplier determines the output channels of the initial
# depthwise conv (see docs for tf.nn.separable_conv2d), while
# depth_multiplier controls the # channels of the subsequent 1x1
# convolution. Must have
# in_channels * depthwise_multipler <= out_channels
# so that the separable convolution is not overparameterized.
depthwise_multiplier = min(int(depth(64) / 3), 8)
net = layers.separable_conv2d(
inputs,
depth(64), [7, 7],
depth_multiplier=depthwise_multiplier,
stride=2,
weights_initializer=trunc_normal(1.0),
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 112 x 112 x 64
end_point = 'MaxPool_2a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2b_1x1'
net = layers.conv2d(
net,
depth(64), [1, 1],
scope=end_point,
weights_initializer=trunc_normal(0.1))
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 64
end_point = 'Conv2d_2c_3x3'
net = layers.conv2d(net, depth(192), [3, 3], scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 56 x 56 x 192
end_point = 'MaxPool_3a_3x3'
net = layers_lib.max_pool2d(net, [3, 3], scope=end_point, stride=2)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 192
# Inception module.
end_point = 'Mixed_3b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(64), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(32), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 256
end_point = 'Mixed_3c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(64), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(96), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 28 x 28 x 320
end_point = 'Mixed_4a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(160), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(224), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(64), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(96), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(192), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(128), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(128), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4d'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(160), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(160), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(160), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_4e'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(96), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(192), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(192), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(96), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 14 x 14 x 576
end_point = 'Mixed_5a'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_0 = layers.conv2d(
branch_0, depth(192), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], scope='Conv2d_0b_3x3')
branch_1 = layers.conv2d(
branch_1, depth(256), [3, 3], stride=2, scope='Conv2d_1a_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers_lib.max_pool2d(
net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
net = array_ops.concat([branch_0, branch_1, branch_2], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5b'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(160), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.avg_pool2d(net, [3, 3], scope='AvgPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
# 7 x 7 x 1024
end_point = 'Mixed_5c'
with variable_scope.variable_scope(end_point):
with variable_scope.variable_scope('Branch_0'):
branch_0 = layers.conv2d(
net, depth(352), [1, 1], scope='Conv2d_0a_1x1')
with variable_scope.variable_scope('Branch_1'):
branch_1 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_1 = layers.conv2d(
branch_1, depth(320), [3, 3], scope='Conv2d_0b_3x3')
with variable_scope.variable_scope('Branch_2'):
branch_2 = layers.conv2d(
net,
depth(192), [1, 1],
weights_initializer=trunc_normal(0.09),
scope='Conv2d_0a_1x1')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0b_3x3')
branch_2 = layers.conv2d(
branch_2, depth(224), [3, 3], scope='Conv2d_0c_3x3')
with variable_scope.variable_scope('Branch_3'):
branch_3 = layers_lib.max_pool2d(net, [3, 3], scope='MaxPool_0a_3x3')
branch_3 = layers.conv2d(
branch_3,
depth(128), [1, 1],
weights_initializer=trunc_normal(0.1),
scope='Conv2d_0b_1x1')
net = array_ops.concat([branch_0, branch_1, branch_2, branch_3], 3)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def inception_v2(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.8,
min_depth=16,
depth_multiplier=1.0,
prediction_fn=layers_lib.softmax,
spatial_squeeze=True,
reuse=None,
scope='InceptionV2'):
"""Inception v2 model for classification.
Constructs an Inception v2 network for classification as described in
http://arxiv.org/abs/1502.03167.
The recommended image size used to train this network is 224x224. For image
sizes that differ substantially, it is recommended to use inception_v2_base()
and connect custom final layers to the output.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether is training or not.
dropout_keep_prob: the percentage of activation values that are retained.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
prediction_fn: a function to get predictions out of logits.
spatial_squeeze: if True, logits is of shape [B, C], if false logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
Note that input image sizes other than 224x224 might lead to different
spatial dimensions, and hence cannot be squeezed. In this event,
it is best to set spatial_squeeze as False, and perform a reduce_mean
over the resulting spatial dimensions with sizes exceeding 1.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, num_classes]
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: if depth_multiplier <= 0.
"""
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
# Final pooling and prediction
with variable_scope.variable_scope(
scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
with arg_scope(
[layers_lib.batch_norm, layers_lib.dropout], is_training=is_training):
net, end_points = inception_v2_base(
inputs,
scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier)
with variable_scope.variable_scope('Logits'):
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = layers_lib.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a_{}x{}'.format(*kernel_size))
# 1 x 1 x 1024
net = layers_lib.dropout(
net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = array_ops.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
inception_v2.default_image_size = 224
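# Example usage (a minimal sketch; assumes `import tensorflow as tf` and the
# TF 1.x slim-style APIs imported above -- shapes are placeholders):
#
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   with arg_scope(inception_v2_arg_scope()):
#     logits, end_points = inception_v2(images, num_classes=1000,
#                                       is_training=False)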
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
TODO(jrru): Make this function work with unknown shapes. Theoretically, this
can be done with the code below. Problems are two-fold: (1) If the shape was
known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
handle tensors that define the kernel size.
shape = tf.shape(input_tensor)
return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                 tf.minimum(shape[2], kernel_size[1])])
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [
min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
]
return kernel_size_out
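# For illustration: a 224x224 input leaves a 7x7 feature map at Mixed_5c, so
# the [7, 7] kernel is used as-is; a 112x112 input would leave roughly 4x4
# and the pooling kernel shrinks to [4, 4].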
def inception_v2_arg_scope(weight_decay=0.00004,
batch_norm_var_collection='moving_vars'):
"""Defines the default InceptionV2 arg scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_var_collection: The name of the collection for the batch norm
variables.
Returns:
An `arg_scope` to use for the inception v2 model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': 0.9997,
# epsilon to prevent 0s in variance.
'epsilon': 0.001,
# collection containing update_ops.
'updates_collections': ops.GraphKeys.UPDATE_OPS,
# collection containing the moving mean and moving variance.
'variables_collections': {
'beta': None,
'gamma': None,
'moving_mean': [batch_norm_var_collection],
'moving_variance': [batch_norm_var_collection],
}
}
# Set weight_decay for weights in Conv and FC layers.
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
weights_regularizer=regularizers.l2_regularizer(weight_decay)):
with arg_scope(
[layers.conv2d],
weights_initializer=initializers.variance_scaling_initializer(),
activation_fn=nn_ops.relu,
normalizer_fn=layers_lib.batch_norm,
normalizer_params=batch_norm_params) as sc:
return sc
| apache-2.0 | -6,959,975,744,027,337,000 | 2,691,302,059,847,046,700 | 40.989114 | 80 | 0.585244 | false |
shineyear/catawampus | tr/persistobj_test.py | 6 | 5542 | #!/usr/bin/python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# unittest requires method names starting in 'test'
#pylint: disable-msg=C6409
"""Unit tests for persistobj.py."""
__author__ = 'dgentry@google.com (Denton Gentry)'
import os
import shutil
import tempfile
import unittest
import google3
import persistobj
class PersistentObjectTest(unittest.TestCase):
"""Tests for persistobj.py PersistentObject."""
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
def testPersistentObjectAttrs(self):
kwargs = {'foo1': 'bar1', 'foo2': 'bar2', 'foo3': 3}
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
self.assertEqual(tobj.foo1, 'bar1')
self.assertEqual(tobj.foo2, 'bar2')
self.assertEqual(tobj.foo3, 3)
def testReversibleEncoding(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
encoded = tobj._ToJson()
decoded = tobj._FromJson(encoded)
self.assertEqual(sorted(kwargs.items()), sorted(decoded.items()))
def testWriteToFile(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
encoded = open(tobj.filename).read()
decoded = tobj._FromJson(encoded)
self.assertEqual(sorted(kwargs.items()), sorted(decoded.items()))
def testReadFromFile(self):
contents = '{"foo": "bar", "baz": 4}'
with tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False) as f:
f.write(contents)
f.close()
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj',
filename=f.name)
self.assertEqual(tobj.foo, 'bar')
self.assertEqual(tobj.baz, 4)
def testReadFromCorruptFile(self):
contents = 'this is not a JSON file'
f = tempfile.NamedTemporaryFile(dir=self.tmpdir, delete=False)
f.write(contents)
f.close()
self.assertRaises(ValueError, persistobj.PersistentObject,
self.tmpdir, 'TestObj', filename=f.name)
def testUpdate(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj2 = persistobj.PersistentObject(self.tmpdir, 'TestObj',
filename=tobj.filename)
self.assertEqual(list(sorted(tobj.items())), list(sorted(tobj2.items())))
kwargs['foo1'] = 'bar2'
tobj.Update(**kwargs)
tobj3 = persistobj.PersistentObject(self.tmpdir, 'TestObj',
filename=tobj.filename)
self.assertEqual(list(sorted(tobj.items())), list(sorted(tobj3.items())))
def testUpdateInline(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj.Update(foo1='bar2')
self.assertEqual(tobj.foo1, 'bar2')
def testUpdateInlineMultiple(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj.Update(foo1='bar2', foo3=4)
self.assertEqual(tobj.foo1, 'bar2')
self.assertEqual(tobj.foo3, 4)
def testUpdateInlineDict(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj.Update(**dict(foo1='bar2'))
self.assertEqual(tobj.foo1, 'bar2')
def testUpdateFails(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj.objdir = '/this_path_should_not_exist_hijhgvWRQ4MVVSDHuheifuh'
kwargs['foo1'] = 'bar2'
self.assertRaises(OSError, tobj.Update, **kwargs)
def testGetPersistentObjects(self):
expected = ['{"foo": "bar1", "baz": 4}',
'{"foo": "bar2", "baz": 5}',
'{"foo": "bar3", "baz": 6}',
                'This is not a JSON file'] # test corrupt file handling
for obj in expected:
with tempfile.NamedTemporaryFile(
dir=self.tmpdir, prefix='tr69_dnld', delete=False) as f:
f.write(obj)
actual = persistobj.GetPersistentObjects(self.tmpdir)
self.assertEqual(len(actual), len(expected)-1)
found = [False, False, False]
for entry in actual:
if entry.foo == 'bar1' and entry.baz == 4:
found[0] = True
if entry.foo == 'bar2' and entry.baz == 5:
found[1] = True
if entry.foo == 'bar3' and entry.baz == 6:
found[2] = True
self.assertTrue(found[0])
self.assertTrue(found[1])
self.assertTrue(found[2])
def testDefaultValue(self):
kwargs = dict(foo=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
self.assertEqual(getattr(tobj, 'foo2', 2), 2)
def testDelete(self):
kwargs = dict(foo1='bar1', foo3=3)
tobj = persistobj.PersistentObject(self.tmpdir, 'TestObj', **kwargs)
tobj.Delete()
self.assertRaises(OSError, os.stat, tobj.filename)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,573,689,718,332,462,000 | 5,905,934,722,409,583,000 | 34.987013 | 77 | 0.662577 | false |
eusi/MissionPlanerHM | Lib/mailbox.py | 74 | 78102 | #! /usr/bin/env python
"""Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
import sys
import os
import time
import calendar
import socket
import errno
import copy
import email
import email.message
import email.generator
import StringIO
try:
if sys.platform == 'os2emx':
# OS/2 EMX fcntl() not adequate
raise ImportError
import fcntl
except ImportError:
fcntl = None
import warnings
with warnings.catch_warnings():
if sys.py3kwarning:
warnings.filterwarnings("ignore", ".*rfc822 has been removed",
DeprecationWarning)
import rfc822
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage', 'UnixMailbox',
'PortableUnixMailbox', 'MmdfMailbox', 'MHMailbox', 'BabylMailbox' ]
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
return self._factory(self.get_file(key))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __contains__(self, key):
return self.has_key(key)
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.iterkeys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def _dump_message(self, message, target, mangle_from_=False):
# Most files are opened in binary mode to allow predictable seeking.
# To get native line endings on disk, the user-friendly \n line endings
# used in strings and by email.Message are translated here.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = StringIO.StringIO()
gen = email.generator.Generator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
target.write(buffer.read().replace('\n', os.linesep))
elif isinstance(message, str):
if mangle_from_:
message = message.replace('\nFrom ', '\n>From ')
message = message.replace('\n', os.linesep)
target.write(message)
elif hasattr(message, 'read'):
while True:
line = message.readline()
if line == '':
break
if mangle_from_ and line.startswith('From '):
line = '>From ' + line[5:]
line = line.replace('\n', os.linesep)
target.write(line)
else:
raise TypeError('Invalid message type: %s' % type(message))
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=rfc822.Message, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
for path in self._paths.values():
                    os.mkdir(path, 0700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {}
for subdir in ('cur', 'new'):
self._toc_mtimes[subdir] = os.path.getmtime(self._paths[subdir])
self._last_read = time.time() # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError, e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
if isinstance(message, MaildirMessage):
os.utime(dest, (os.path.getatime(dest), message.get_date()))
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except KeyError:
pass
except OSError, e:
if e.errno != errno.ENOENT:
raise
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
new_path = os.path.join(self._path, subdir, key + suffix)
os.rename(os.path.join(self._path, temp_subpath), new_path)
if isinstance(message, MaildirMessage):
os.utime(new_path, (os.path.getatime(new_path),
message.get_date()))
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
f = open(os.path.join(self._path, subpath), 'r')
try:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
finally:
f.close()
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'r')
try:
return f.read()
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except OSError, e:
if e.errno == errno.ENOENT:
Maildir._count += 1
try:
return _create_carefully(path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
else:
raise
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
        # been modified, because os.path.getmtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[self._onetime_keys.next()]
except StopIteration:
return None
except KeyError:
continue
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
self._pending = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
for key in self._toc.keys():
yield key
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if buffer == '':
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
try:
os.rename(new_file.name, self._path)
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(self._path)
os.rename(new_file.name, self._path)
else:
raise
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
self.flush()
if self._locked:
self.unlock()
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(os.linesep, '')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(os.linesep, '\n'))
msg.set_from(from_line[5:])
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(os.linesep, '\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str) and message.startswith('From '):
newline = message.find('\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = ''
elif isinstance(message, _mboxMMDFMessage):
from_line = 'From ' + message.get_from()
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is None:
from_line = 'From MAILER-DAEMON %s' % time.asctime(time.gmtime())
start = self._file.tell()
self._file.write(from_line + os.linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
if f.tell() != 0:
f.write(os.linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith('From '):
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(line_pos)
elif line == '':
stops.append(line_pos)
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\001\001\001\001' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\001\001\001\001' + os.linesep)
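    # On disk each message is therefore framed by ^A^A^A^A delimiter lines
    # (sketch, with \n standing in for os.linesep):
    #   \001\001\001\001\n
    #   <headers>\n\n<body>\n
    #   \001\001\001\001\n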
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith('\001\001\001\001' + os.linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\001\001\001\001' + os.linesep:
stops.append(line_pos - len(os.linesep))
break
elif line == '':
stops.append(line_pos)
break
elif line == '':
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
for name, key_list in self.get_sequences().iteritems():
if key in key_list:
msg.add_sequence(name)
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'r+')
else:
f = open(os.path.join(self._path, str(key)), 'r')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
return f.read()
finally:
if self._locked:
_unlock_file(f)
finally:
f.close()
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except IOError, e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def has_key(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
raise NotEmptyError('Folder not empty: %s' % self._path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
f = open(os.path.join(self._path, '.mh_sequences'), 'r')
try:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
results[name] = [key for key in sorted(keys) \
if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
finally:
f.close()
return results
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.iteritems():
if len(keys) == 0:
continue
f.write('%s:' % name)
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
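    # The .mh_sequences file uses MH's run syntax; e.g. set_sequences with
    # {'flagged': [1, 2, 3, 7]} writes the line:
    #   flagged: 1-3 7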
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
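    # Example: a folder holding messages 1, 3 and 7 is renumbered by pack()
    # to 1, 2 and 3, and any sequences naming the old keys are rewritten;
    # keys previously handed to callers become invalid.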
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.iteritems():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset(('unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'))
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
visible_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
visible_headers.write(line.replace(os.linesep, '\n'))
body = self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_string(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip '1,' line specifying labels.
original_headers = StringIO.StringIO()
while True:
line = self._file.readline()
if line == '*** EOOH ***' + os.linesep or line == '':
break
original_headers.write(line.replace(os.linesep, '\n'))
while True:
line = self._file.readline()
if line == os.linesep or line == '':
break
return original_headers.getvalue() + \
self._file.read(stop - self._file.tell()).replace(os.linesep,
'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return StringIO.StringIO(self.get_string(key).replace('\n',
os.linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == '\037\014' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(',')
if label.strip() != '']
label_lists.append(labels)
elif line == '\037' or line == '\037' + os.linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(os.linesep))
elif line == '':
stops.append(line_pos - len(os.linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
f.write('BABYL OPTIONS:%sVersion: 5%sLabels:%s%s\037' %
(os.linesep, os.linesep, ','.join(self.get_labels()),
os.linesep))
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write('\014' + os.linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(os.linesep + '\037')
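    # Resulting file layout (sketch, with \n standing in for os.linesep):
    #   BABYL OPTIONS:\nVersion: 5\nLabels:<labels>\n\037    (mailbox header)
    #   \014\n1,<labels>,\n<headers>\n*** EOOH ***\n<visible>\n\n<body>\n\037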
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write('1')
for label in special_labels:
self._file.write(', ' + label)
self._file.write(',,')
for label in labels:
self._file.write(' ' + label + ',')
self._file.write(os.linesep)
else:
self._file.write('1,,' + os.linesep)
if isinstance(message, email.message.Message):
orig_buffer = StringIO.StringIO()
orig_generator = email.generator.Generator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
self._file.write('*** EOOH ***' + os.linesep)
if isinstance(message, BabylMessage):
vis_buffer = StringIO.StringIO()
vis_generator = email.generator.Generator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
elif isinstance(message, str):
body_start = message.find('\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write('*** EOOH ***' + os.linesep)
self._file.write(message[:body_start].replace('\n',
os.linesep))
self._file.write(message[body_start:].replace('\n',
os.linesep))
else:
self._file.write('*** EOOH ***' + os.linesep + os.linesep)
self._file.write(message.replace('\n', os.linesep))
elif hasattr(message, 'readline'):
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
self._file.write(line.replace('\n', os.linesep))
if line == '\n' or line == '':
self._file.write('*** EOOH ***' + os.linesep)
if first_pass:
first_pass = False
message.seek(original_pos)
else:
break
while True:
buffer = message.read(4096) # Buffer size is arbitrary.
if buffer == '':
break
self._file.write(buffer.replace('\n', os.linesep))
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
for name in ('_headers', '_unixfrom', '_payload', '_charset',
'preamble', 'epilogue', 'defects', '_default_type'):
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
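    # Example: set_flags('SF') stores the info string '2,FS' (flags are kept
    # sorted), so a message in 'cur' is written under a name like
    # '<uniq>:2,FS'.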
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags() != '':
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
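    # Example: set_flags('ROAF') splits the flags across the two headers,
    # producing 'Status: RO' (read, old) and 'X-Status: FA' (flagged,
    # answered).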
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence must be a string: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
def __init__(self, message=None):
"""Initialize an BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
return iter(self.readline, "")
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return ''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError, e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except IOError, e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except OSError, e:
if e.errno == errno.EEXIST or \
(os.name == 'os2' and e.errno == errno.EACCES):
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
else:
raise
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
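# Typical use of the locking support through the public API (sketch; the
# mailbox path is hypothetical):
#   box = mbox('/tmp/sample.mbox')
#   box.lock()
#   try:
#       box.add('From: a@example.com\n\nhello\n')
#       box.flush()
#   finally:
#       box.unlock()
#       box.close()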
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
## Start: classes from the original module (for backward compatibility).
# Note that the Maildir class, whose name is unchanged, itself offers a next()
# method for backward compatibility.
class _Mailbox:
def __init__(self, fp, factory=rfc822.Message):
self.fp = fp
self.seekp = 0
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
while 1:
self.fp.seek(self.seekp)
try:
self._search_start()
except EOFError:
self.seekp = self.fp.tell()
return None
start = self.fp.tell()
self._search_end()
self.seekp = stop = self.fp.tell()
if start != stop:
break
return self.factory(_PartialFile(self.fp, start, stop))
# Recommended to use PortableUnixMailbox instead!
class UnixMailbox(_Mailbox):
def _search_start(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
def _search_end(self):
self.fp.readline() # Throw away header line
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line[:5] == 'From ' and self._isrealfromline(line):
self.fp.seek(pos)
return
# An overridable mechanism to test for From-line-ness. You can either
# specify a different regular expression or define a whole new
# _isrealfromline() method. Note that this only gets called for lines
# starting with the 5 characters "From ".
#
# BAW: According to
#http://home.netscape.com/eng/mozilla/2.0/relnotes/demo/content-length.html
    # the only portable, reliable way to find message delimiters in a BSD (i.e.
# Unix mailbox) style folder is to search for "\n\nFrom .*\n", or at the
# beginning of the file, "^From .*\n". While _fromlinepattern below seems
# like a good idea, in practice, there are too many variations for more
# strict parsing of the line to be completely accurate.
#
# _strict_isrealfromline() is the old version which tries to do stricter
# parsing of the From_ line. _portable_isrealfromline() simply returns
# true, since it's never called if the line doesn't already start with
# "From ".
#
# This algorithm, and the way it interacts with _search_start() and
# _search_end() may not be completely correct, because it doesn't check
# that the two characters preceding "From " are \n\n or the beginning of
# the file. Fixing this would require a more extensive rewrite than is
# necessary. For convenience, we've added a PortableUnixMailbox class
# which does no checking of the format of the 'From' line.
_fromlinepattern = (r"From \s*[^\s]+\s+\w\w\w\s+\w\w\w\s+\d?\d\s+"
r"\d?\d:\d\d(:\d\d)?(\s+[^\s]+)?\s+\d\d\d\d\s*"
r"[^\s]*\s*"
"$")
_regexp = None
def _strict_isrealfromline(self, line):
if not self._regexp:
import re
self._regexp = re.compile(self._fromlinepattern)
return self._regexp.match(line)
def _portable_isrealfromline(self, line):
return True
_isrealfromline = _strict_isrealfromline
class PortableUnixMailbox(UnixMailbox):
_isrealfromline = UnixMailbox._portable_isrealfromline
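# Illustrative iteration sketch (the mailbox path is hypothetical):
#
#     import rfc822
#     fp = open('/var/mail/user')
#     for msg in PortableUnixMailbox(fp, rfc822.Message):
#         print msg.getheader('subject')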
class MmdfMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line[:5] == '\001\001\001\001\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\001\001\001\001\n':
self.fp.seek(pos)
return
class MHMailbox:
def __init__(self, dirname, factory=rfc822.Message):
import re
pat = re.compile('^[1-9][0-9]*$')
self.dirname = dirname
# the three following lines could be combined into:
# list = map(long, filter(pat.match, os.listdir(self.dirname)))
list = os.listdir(self.dirname)
list = filter(pat.match, list)
list = map(long, list)
list.sort()
# This only works in Python 1.6 or later;
# before that str() added 'L':
self.boxes = map(str, list)
self.boxes.reverse()
self.factory = factory
def __iter__(self):
return iter(self.next, None)
def next(self):
if not self.boxes:
return None
fn = self.boxes.pop()
fp = open(os.path.join(self.dirname, fn))
msg = self.factory(fp)
try:
msg._mh_msgno = fn
except (AttributeError, TypeError):
pass
return msg
class BabylMailbox(_Mailbox):
def _search_start(self):
while 1:
line = self.fp.readline()
if not line:
raise EOFError
if line == '*** EOOH ***\n':
return
def _search_end(self):
while 1:
pos = self.fp.tell()
line = self.fp.readline()
if not line:
return
if line == '\037\014\n' or line == '\037':
self.fp.seek(pos)
return
## End: classes from the original module (for backward compatibility).
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| gpl-3.0 | -6,770,923,384,633,964,000 | 1,295,131,570,074,910,200 | 34.975127 | 81 | 0.524199 | false |
ubernostrum/akismet | src/akismet.py | 1 | 8707 | import os
import sys
import textwrap
from typing import Optional
import requests
__version__ = "1.1"
class AkismetError(Exception):
"""
Base exception class for Akismet errors.
"""
pass
class UnknownArgumentError(AkismetError):
"""
Indicates an unknown argument was used as part of an API request.
"""
pass
class ProtocolError(AkismetError):
"""
Indicates an unexpected or non-standard response was received from
Akismet.
"""
pass
class ConfigurationError(AkismetError):
"""
Indicates an Akismet configuration error (config missing or invalid).
"""
pass
class APIKeyError(ConfigurationError):
"""
Indicates the supplied Akismet API key/URL are invalid.
"""
pass
class Akismet:
"""
A Python wrapper for the Akismet web API.
Two configuration parameters -- your Akismet API key and
registered URL -- are required; they can be passed when
instantiating, or set in the environment variables
PYTHON_AKISMET_API_KEY and PYTHON_AKISMET_BLOG_URL.
All the operations of the Akismet API are exposed here:
* verify_key
* comment_check
* submit_spam
* submit_ham
For full details of the Akismet API, see the Akismet documentation:
https://akismet.com/development/api/#detailed-docs
The verify_key operation will be automatically called for you as
this class is instantiated; ConfigurationError will be raised if
the configuration cannot be found or if the supplied key/URL are
invalid.
"""
COMMENT_CHECK_URL = "https://{}.rest.akismet.com/1.1/comment-check"
SUBMIT_HAM_URL = "https://{}.rest.akismet.com/1.1/submit-ham"
SUBMIT_SPAM_URL = "https://{}.rest.akismet.com/1.1/submit-spam"
VERIFY_KEY_URL = "https://rest.akismet.com/1.1/verify-key"
SUBMIT_SUCCESS_RESPONSE = "Thanks for making the web a better place."
OPTIONAL_KEYS = [
"blog_charset",
"blog_lang",
"comment_author",
"comment_author_email",
"comment_author_url",
"comment_content",
"comment_date_gmt",
"comment_post_modified_gmt",
"comment_type",
"is_test",
"permalink",
"recheck_reason",
"referrer",
"user_role",
]
user_agent_header = {
"User-Agent": "Python/{} | akismet.py/{}".format(
"{}.{}".format(*sys.version_info[:2]), __version__
)
}
def __init__(self, key: Optional[str] = None, blog_url: Optional[str] = None):
maybe_key = key if key is not None else os.getenv("PYTHON_AKISMET_API_KEY", "")
maybe_url = (
blog_url
if blog_url is not None
else os.getenv("PYTHON_AKISMET_BLOG_URL", "")
)
if maybe_key == "" or maybe_url == "":
raise ConfigurationError(
textwrap.dedent(
"""
Could not find full Akismet configuration.
Found API key: {}
Found blog URL: {}
""".format(
maybe_key, maybe_url
)
)
)
if not self.verify_key(maybe_key, maybe_url):
raise APIKeyError(
"Akismet key ({}, {}) is invalid.".format(maybe_key, maybe_url)
)
self.api_key = maybe_key
self.blog_url = maybe_url
def _api_request(
self, endpoint: str, user_ip: str, user_agent: str, **kwargs: str
) -> requests.Response:
"""
Makes a request to the Akismet API.
This method is used for all API calls except key verification,
since all endpoints other than key verification must
interpolate the API key into the URL and supply certain basic
data.
"""
unknown_args = [k for k in kwargs if k not in self.OPTIONAL_KEYS]
if unknown_args:
raise UnknownArgumentError(
"Unknown arguments while making request: {}.".format(
", ".join(unknown_args)
)
)
data = {
"blog": self.blog_url,
"user_ip": user_ip,
"user_agent": user_agent,
**kwargs,
}
return requests.post(
endpoint.format(self.api_key), data=data, headers=self.user_agent_header
)
def _submission_request(
self, operation: str, user_ip: str, user_agent: str, **kwargs: str
) -> bool:
"""
Submits spam or ham to the Akismet API.
"""
endpoint = {
"submit_spam": self.SUBMIT_SPAM_URL,
"submit_ham": self.SUBMIT_HAM_URL,
}[operation]
response = self._api_request(endpoint, user_ip, user_agent, **kwargs)
if response.text == self.SUBMIT_SUCCESS_RESPONSE:
return True
self._protocol_error(operation, response)
@classmethod
def _protocol_error(cls, operation: str, response: requests.Response) -> None:
"""
Raises an appropriate exception for unexpected API responses.
"""
raise ProtocolError(
textwrap.dedent(
"""
Received unexpected or non-standard response from Akismet API.
API operation was: {}
API response received was: {}
Debug header value was: {}
"""
).format(
operation, response.text, response.headers.get("X-akismet-debug-help")
)
)
@classmethod
def verify_key(cls, key: str, blog_url: str) -> bool:
"""
Verifies an Akismet API key and URL.
Returns True if the key and URL are valid, False otherwise.
"""
if not blog_url.startswith(("http://", "https://")):
raise ConfigurationError(
textwrap.dedent(
"""
Invalid site URL specified: {}
Akismet requires the full URL including the leading
'http://' or 'https://'.
"""
).format(blog_url)
)
response = requests.post(
cls.VERIFY_KEY_URL,
data={"key": key, "blog": blog_url},
headers=cls.user_agent_header,
)
if response.text == "valid":
return True
elif response.text == "invalid":
return False
else:
cls._protocol_error("verify_key", response)
def comment_check(self, user_ip: str, user_agent: str, **kwargs: str) -> bool:
"""
Checks a comment to determine whether it is spam.
The IP address and user-agent string of the remote user are
required. All other arguments documented by Akismet (other
than the PHP server information) are also optionally accepted.
See the Akismet API documentation for a full list:
https://akismet.com/development/api/#comment-check
Like the Akismet web API, returns True for a comment that is
spam, and False for a comment that is not spam.
"""
response = self._api_request(
self.COMMENT_CHECK_URL, user_ip, user_agent, **kwargs
)
if response.text == "true":
return True
elif response.text == "false":
return False
else:
self._protocol_error("comment_check", response)
def submit_spam(self, user_ip: str, user_agent: str, **kwargs: str) -> bool:
"""
Informs Akismet that a comment is spam.
The IP address and user-agent string of the remote user are
required. All other arguments documented by Akismet (other
than the PHP server information) are also optionally accepted.
See the Akismet API documentation for a full list:
https://akismet.com/development/api/#submit-spam
Returns True on success (the only expected response).
"""
return self._submission_request("submit_spam", user_ip, user_agent, **kwargs)
def submit_ham(self, user_ip: str, user_agent: str, **kwargs: str) -> bool:
"""
Informs Akismet that a comment is not spam.
The IP address and user-agent string of the remote user are
required. All other arguments documented by Akismet (other
than the PHP server information) are also optionally accepted.
See the Akismet API documentation for a full list:
https://akismet.com/development/api/#submit-ham
Returns True on success (the only expected response).
"""
return self._submission_request("submit_ham", user_ip, user_agent, **kwargs)
| bsd-3-clause | 2,898,891,687,682,068,000 | 2,405,577,221,350,555,000 | 28.316498 | 87 | 0.576777 | false |
Yelp/paasta | paasta_tools/adhoc_tools.py | 1 | 4751 | # Copyright 2015-2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import service_configuration_lib
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import prompt_pick_one
log = logging.getLogger(__name__)
def load_adhoc_job_config(
service, instance, cluster, load_deployments=True, soa_dir=DEFAULT_SOA_DIR
):
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service=service,
instance=instance,
instance_type="adhoc",
cluster=cluster,
soa_dir=soa_dir,
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = AdhocJobConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return AdhocJobConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
class AdhocJobConfig(LongRunningServiceConfig):
config_filename_prefix = "adhoc"
def __init__(
self,
service: str,
instance: str,
cluster: str,
config_dict: LongRunningServiceConfigDict,
branch_dict: BranchDictV2,
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
def get_default_interactive_config(
service: str, cluster: str, soa_dir: str, load_deployments: bool = False
) -> AdhocJobConfig:
default_job_config = {"cpus": 4, "mem": 10240, "disk": 1024}
try:
job_config = load_adhoc_job_config(
service=service, instance="interactive", cluster=cluster, soa_dir=soa_dir
)
except NoConfigurationForServiceError:
job_config = AdhocJobConfig(
service=service,
instance="interactive",
cluster=cluster,
config_dict={},
branch_dict=None,
soa_dir=soa_dir,
)
except NoDeploymentsAvailable:
job_config = load_adhoc_job_config(
service=service,
instance="interactive",
cluster=cluster,
soa_dir=soa_dir,
load_deployments=False,
)
if not job_config.branch_dict and load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
deploy_group = prompt_pick_one(
deployments_json.get_deploy_groups(), choosing="deploy group"
)
job_config.config_dict["deploy_group"] = deploy_group
job_config.branch_dict = {
"docker_image": deployments_json.get_docker_image_for_deploy_group(
deploy_group
),
"git_sha": deployments_json.get_git_sha_for_deploy_group(deploy_group),
"force_bounce": None,
"desired_state": "start",
}
for key, value in default_job_config.items():
job_config.config_dict.setdefault(key, value)
return job_config
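# Illustrative call (the service and cluster names are hypothetical and a
# valid soa-configs checkout is assumed):
#
#     job_config = get_default_interactive_config(
#         service="example_service",
#         cluster="example-cluster",
#         soa_dir=DEFAULT_SOA_DIR,
#         load_deployments=True,
#     )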
| apache-2.0 | 220,785,687,989,356,700 | 1,588,496,175,319,215,000 | 32.457746 | 85 | 0.651652 | false |
zhumengyuan/kallithea | kallithea/controllers/api/__init__.py | 2 | 10720 | # -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
kallithea.controllers.api
~~~~~~~~~~~~~~~~~~~~~~~~~
JSON RPC controller
This file was forked by the Kallithea project in July 2014.
Original author and date, and relevant copyright and licensing information is below:
:created_on: Aug 20, 2011
:author: marcink
:copyright: (c) 2013 RhodeCode GmbH, and others.
:license: GPLv3, see LICENSE.md for more details.
"""
import inspect
import logging
import types
import traceback
import time
from paste.response import replace_header
from pylons.controllers import WSGIController
from webob.exc import HTTPError
from kallithea.model.db import User
from kallithea.model import meta
from kallithea.lib.compat import izip_longest, json
from kallithea.lib.auth import AuthUser
from kallithea.lib.base import _get_ip_addr as _get_ip, _get_access_path
from kallithea.lib.utils2 import safe_unicode, safe_str
log = logging.getLogger('JSONRPC')
class JSONRPCError(BaseException):
def __init__(self, message):
self.message = message
super(JSONRPCError, self).__init__()
def __str__(self):
return safe_str(self.message)
def jsonrpc_error(message, retid=None, code=None):
"""
Generate a Response object with a JSON-RPC error body
:param code:
:param retid:
:param message:
"""
from pylons.controllers.util import Response
return Response(
body=json.dumps(dict(id=retid, result=None, error=message)),
status=code,
content_type='application/json'
)
class JSONRPCController(WSGIController):
"""
A WSGI-speaking JSON-RPC controller class
See the specification:
<http://json-rpc.org/wiki/specification>`.
Valid controller return values should be json-serializable objects.
Sub-classes should catch their exceptions and raise JSONRPCError
if they want to pass meaningful errors to the client.
"""
def _get_ip_addr(self, environ):
return _get_ip(environ)
def _get_method_args(self):
"""
Return `self._rpc_args` to dispatched controller method
chosen by __call__
"""
return self._rpc_args
def __call__(self, environ, start_response):
"""
Parse the request body as JSON, look up the method on the
controller and if it exists, dispatch to it.
"""
try:
return self._handle_request(environ, start_response)
finally:
meta.Session.remove()
def _handle_request(self, environ, start_response):
start = time.time()
ip_addr = self.ip_addr = self._get_ip_addr(environ)
self._req_id = None
if 'CONTENT_LENGTH' not in environ:
log.debug("No Content-Length")
return jsonrpc_error(retid=self._req_id,
message="No Content-Length in request")
else:
            length = int(environ['CONTENT_LENGTH'] or 0)
log.debug('Content-Length: %s' % length)
if length == 0:
log.debug("Content-Length is 0")
return jsonrpc_error(retid=self._req_id,
message="Content-Length is 0")
raw_body = environ['wsgi.input'].read(length)
try:
json_body = json.loads(raw_body)
except ValueError, e:
# catch JSON errors Here
return jsonrpc_error(retid=self._req_id,
message="JSON parse error ERR:%s RAW:%r"
% (e, raw_body))
# check AUTH based on API KEY
try:
self._req_api_key = json_body['api_key']
self._req_id = json_body['id']
self._req_method = json_body['method']
self._request_params = json_body['args']
if not isinstance(self._request_params, dict):
self._request_params = {}
log.debug(
'method: %s, params: %s' % (self._req_method,
self._request_params)
)
except KeyError, e:
return jsonrpc_error(retid=self._req_id,
message='Incorrect JSON query missing %s' % e)
# check if we can find this session using api_key
try:
u = User.get_by_api_key(self._req_api_key)
if u is None:
return jsonrpc_error(retid=self._req_id,
message='Invalid API KEY')
#check if we are allowed to use this IP
auth_u = AuthUser(u.user_id, self._req_api_key, ip_addr=ip_addr)
if not auth_u.ip_allowed:
return jsonrpc_error(retid=self._req_id,
message='request from IP:%s not allowed' % (ip_addr,))
else:
log.info('Access for IP:%s allowed' % (ip_addr,))
except Exception, e:
return jsonrpc_error(retid=self._req_id,
message='Invalid API KEY')
self._error = None
try:
self._func = self._find_method()
except AttributeError, e:
return jsonrpc_error(retid=self._req_id,
message=str(e))
# now that we have a method, add self._req_params to
# self.kargs and dispatch control to WGIController
argspec = inspect.getargspec(self._func)
arglist = argspec[0][1:]
defaults = map(type, argspec[3] or [])
default_empty = types.NotImplementedType
# kw arguments required by this method
func_kwargs = dict(izip_longest(reversed(arglist), reversed(defaults),
fillvalue=default_empty))
# this is little trick to inject logged in user for
# perms decorators to work they expect the controller class to have
# authuser attribute set
self.authuser = auth_u
# This attribute will need to be first param of a method that uses
# api_key, which is translated to instance of user at that name
USER_SESSION_ATTR = 'apiuser'
if USER_SESSION_ATTR not in arglist:
return jsonrpc_error(
retid=self._req_id,
message='This method [%s] does not support '
'authentication (missing %s param)' % (
self._func.__name__, USER_SESSION_ATTR)
)
# get our arglist and check if we provided them as args
for arg, default in func_kwargs.iteritems():
if arg == USER_SESSION_ATTR:
# USER_SESSION_ATTR is something translated from api key and
# this is checked before so we don't need validate it
continue
# skip the required param check if it's default value is
# NotImplementedType (default_empty)
if default == default_empty and arg not in self._request_params:
return jsonrpc_error(
retid=self._req_id,
message=(
'Missing non optional `%s` arg in JSON DATA' % arg
)
)
self._rpc_args = {USER_SESSION_ATTR: u}
self._rpc_args.update(self._request_params)
self._rpc_args['action'] = self._req_method
self._rpc_args['environ'] = environ
self._rpc_args['start_response'] = start_response
status = []
headers = []
exc_info = []
def change_content(new_status, new_headers, new_exc_info=None):
status.append(new_status)
headers.extend(new_headers)
exc_info.append(new_exc_info)
output = WSGIController.__call__(self, environ, change_content)
output = list(output)
headers.append(('Content-Length', str(len(output[0]))))
replace_header(headers, 'Content-Type', 'application/json')
start_response(status[0], headers, exc_info[0])
log.info('IP: %s Request to %s time: %.3fs' % (
self._get_ip_addr(environ),
safe_unicode(_get_access_path(environ)), time.time() - start)
)
return output
def _dispatch_call(self):
"""
Implement dispatch interface specified by WSGIController
"""
raw_response = ''
try:
raw_response = self._inspect_call(self._func)
if isinstance(raw_response, HTTPError):
self._error = str(raw_response)
except JSONRPCError, e:
self._error = safe_str(e)
except Exception, e:
log.error('Encountered unhandled exception: %s'
% (traceback.format_exc(),))
json_exc = JSONRPCError('Internal server error')
self._error = safe_str(json_exc)
if self._error is not None:
raw_response = None
response = dict(id=self._req_id, result=raw_response, error=self._error)
try:
return json.dumps(response)
except TypeError, e:
log.error('API FAILED. Error encoding response: %s' % e)
return json.dumps(
dict(
id=self._req_id,
result=None,
error="Error encoding response"
)
)
def _find_method(self):
"""
Return method named by `self._req_method` in controller if able
"""
log.debug('Trying to find JSON-RPC method: %s' % (self._req_method,))
if self._req_method.startswith('_'):
raise AttributeError("Method not allowed")
try:
func = getattr(self, self._req_method, None)
except UnicodeEncodeError:
raise AttributeError("Problem decoding unicode in requested "
"method name.")
if isinstance(func, types.MethodType):
return func
else:
raise AttributeError("No such method: %s" % (self._req_method,))
| gpl-3.0 | -2,452,979,359,706,960,000 | -5,444,090,528,293,516,000 | 34.379538 | 84 | 0.574347 | false |
bezhermoso/home | lib/ansible/runner/filter_plugins/core.py | 6 | 5614 | # (c) 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import base64
import json
import os.path
import yaml
import types
import pipes
import glob
import re
from ansible import errors
from ansible.utils import md5s
def to_nice_yaml(*a, **kw):
'''Make verbose, human readable yaml'''
return yaml.safe_dump(*a, indent=4, allow_unicode=True, default_flow_style=False, **kw)
def to_nice_json(*a, **kw):
'''Make verbose, human readable JSON'''
return json.dumps(*a, indent=4, sort_keys=True, **kw)
def failed(*a, **kw):
''' Test if task result yields failed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|failed expects a dictionary")
rc = item.get('rc',0)
failed = item.get('failed',False)
if rc != 0 or failed:
return True
else:
return False
def success(*a, **kw):
''' Test if task result yields success '''
return not failed(*a, **kw)
def changed(*a, **kw):
''' Test if task result yields changed '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|changed expects a dictionary")
if not 'changed' in item:
changed = False
if ('results' in item # some modules return a 'results' key
and type(item['results']) == list
and type(item['results'][0]) == dict):
for result in item['results']:
changed = changed or result.get('changed', False)
else:
changed = item.get('changed', False)
return changed
def skipped(*a, **kw):
''' Test if task result yields skipped '''
item = a[0]
if type(item) != dict:
raise errors.AnsibleFilterError("|skipped expects a dictionary")
skipped = item.get('skipped', False)
return skipped
def mandatory(a):
''' Make a variable mandatory '''
if not a:
raise errors.AnsibleFilterError('Mandatory variable not defined.')
return a
def bool(a):
''' return a bool for the arg '''
if a is None or type(a) == bool:
return a
if type(a) in types.StringTypes:
a = a.lower()
if a in ['yes', 'on', '1', 'true', 1]:
return True
else:
return False
def quote(a):
''' return its argument quoted for shell usage '''
return pipes.quote(a)
def fileglob(pathname):
''' return list of matched files for glob '''
return glob.glob(pathname)
def regex(value='', pattern='', ignorecase=False, match_type='search'):
''' Expose `re` as a boolean filter using the `search` method by default.
This is likely only useful for `search` and `match` which already
have their own filters.
'''
if ignorecase:
flags = re.I
else:
flags = 0
_re = re.compile(pattern, flags=flags)
_bool = __builtins__.get('bool')
return _bool(getattr(_re, match_type, 'search')(value))
def match(value, pattern='', ignorecase=False):
''' Perform a `re.match` returning a boolean '''
return regex(value, pattern, ignorecase, 'match')
def search(value, pattern='', ignorecase=False):
''' Perform a `re.search` returning a boolean '''
return regex(value, pattern, ignorecase, 'search')
def unique(a):
return set(a)
def intersect(a, b):
return set(a).intersection(b)
def difference(a, b):
return set(a).difference(b)
def symmetric_difference(a, b):
return set(a).symmetric_difference(b)
def union(a, b):
return set(a).union(b)
class FilterModule(object):
''' Ansible core jinja2 filters '''
def filters(self):
return {
# base 64
'b64decode': base64.b64decode,
'b64encode': base64.b64encode,
# json
'to_json': json.dumps,
'to_nice_json': to_nice_json,
'from_json': json.loads,
# yaml
'to_yaml': yaml.safe_dump,
'to_nice_yaml': to_nice_yaml,
'from_yaml': yaml.safe_load,
# path
'basename': os.path.basename,
'dirname': os.path.dirname,
'realpath': os.path.realpath,
# failure testing
'failed' : failed,
'success' : success,
# changed testing
'changed' : changed,
# skip testing
'skipped' : skipped,
# variable existence
'mandatory': mandatory,
# value as boolean
'bool': bool,
# quote string for shell usage
'quote': quote,
# md5 hex digest of string
'md5': md5s,
# file glob
'fileglob': fileglob,
# regex
'match': match,
'search': search,
'regex': regex,
# list
'unique' : unique,
'intersect': intersect,
'difference': difference,
'symmetric_difference': symmetric_difference,
'union': union,
}
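# Illustrative Jinja2/playbook usage of a few of the filters above (the
# variable names are hypothetical):
#
#     when: result|failed
#     when: result|changed
#     {{ "/etc/motd"|basename }}      # -> "motd"
#     {{ [1, 2, 2, 3]|unique }}       # -> unique items (as a set)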
| gpl-3.0 | 1,946,348,149,435,482,400 | 4,067,467,322,319,269,000 | 27.211055 | 91 | 0.589419 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/kotbfixes/scr/Spell093 - Cure Serious Wounds.py | 2 | 3880 | from toee import *
from utilities import *
def OnBeginSpellCast( spell ):
print "Cure Serious Wounds OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-conjuration-conjure", spell.caster )
def OnSpellEffect( spell ):
print "Cure Serious Wounds OnSpellEffect"
# Dar's level check no longer needed thanks to Spellslinger's dll fix
# if spell.caster_class == 13: #added to check for proper paladin slot level (darmagon)
# if spell.spell_level < 4:
# spell.caster.float_mesfile_line('mes\\spell.mes', 16008)
# spell.spell_end(spell.id)
# return
# if spell.caster_class == 14:
# if spell.spell_level < 4:#added to check for proper ranger slot level (darmagon)
# spell.caster.float_mesfile_line('mes\\spell.mes', 16008)
# spell.spell_end(spell.id)
# return
npc = spell.caster ## added so NPC's can use potion
if npc.type != obj_t_pc and npc.leader_get() == OBJ_HANDLE_NULL and spell.caster_level <= 0:
spell.caster_level = 10
dice = dice_new( "3d8" )
#dice.bonus = min( 15, spell.caster.stat_level_get( spell.caster_class ) )
dice.bonus = min( 15, spell.caster_level )
target = spell.target_list[0].obj
# check if target is friendly (willing target)
if target.is_friendly( spell.caster ):
# check if target is undead
if target.is_category_type( mc_type_undead ):
# check saving throw, damage target
if target.saving_throw_spell( spell.dc, D20_Save_Will, D20STD_F_NONE, spell.caster, spell.id ):
target.float_mesfile_line( 'mes\\spell.mes', 30001 )
				# saving throw successful, damage target, 1/2 damage
target.spell_damage_with_reduction( spell.caster, D20DT_POSITIVE_ENERGY, dice, D20DAP_UNSPECIFIED, DAMAGE_REDUCTION_HALF, D20A_CAST_SPELL, spell.id )
else:
target.float_mesfile_line( 'mes\\spell.mes', 30002 )
				# saving throw unsuccessful, damage target, full damage
target.spell_damage( spell.caster, D20DT_POSITIVE_ENERGY, dice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id )
else:
# heal target
target.spell_heal( spell.caster, dice, D20A_CAST_SPELL, spell.id )
target.healsubdual( spell.caster, dice, D20A_CAST_SPELL, spell.id )
else:
# check if target is undead
if target.is_category_type( mc_type_undead ):
# check saving throw, damage target
if target.saving_throw_spell( spell.dc, D20_Save_Will, D20STD_F_NONE, spell.caster, spell.id ):
target.float_mesfile_line( 'mes\\spell.mes', 30001 )
				# saving throw successful, damage target, 1/2 damage
target.spell_damage_with_reduction( spell.caster, D20DT_POSITIVE_ENERGY, dice, D20DAP_UNSPECIFIED, DAMAGE_REDUCTION_HALF, D20A_CAST_SPELL, spell.id )
else:
target.float_mesfile_line( 'mes\\spell.mes', 30002 )
				# saving throw unsuccessful, damage target, full damage
target.spell_damage( spell.caster, D20DT_POSITIVE_ENERGY, dice, D20DAP_UNSPECIFIED, D20A_CAST_SPELL, spell.id )
else:
# check saving throw
if target.saving_throw_spell( spell.dc, D20_Save_Will, D20STD_F_NONE, spell.caster, spell.id ):
#target.float_mesfile_line( 'mes\\spell.mes', 30001 )
				# saving throw successful, heal target (note: a full heal is applied here too)
target.spell_heal( spell.caster, dice, D20A_CAST_SPELL, spell.id )
target.healsubdual( spell.caster, dice, D20A_CAST_SPELL, spell.id )
else:
#target.float_mesfile_line( 'mes\\spell.mes', 30002 )
				# saving throw unsuccessful, heal target, full heal
target.spell_heal( spell.caster, dice, D20A_CAST_SPELL, spell.id )
target.healsubdual( spell.caster, dice, D20A_CAST_SPELL, spell.id )
game.particles( 'sp-Cure Serious Wounds', target )
spell.target_list.remove_target( target )
spell.spell_end(spell.id)
def OnBeginRound( spell ):
print "Cure Serious Wounds OnBeginRound"
def OnEndSpellCast( spell ):
print "Cure Serious Wounds OnEndSpellCast" | mit | 2,007,571,354,373,266,200 | -5,701,172,203,772,153,000 | 39.427083 | 153 | 0.714691 | false |
coverxiaoeye/ceryx | api/ceryx/db.py | 9 | 3248 | """
Simple Redis client, implemented the data logic of Ceryx.
"""
import redis
from ceryx import settings
class RedisRouter(object):
"""
Router using a redis backend, in order to route incoming requests.
"""
class LookupNotFound(Exception):
"""
Exception raised when a lookup for a specific host was not found.
"""
def __init__(self, message, errors=None):
Exception.__init__(self, message)
if errors is None:
self.errors = {'message': message}
else:
self.errors = errors
@staticmethod
def from_config(path=None):
"""
Returns a RedisRouter, using the default configuration from Ceryx
settings.
"""
return RedisRouter(settings.REDIS_HOST, settings.REDIS_PORT,
0, settings.REDIS_PREFIX)
def __init__(self, host, port, db, prefix):
self.client = redis.StrictRedis(host=host, port=port, db=db)
self.prefix = prefix
def _prefixed_route_key(self, source):
"""
Returns the prefixed key, if prefix has been defined, for the given
route.
"""
prefixed_key = 'routes:%s'
if self.prefix is not None:
prefixed_key = self.prefix + ':routes:%s'
prefixed_key = prefixed_key % source
return prefixed_key
def lookup(self, host, silent=False):
"""
Fetches the target host for the given host name. If no host matching
the given name is found and silent is False, raises a LookupNotFound
exception.
"""
lookup_host = self._prefixed_route_key(host)
target_host = self.client.get(lookup_host)
if target_host is None and not silent:
raise RedisRouter.LookupNotFound(
'Given host does not match with any route'
)
else:
return target_host
def lookup_hosts(self, pattern):
"""
Fetches hosts that match the given pattern. If no pattern is given,
all hosts are returned.
"""
if not pattern:
pattern = '*'
lookup_pattern = self._prefixed_route_key(pattern)
keys = self.client.keys(lookup_pattern)
return [key[len(lookup_pattern) - len(pattern):] for key in keys]
def lookup_routes(self, pattern):
"""
Fetches routes with host that matches the given pattern. If no pattern
is given, all routes are returned.
"""
hosts = self.lookup_hosts(pattern)
routes = []
for host in hosts:
routes.append(
{
'source': host,
'target': self.lookup(host, silent=True)
}
)
return routes
def insert(self, source, target):
"""
Inserts a new source/target host entry in to the database.
"""
source_key = self._prefixed_route_key(source)
self.client.set(source_key, target)
def delete(self, source):
"""
Deletes the entry of the given source, if it exists.
"""
source_key = self._prefixed_route_key(source)
self.client.delete(source_key)
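# Minimal usage sketch (the host and target values are hypothetical):
#
#     router = RedisRouter.from_config()
#     router.insert('example.com', 'backend:8000')
#     router.lookup('example.com')    # -> 'backend:8000'
#     router.delete('example.com')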
| mit | 8,362,490,569,224,841,000 | 329,045,510,534,427,100 | 30.843137 | 78 | 0.568042 | false |
kamcpp/tensorflow | tensorflow/tools/docs/gen_cc_md.py | 50 | 8418 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert Doxygen .xml files to MarkDown (.md files)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
from BeautifulSoup import BeautifulStoneSoup
import tensorflow as tf
ANCHOR_RE = re.compile(r'\W+')
PAGE_TEMPLATE = '''# `{0} {1}`
{2}
### Member Details
{3}'''
INDEX_TEMPLATE = '''# TensorFlow C++ Session API reference documentation
TensorFlow's public C++ API includes only the API for executing graphs, as of
version 0.5. To control the execution of a graph from C++:
1. Build the computation graph using the [Python API](../python/).
1. Use [`tf.train.write_graph()`](../python/train.md#write_graph) to
write the graph to a file.
1. Load the graph using the C++ Session API. For example:
```c++
// Reads a model graph definition from disk, and creates a session object you
// can use to run it.
Status LoadGraph(string graph_file_name, Session** session) {
GraphDef graph_def;
TF_RETURN_IF_ERROR(
ReadBinaryProto(Env::Default(), graph_file_name, &graph_def));
TF_RETURN_IF_ERROR(NewSession(SessionOptions(), session));
TF_RETURN_IF_ERROR((*session)->Create(graph_def));
return Status::OK();
}
```
1. Run the graph with a call to `session->Run()`
## Env
@@Env
@@RandomAccessFile
@@WritableFile
@@EnvWrapper
## Session
@@Session
@@SessionOptions
## Status
@@Status
@@Status::State
## Tensor
@@Tensor
@@TensorShape
@@TensorShapeDim
@@TensorShapeUtils
@@PartialTensorShape
@@PartialTensorShapeUtils
## Thread
@@Thread
@@ThreadOptions
'''
FLAGS = None
def member_definition(member_elt):
def_text = ''
def_elt = member_elt.find('definition')
if def_elt:
def_text = def_elt.text
return def_text
def member_sig(member_elt):
def_text = member_definition(member_elt)
argstring_text = ''
argstring = member_elt.find('argsstring')
if argstring:
argstring_text = argstring.text
sig = def_text + argstring_text
return sig
def anchorize(name):
return ANCHOR_RE.sub('_', name)
def element_text(member_elt, elt_name):
"""Extract all `para` text from (`elt_name` in) `member_elt`."""
text = []
if elt_name:
elt = member_elt.find(elt_name)
else:
elt = member_elt
if elt:
paras = elt.findAll('para')
for p in paras:
text.append(p.getText(separator=u' ').strip())
return '\n\n'.join(text)
def full_member_entry(member_elt):
"""Generate the description of `member_elt` for "Member Details"."""
anchor = '{#' + anchorize(member_definition(member_elt)) + '}'
full_entry = '#### `%s` %s' % (member_sig(member_elt), anchor)
complete_descr = element_text(member_elt, 'briefdescription') + '\n\n'
complete_descr += element_text(member_elt, 'detaileddescription')
if complete_descr:
full_entry += '\n\n' + complete_descr
return full_entry
def brief_member_entry(member_elt):
"""Generate the description of `member_elt` for the "Member Summary"."""
brief_item = ''
brief_descr = element_text(member_elt, 'briefdescription')
if brief_descr:
brief_item = '\n * ' + brief_descr
sig = member_sig(member_elt)
memdef = member_definition(member_elt)
linkified_sig = '[`{0}`](#{1})'.format(sig, anchorize(memdef))
return '* ' + linkified_sig + brief_item
def all_briefs(members):
briefs = [brief_member_entry(member_elt) for member_elt in members]
return '\n'.join(briefs)
def all_fulls(members):
fulls = [full_member_entry(member_elt) for member_elt in members]
return '\n\n'.join(fulls)
def page_overview(class_elt):
"""Returns the contents of the .md file for `class_elt`."""
overview_brief = ''
overview_details = ''
briefs = class_elt.findAll('briefdescription', recursive=False)
if briefs:
overview_brief = element_text(briefs[0], None)
details = class_elt.findAll('detaileddescription', recursive=False)
if details:
overview_details = element_text(details[0], None)
return overview_brief + '\n\n' + overview_details
def page_with_name(pages, name):
def match(n):
for i in xrange(len(pages)):
if pages[i].get_name() == n:
return i
return None
  # An explicit None check is needed here: index 0 is a valid match and
  # must not be treated as falsy.
  index = match(name)
  if index is None:
    index = match('tensorflow::' + name)
  return index
def get_all_indexed_pages():
all_pages = set()
lines = INDEX_TEMPLATE.split('\n')
for i in range(len(lines)):
if lines[i].startswith('@@'):
name = lines[i][2:]
all_pages.add(name)
return all_pages
def index_page(pages):
"""Create the index page linking to `pages` using INDEX_TEMPLATE."""
pages = pages[:]
lines = INDEX_TEMPLATE.split('\n')
all_md_files = []
for i in range(len(lines)):
if lines[i].startswith('@@'):
name = lines[i][2:]
page_index = page_with_name(pages, name)
if page_index is None:
raise ValueError('Missing page with name: ' + name)
lines[i] = '* [{0}]({1})'.format(
pages[page_index].get_name(), pages[page_index].get_md_filename())
all_md_files.append(pages[page_index].get_md_filename())
pages.pop(page_index)
return '\n'.join(lines)
def page_in_name_list(page, names):
for name in names:
if page.get_name() == name or page.get_name() == 'tensorflow::' + name:
return True
return False
class Page(object):
"""Holds the MarkDown converted contents of a .xml page."""
def __init__(self, xml_path, deftype):
self.type = deftype
xml_file = open(xml_path)
xml = xml_file.read()
xml = xml.replace('<computeroutput>', '`').replace('</computeroutput>', '`')
# TODO(josh11b): Should not use HTML entities inside ```...```.
soup = BeautifulStoneSoup(
xml, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)
self.name = soup.find('compoundname').text
print('Making page with name ' + self.name + ' (from ' + xml_path + ')')
members = soup('memberdef', prot='public')
fulls = all_fulls(members)
self.overview = page_overview(soup.find('compounddef'))
self.page_text = PAGE_TEMPLATE.format(
self.type, self.name, self.overview, fulls)
def get_text(self):
return self.page_text
def get_name(self):
return self.name
def get_short_name(self):
parse = self.get_name().split('::')
return parse[len(parse)-1]
def get_type(self):
return self.type
def get_md_filename(self):
capitalized_type = self.get_type()[0].upper() + self.get_type()[1:]
return capitalized_type + anchorize(self.get_short_name()) + '.md'
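# Example (illustrative): a Page named "tensorflow::Session" of type
# "class" maps to the output file name "ClassSession.md".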
def main(unused_argv):
print('Converting in ' + FLAGS.src_dir)
pages = []
all_pages = get_all_indexed_pages()
xml_files = os.listdir(FLAGS.src_dir)
for fname in xml_files:
if len(fname) < 6: continue
newpage = None
if fname[0:5] == 'class':
newpage = Page(os.path.join(FLAGS.src_dir, fname), 'class')
elif fname[0:6] == 'struct':
newpage = Page(os.path.join(FLAGS.src_dir, fname), 'struct')
if newpage is not None and page_in_name_list(newpage, all_pages):
pages.append(newpage)
md_filename = newpage.get_md_filename()
print('Writing ' + md_filename)
md_file = open(os.path.join(FLAGS.out_dir, md_filename), 'w')
print(newpage.get_text(), file=md_file)
index_text = index_page(pages)
index_md_file = open(os.path.join(FLAGS.out_dir, 'index.md'), 'w')
print(index_text, file=index_md_file)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--src_dir',
type=str,
default=None,
help='Directory containing the doxygen output.'
)
parser.add_argument(
'--out_dir',
type=str,
default=None,
help='Directory to which docs should be written.'
)
FLAGS = parser.parse_args()
tf.app.run()
| apache-2.0 | 1,643,179,804,716,882,700 | -3,006,418,260,584,479,000 | 25.808917 | 80 | 0.653837 | false |
inasafe/inasafe | safe/metadata35/test/test_provenance_step.py | 6 | 1282 | # coding=utf-8
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**Provenance step tests.**
Tests for the ProvenanceStep metadata class.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '12/10/2014'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from datetime import datetime
from unittest import TestCase
from safe.metadata35.provenance import ProvenanceStep
class TestProvenanceStep(TestCase):
def test_step(self):
title = 'Calculated some random impact'
description = 'Calculated some random impact'
provenance_step = ProvenanceStep(title, description)
# check that we get the correct step message
self.assertIs(provenance_step.title, title)
# check that the timestamp is correct
delta_seconds = (datetime.now() - provenance_step.time).total_seconds()
self.assertLessEqual(delta_seconds, 0.1)
| gpl-3.0 | 7,097,099,656,059,109,000 | -3,753,911,353,644,489,000 | 30.268293 | 79 | 0.705148 | false |
nafex/pyload | module/plugins/hoster/OboomCom.py | 6 | 5029 | # -*- coding: utf-8 -*-
#
# Test links:
# https://www.oboom.com/B7CYZIEB/10Mio.dat
import re
from module.common.json_layer import json_loads
from module.plugins.internal.Hoster import Hoster
from module.plugins.captcha.ReCaptcha import ReCaptcha
class OboomCom(Hoster):
__name__ = "OboomCom"
__type__ = "hoster"
__version__ = "0.37"
__status__ = "testing"
__pattern__ = r'https?://(?:www\.)?oboom\.com/(?:#(?:id=|/)?)?(?P<ID>\w{8})'
__description__ = """Oboom.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("stanley", "stanley.foerster@gmail.com")]
RECAPTCHA_KEY = "6LdqpO0SAAAAAJGHXo63HyalP7H4qlRs_vff0kJX"
def setup(self):
self.chunk_limit = 1
self.multiDL = self.resume_download = self.premium
def process(self, pyfile):
self.pyfile.url.replace(".com/#id=", ".com/#")
self.pyfile.url.replace(".com/#/", ".com/#")
self.html = self.load(pyfile.url)
self.get_file_id(self.pyfile.url)
self.get_session_token()
self.get_fileInfo(self.session_token, self.file_id)
self.pyfile.name = self.file_name
self.pyfile.size = self.file_size
if not self.premium:
self.solve_captcha()
self.get_download_ticket()
self.download("http://%s/1.0/dlh" % self.download_domain, get={'ticket': self.download_ticket, 'http_errors': 0})
def load_url(self, url, get=None):
if get is None:
get = {}
return json_loads(self.load(url, get))
def get_file_id(self, url):
self.file_id = re.match(OboomCom.__pattern__, url).group('ID')
def get_session_token(self):
if self.premium:
accountInfo = self.account.get_data(self.user, True)
if "session" in accountInfo:
self.session_token = accountInfo['session']
else:
self.fail(_("Could not retrieve premium session"))
else:
apiUrl = "http://www.oboom.com/1.0/guestsession"
result = self.load_url(apiUrl)
if result[0] == 200:
self.session_token = result[1]
else:
self.fail(_("Could not retrieve token for guest session. Error code: %s") % result[0])
def solve_captcha(self):
recaptcha = ReCaptcha(self)
for _i in xrange(5):
response, challenge = recaptcha.challenge(self.RECAPTCHA_KEY)
apiUrl = "http://www.oboom.com/1.0/download/ticket"
params = {'recaptcha_challenge_field': challenge,
'recaptcha_response_field': response,
'download_id': self.file_id,
'token': self.session_token}
result = self.load_url(apiUrl, params)
if result[0] == 200:
self.download_token = result[1]
self.download_auth = result[2]
self.captcha.correct()
self.wait(30)
break
elif result[0] == 400:
if result[1] == "incorrect-captcha-sol":
self.captcha.invalid()
elif result[1] == "captcha-timeout":
self.captcha.invalid()
elif result[1] == "forbidden":
self.retry(5, 15 * 60, _("Service unavailable"))
elif result[0] == 403:
if result[1] == -1: #: Another download is running
self.set_wait(15 * 60)
else:
self.set_wait(result[1], True)
self.wait()
self.retry(5)
else:
self.captcha.invalid()
self.fail(_("Received invalid captcha 5 times"))
def get_fileInfo(self, token, fileId):
apiUrl = "http://api.oboom.com/1.0/info"
params = {'token': token, 'items': fileId, 'http_errors': 0}
result = self.load_url(apiUrl, params)
if result[0] == 200:
item = result[1][0]
if item['state'] == "online":
self.file_size = item['size']
self.file_name = item['name']
else:
self.offline()
else:
self.fail(_("Could not retrieve file info. Error code %s: %s") % (result[0], result[1]))
def get_download_ticket(self):
apiUrl = "http://api.oboom.com/1/dl"
params = {'item': self.file_id, 'http_errors': 0}
if self.premium:
params['token'] = self.session_token
else:
params['token'] = self.download_token
params['auth'] = self.download_auth
result = self.load_url(apiUrl, params)
if result[0] == 200:
self.download_domain = result[1]
self.download_ticket = result[2]
elif result[0] == 421:
self.retry(wait_time=result[2] + 60, msg=_("Connection limit exceeded"))
else:
self.fail(_("Could not retrieve download ticket. Error code: %s") % result[0])
| gpl-3.0 | 61,965,872,000,214,696 | -6,808,268,617,498,315,000 | 33.445205 | 121 | 0.532909 | false |
fooelisa/ansible-modules-extras | system/locale_gen.py | 10 | 6423 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import os.path
from subprocess import Popen, PIPE, call
import re
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales.
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
default: null
aliases: []
state:
description:
- Whether the locale shall be present.
required: false
choices: ["present", "absent"]
default: "present"
'''
EXAMPLES = '''
# Ensure a locale exists.
- locale_gen: name=de_CH.UTF-8 state=present
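
# Ensure a locale is removed.
- locale_gen: name=de_CH.UTF-8 state=absent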
'''
LOCALE_NORMALIZATION = {
".utf8": ".UTF-8",
".eucjp": ".EUC-JP",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
"""Check if the given locale is available on the system. This is done by
checking either :
* if the locale is present in /etc/locales.gen
* or if the locale is present in /usr/share/i18n/SUPPORTED"""
if ubuntuMode:
__regexp = '^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/usr/share/i18n/SUPPORTED'
else:
__regexp = '^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
__locales_available = '/etc/locale.gen'
re_compiled = re.compile(__regexp)
with open(__locales_available, 'r') as fd:
for line in fd:
result = re_compiled.match(line)
if result and result.group('locale') == name:
return True
return False
def is_present(name):
"""Checks if the given locale is currently installed."""
output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
return any(fix_case(name) == fix_case(line) for line in output.splitlines())
def fix_case(name):
"""locale -a might return the encoding in either lower or upper case.
Passing through this function makes them uniform for comparisons."""
for s, r in LOCALE_NORMALIZATION.iteritems():
name = name.replace(s, r)
return name
def replace_line(existing_line, new_line):
"""Replaces lines in /etc/locale.gen"""
with open("/etc/locale.gen", "r") as f:
lines = [line.replace(existing_line, new_line) for line in f]
with open("/etc/locale.gen", "w") as f:
f.write("".join(lines))
def set_locale(name, enabled=True):
""" Sets the state of the locale. Defaults to enabled. """
search_string = '#{0,1}\s*%s (?P<charset>.+)' % name
if enabled:
new_string = '%s \g<charset>' % (name)
else:
new_string = '# %s \g<charset>' % (name)
with open("/etc/locale.gen", "r") as f:
lines = [re.sub(search_string, new_string, line) for line in f]
with open("/etc/locale.gen", "w") as f:
f.write("".join(lines))
def apply_change(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
set_locale(name, enabled=True)
else:
# Delete locale.
set_locale(name, enabled=False)
localeGenExitValue = call("locale-gen")
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
"""Create or remove locale.
Keyword arguments:
targetState -- Desired state, either present or absent.
name -- Name including encoding such as de_CH.UTF-8.
"""
if targetState=="present":
# Create locale.
# Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
localeGenExitValue = call(["locale-gen", name])
else:
# Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
with open("/var/lib/locales/supported.d/local", "r") as f:
content = f.readlines()
with open("/var/lib/locales/supported.d/local", "w") as f:
for line in content:
locale, charset = line.split(' ')
if locale != name:
f.write(line)
# Purge locales and regenerate.
# Please provide a patch if you know how to avoid regenerating the locales to keep!
localeGenExitValue = call(["locale-gen", "--purge"])
if localeGenExitValue!=0:
        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
# ==============================================================
# main
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
state = dict(choices=['present','absent'], default='present'),
),
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
if not os.path.exists("/etc/locale.gen"):
if os.path.exists("/var/lib/locales/supported.d/local"):
# Ubuntu created its own system to manage locales.
ubuntuMode = True
else:
module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
else:
# We found the common way to manage locales.
ubuntuMode = False
if not is_available(name, ubuntuMode):
module.fail_json(msg="The locales you've entered is not available "
"on your system.")
prev_state = "present" if is_present(name) else "absent"
changed = (prev_state!=state)
if module.check_mode:
module.exit_json(changed=changed)
else:
if changed:
try:
if ubuntuMode==False:
apply_change(state, name)
else:
apply_change_ubuntu(state, name)
except EnvironmentError as e:
module.fail_json(msg=e.strerror, exitValue=e.errno)
module.exit_json(name=name, changed=changed, msg="OK")
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 7,610,295,117,167,695,000 | 9,075,151,274,496,178,000 | 32.628272 | 141 | 0.601432 | false |
AymArbyn/get-emeralds | www/vendor/library50-php-5/CS50/share/php-openid-2.3.0/admin/gettlds.py | 126 | 1061 | """
Fetch the current TLD list from the IANA Web site, parse it, and print
an expression suitable for direct insertion into each library's trust
root validation module
Usage:
python gettlds.py (php|python|ruby)
Then cut-n-paste.
"""
import urllib2
import sys
langs = {
'php': (r"'/\.(",
"'", "|", "|' .",
r")\.?$/'"),
'python': ("['",
"'", "', '", "',",
"']"),
'ruby': ("%w'",
"", " ", "",
"'"),
}
lang = sys.argv[1]
prefix, line_prefix, separator, line_suffix, suffix = langs[lang]
f = urllib2.urlopen('http://data.iana.org/TLD/tlds-alpha-by-domain.txt')
tlds = []
output_line = ""
for input_line in f:
if input_line.startswith('#'):
continue
tld = input_line.strip().lower()
new_output_line = output_line + prefix + tld
if len(new_output_line) > 60:
print output_line + line_suffix
output_line = line_prefix + tld
else:
output_line = new_output_line
prefix = separator
print output_line + suffix
| gpl-3.0 | -1,697,573,697,778,030,800 | 3,022,823,965,923,892,000 | 21.574468 | 72 | 0.547597 | false |
tensorflow/models | research/object_detection/dataset_tools/tf_record_creation_util.py | 2 | 1718 | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Utilities for creating TFRecords of TF examples for the Open Images dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
import tensorflow.compat.v1 as tf
def open_sharded_output_tfrecords(exit_stack, base_path, num_shards):
"""Opens all TFRecord shards for writing and adds them to an exit stack.
Args:
exit_stack: A context2.ExitStack used to automatically closed the TFRecords
opened in this function.
base_path: The base path for all shards
num_shards: The number of shards
Returns:
The list of opened TFRecords. Position k in the list corresponds to shard k.
"""
tf_record_output_filenames = [
'{}-{:05d}-of-{:05d}'.format(base_path, idx, num_shards)
for idx in range(num_shards)
]
tfrecords = [
exit_stack.enter_context(tf.python_io.TFRecordWriter(file_name))
for file_name in tf_record_output_filenames
]
return tfrecords
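# Illustrative usage sketch (assumes the contextlib2 package is available;
# the path, shard count, and `examples` iterable are placeholders):
#
#   import contextlib2
#   with contextlib2.ExitStack() as stack:
#     writers = open_sharded_output_tfrecords(stack, '/tmp/train.record', 10)
#     for index, example in enumerate(examples):
#       writers[index % 10].write(example.SerializeToString())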
| apache-2.0 | 5,851,185,608,047,182,000 | 1,911,871,847,130,013,200 | 34.791667 | 80 | 0.699069 | false |
titiushko/readthedocs.org | readthedocs/rtd_tests/mocks/environment.py | 30 | 2826 | import mock
from readthedocs.doc_builder.environments import BuildEnvironment
class EnvironmentMockGroup(object):
'''Mock out necessary environment pieces'''
def __init__(self):
self.patches = {
'popen': mock.patch('subprocess.Popen'),
'process': mock.Mock(),
'api': mock.patch('slumber.Resource'),
'api_versions': mock.patch(
'readthedocs.projects.models.Project.api_versions'),
'non_blocking_lock': mock.patch(
'readthedocs.vcs_support.utils.NonBlockingLock.__enter__'),
'append_conf': mock.patch(
'readthedocs.doc_builder.backends.sphinx.BaseSphinx.append_conf'),
'move': mock.patch(
'readthedocs.doc_builder.backends.sphinx.BaseSphinx.move'),
'conf_dir': mock.patch(
'readthedocs.projects.models.Project.conf_dir'),
'html_build': mock.patch(
'readthedocs.doc_builder.backends.sphinx.HtmlBuilder.build'),
'html_move': mock.patch(
'readthedocs.doc_builder.backends.sphinx.HtmlBuilder.move'),
'pdf_build': mock.patch(
'readthedocs.doc_builder.backends.sphinx.PdfBuilder.build'),
'pdf_move': mock.patch(
'readthedocs.doc_builder.backends.sphinx.PdfBuilder.move'),
'epub_build': mock.patch(
'readthedocs.doc_builder.backends.sphinx.EpubBuilder.build'),
'epub_move': mock.patch(
'readthedocs.doc_builder.backends.sphinx.EpubBuilder.move'),
'glob': mock.patch('readthedocs.doc_builder.backends.sphinx.glob'),
'docker': mock.patch('readthedocs.doc_builder.environments.Client'),
'docker_client': mock.Mock(),
}
self.mocks = {}
def start(self):
        '''Start all patches and configure the default mock behavior'''
for patch in self.patches:
self.mocks[patch] = self.patches[patch].start()
self.mocks['process'].communicate.return_value = ('', '')
self.mocks['process'].returncode = 0
self.mocks['popen'].return_value = self.mocks['process']
self.mocks['docker'].return_value = self.mocks['docker_client']
self.mocks['glob'].return_value = ['/tmp/rtd/foo.tex']
self.mocks['conf_dir'].return_value = '/tmp/rtd'
def stop(self):
for patch in self.patches:
try:
self.patches[patch].stop()
except RuntimeError:
pass
def configure_mock(self, mock, kwargs):
'''Configure object mocks'''
self.mocks[mock].configure_mock(**kwargs)
def __getattr__(self, name):
try:
return self.mocks[name]
except KeyError:
raise AttributeError()
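# Illustrative use from a test case (the code under test is assumed):
#
#   mocks = EnvironmentMockGroup()
#   mocks.start()
#   try:
#       mocks.configure_mock('process', {'returncode': 1})
#       ...  # exercise build code that shells out via subprocess.Popen
#   finally:
#       mocks.stop()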
| mit | -923,468,614,410,776,800 | -4,491,342,586,045,047,000 | 38.802817 | 82 | 0.581741 | false |
Mashape/dd-agent | tests/checks/mock/test_directory.py | 35 | 5721 | # stdlib
from itertools import product
import os
import shutil
import tempfile
# project
from tests.checks.common import AgentCheckTest
class DirectoryTestCase(AgentCheckTest):
CHECK_NAME = 'directory'
FILE_METRICS = [
"system.disk.directory.file.bytes",
"system.disk.directory.file.modified_sec_ago",
"system.disk.directory.file.created_sec_ago"
]
HISTOGRAM_SUFFIXES = ['count', '95percentile', 'max', 'median', 'avg']
DIRECTORY_METRICS = [i1 + "." + i2 for i1, i2 in product([
"system.disk.directory.file.bytes",
"system.disk.directory.file.modified_sec_ago",
"system.disk.directory.file.created_sec_ago"
], HISTOGRAM_SUFFIXES)]
COMMON_METRICS = [
"system.disk.directory.files",
"system.disk.directory.bytes"
]
@staticmethod
def get_config_stubs(dir_name, filegauges=False):
"""
Helper to generate configs from a directory name
"""
return [
{
'directory': dir_name,
'filegauges': filegauges
}, {
'directory': dir_name,
'name': "my_beloved_directory",
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "directory_custom_tagname",
'filegauges': filegauges
}, {
'directory': dir_name,
'filetagname': "file_custom_tagname",
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "recursive_check",
'recursive': True,
'filegauges': filegauges
}, {
'directory': dir_name,
'dirtagname': "pattern_check",
'pattern': "*.log",
'filegauges': filegauges
}
]
def setUp(self):
"""
Generate a directory with a file structure for tests
"""
self.temp_dir = tempfile.mkdtemp()
# Create 10 files
for i in xrange(0, 10):
open(self.temp_dir + "/file_" + str(i), 'a').close()
# Add 2 '.log' files
open(self.temp_dir + "/log_1.log", 'a').close()
open(self.temp_dir + "/log_2.log", 'a').close()
# Create a subfolder and generate files into it
os.makedirs(str(self.temp_dir) + "/subfolder")
# Create 5 subfiles
for i in xrange(0, 5):
open(self.temp_dir + "/subfolder" + '/file_' + str(i), 'a').close()
def tearDown(self):
shutil.rmtree(self.temp_dir)
def test_directory_metrics(self):
"""
Directory metric coverage
"""
config_stubs = self.get_config_stubs(self.temp_dir)
config = {
'instances': config_stubs
}
self.run_check(config)
for config in config_stubs:
dirtagname = config.get('dirtagname', "name")
name = config.get('name', self.temp_dir)
dir_tags = [dirtagname + ":%s" % name]
# Directory metrics
for mname in (self.DIRECTORY_METRICS + self.COMMON_METRICS):
self.assertMetric(mname, tags=dir_tags, count=1)
# 'recursive' and 'pattern' parameters
if config.get('pattern'):
# 2 '*.log' files in 'temp_dir'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=2)
elif config.get('recursive'):
# 12 files in 'temp_dir' + 5 files in 'tempdir/subfolder'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=17)
else:
# 12 files in 'temp_dir'
self.assertMetric("system.disk.directory.files", tags=dir_tags, count=1, value=12)
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
def test_file_metrics(self):
"""
File metric coverage
"""
config_stubs = self.get_config_stubs(self.temp_dir, filegauges=True)
config = {
'instances': config_stubs
}
self.run_check(config)
for config in config_stubs:
dirtagname = config.get('dirtagname', "name")
name = config.get('name', self.temp_dir)
filetagname = config.get('filetagname', "filename")
dir_tags = [dirtagname + ":%s" % name]
# File metrics
for mname in self.FILE_METRICS:
# 2 '*.log' files in 'temp_dir'
for i in xrange(1, 3):
file_tag = [filetagname + ":%s" % self.temp_dir + "/log_" + str(i) + ".log"]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
if not config.get('pattern'):
# Files in 'temp_dir'
for i in xrange(0, 10):
file_tag = [filetagname + ":%s" % self.temp_dir + "/file_" + str(i)]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
# Files in 'temp_dir/subfolder'
if config.get('recursive'):
for i in xrange(0, 5):
file_tag = [filetagname + ":%s" % self.temp_dir + "/subfolder" + "/file_" + str(i)]
self.assertMetric(mname, tags=dir_tags + file_tag, count=1)
# Common metrics
for mname in self.COMMON_METRICS:
self.assertMetric(mname, tags=dir_tags, count=1)
# Raises when COVERAGE=true and coverage < 100%
self.coverage_report()
| bsd-3-clause | -121,879,285,920,955,890 | -396,048,653,638,210,200 | 33.053571 | 111 | 0.51914 | false |
doutib/lobpredict | lobpredictrst/execute_model.py | 1 | 5878 | import sys
import imp
import yaml
import csv
import pandas as pd
import re
from rf import *
from svm import *
modl = imp.load_source('read_model_yaml', 'read_model_yaml.py')
# Parse the YAML file location as the first parameter
inp_yaml = sys.argv[1]
def write_results_txt(filename, result):
"""
    Write results into a text file.
    Parameters
    ----------
    filename : string
        filename to output the result
    result : list
        result items to write, e.g. parameter settings and metrics
"""
with open(filename, "w") as fp:
for item in result:
fp.write("%s\n\n" % item)
def execute_model(inp_yaml):
"""Apply trees in the forest to X, return leaf indices.
Parameters
----------
inp_yaml : A yaml file with model specifications
Returns
-------
parameters_dict : A python dictionary with the model specifications
to be used to encode metadata for the model
and pass into specific model functions e.g. random
forest
"""
# Read in and parse all parameters from the YAML file
yaml_params = modl.read_model_yaml(inp_yaml)
# Define output file name based on input
folder_name = re.split("/", inp_yaml)[2]
file_name = re.split("/", inp_yaml)[3][:-5]
output_txt_file = 'data/output/' + folder_name + '/' + file_name + '.txt'
#-------------------------------------------------
# Create Train and Test Datasets
#-------------------------------------------------
data_source_dir = yaml_params["data_source_dir"]
test_type = yaml_params["test_type"]
print('data source dir is: %s' % (data_source_dir))
print('test type is: %s' % (test_type))
if test_type == "test":
train_ds_name = "train.tar.gz"
test_ds_name = "test.tar.gz"
elif test_type == "validation":
train_ds_name = "train_test.tar.gz"
test_ds_name = "validation.tar.gz"
else:
train_ds_name = "train_test_validation.tar.gz"
test_ds_name = "strategy_validation.tar.gz"
train_ds_ref = "data/output/model_clean_data/" + data_source_dir + "/" + train_ds_name
test_ds_ref = "data/output/model_clean_data/" + data_source_dir + "/" + test_ds_name
print('training dataset is: %s' % (train_ds_ref))
print('test dataset is: %s' % (test_ds_ref))
# Open test and train sets
df_train = pd.read_csv(train_ds_ref
, compression='gzip', index_col = None)
df_test = pd.read_csv(test_ds_ref
, compression='gzip', index_col = None)
# Drop the first columns - they are not useful
df_train_clean = df_train.iloc[:,1:]
df_test_clean = df_test.iloc[:,1:]
    # Training data column names - used for variable importance
X_train_cols = list(df_train_clean.drop(['labels', 'index', 'Time'], axis=1).columns.values)
# Define test/training set
X_train = np.array(df_train_clean.drop(['labels', 'index', 'Time'], axis = 1))
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean.drop(['labels', 'index', 'Time'], axis = 1))
Y_test = np.array(df_test_clean[['labels']])[:,0]
#-------------------------------------------------
# Run RF (RANDOM FOREST)
#-------------------------------------------------
if yaml_params["model_type"] == "RF":
# Extract the RF model variables from the YAML file
n_estimators = yaml_params["parameters"]["n_estimators"]
criterion = yaml_params["parameters"]["criterion"]
max_features = yaml_params["parameters"]["max_features"]
max_depth = yaml_params["parameters"]["max_depth"]
n_jobs = yaml_params["parameters"]["n_jobs"]
print('number of trees is: %d' % (n_estimators))
print('max depth is: %d' % (max_depth))
print("running RF WITHOUT simulation...")
# Run simulation
result = rf(X_train_cols = X_train_cols
, X_train = X_train
, Y_train = Y_train
, X_test = X_test
, Y_test = Y_test
, n_estimators = n_estimators
, criterion = criterion
, max_features = max_features
, max_depth = max_depth)
print("finished - rf without simulation")
# Write into text file
write_results_txt(output_txt_file, result)
#-------------------------------------------------
# Run SVM (SUPPORT VECTOR MACHINE)
#-------------------------------------------------
# Extract the SVM model variables from the YAML file
if yaml_params["model_type"] == "SVM":
kernel = yaml_params["parameters"]["kernel"]
degree = yaml_params["parameters"]["degree"]
gamma = yaml_params["parameters"]["gamma"]
tol = yaml_params["parameters"]["tol"]
C = yaml_params["parameters"]["C"]
print('The value of C is: %.2f' % (C))
print("running SVM WITHOUT simulation...")
# Run a single simulation
result = svm(X_train = X_train
, Y_train = Y_train
, X_test = X_test
, Y_test = Y_test
, kernel = kernel
, C = C
, degree = degree
, gamma = gamma
, tol = tol
, decision_function_shape='ovr')
# Write into text file
write_results_txt(output_txt_file, result)
print("finished - SVM without simulation")
# Run the execute model code
execute_model(inp_yaml)
| isc | -3,629,095,482,327,561,700 | -5,139,915,307,065,867,000 | 34.409639 | 98 | 0.514631 | false |
leiferikb/bitpop | src/tools/telemetry/telemetry/core/platform/power_monitor/android_ds2784_power_monitor.py | 1 | 4983 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
from telemetry import decorators
from telemetry.core.platform.profiler import android_prebuilt_profiler_helper
import telemetry.core.platform.power_monitor as power_monitor
SAMPLE_RATE_HZ = 2 # The data is collected from the ds2784 fuel gauge chip
# that only updates its data every 3.5s.
FUEL_GAUGE_PATH = '/sys/class/power_supply/ds2784-fuelgauge'
CHARGE_COUNTER = os.path.join(FUEL_GAUGE_PATH, 'charge_counter_ext')
CURRENT = os.path.join(FUEL_GAUGE_PATH, 'current_now')
VOLTAGE = os.path.join(FUEL_GAUGE_PATH, 'voltage_now')
class DS2784PowerMonitor(power_monitor.PowerMonitor):
def __init__(self, device):
super(DS2784PowerMonitor, self).__init__()
self._device = device
self._powermonitor_process_port = None
android_prebuilt_profiler_helper.InstallOnDevice(device, 'file_poller')
self._file_poller_binary = android_prebuilt_profiler_helper.GetDevicePath(
'file_poller')
@decorators.Cache
def _HasFuelGauge(self):
return self._device.old_interface.FileExistsOnDevice(CHARGE_COUNTER)
def CanMonitorPower(self):
if not self._HasFuelGauge():
return False
if self._device.old_interface.IsDeviceCharging():
logging.warning('Can\'t monitor power usage since device is charging.')
return False
return True
def StartMonitoringPower(self, browser):
assert not self._powermonitor_process_port, (
'Must call StopMonitoringPower().')
self._powermonitor_process_port = int(
self._device.old_interface.RunShellCommand(
'%s %d %s %s %s' % (self._file_poller_binary, SAMPLE_RATE_HZ,
CHARGE_COUNTER, CURRENT, VOLTAGE))[0])
def StopMonitoringPower(self):
assert self._powermonitor_process_port, (
'StartMonitoringPower() not called.')
try:
result = '\n'.join(self._device.old_interface.RunShellCommand(
'%s %d' % (self._file_poller_binary,
self._powermonitor_process_port)))
assert result, 'PowerMonitor produced no output'
return DS2784PowerMonitor.ParseSamplingOutput(result)
finally:
self._powermonitor_process_port = None
@staticmethod
def ParseSamplingOutput(powermonitor_output):
"""Parse output of powermonitor command line utility.
Returns:
Dictionary in the format returned by StopMonitoringPower().
"""
power_samples = []
total_energy_consumption_mwh = 0
def ParseSample(sample):
values = [float(x) for x in sample.split(' ')]
res = {}
(res['timestamp_s'],
res['charge_nah'],
res['current_ua'],
res['voltage_uv']) = values
return res
# The output contains a sample per line.
samples = map(ParseSample, powermonitor_output.split('\n')[:-1])
# Keep track of the last sample that found an updated reading.
last_updated_sample = samples[0]
# Compute average voltage.
voltage_sum_uv = 0
voltage_count = 0
for sample in samples:
if sample['charge_nah'] != last_updated_sample['charge_nah']:
charge_difference_nah = (sample['charge_nah'] -
last_updated_sample['charge_nah'])
# Use average voltage for the energy consumption.
voltage_sum_uv += sample['voltage_uv']
voltage_count += 1
average_voltage_uv = voltage_sum_uv / voltage_count
total_energy_consumption_mwh += (-charge_difference_nah *
average_voltage_uv / 10 ** 12)
last_updated_sample = sample
voltage_sum_uv = 0
voltage_count = 0
# Update average voltage.
voltage_sum_uv += sample['voltage_uv']
voltage_count += 1
# Compute energy of the sample.
energy_consumption_mw = (-sample['current_ua'] * sample['voltage_uv'] /
10 ** 9)
power_samples.append(energy_consumption_mw)
    # Because the data is stale for a few seconds, compute the remaining
# energy consumption using the last available current reading.
last_sample = samples[-1]
remaining_time_h = (
last_sample['timestamp_s'] - last_updated_sample['timestamp_s']) / 3600
average_voltage_uv = voltage_sum_uv / voltage_count
remaining_energy_consumption_mwh = (-last_updated_sample['current_ua'] *
average_voltage_uv *
remaining_time_h / 10 ** 9)
total_energy_consumption_mwh += remaining_energy_consumption_mwh
# -------- Collect and Process Data -------------
out_dict = {}
# Raw power usage samples.
out_dict['identifier'] = 'ds2784'
out_dict['power_samples_mw'] = power_samples
out_dict['energy_consumption_mwh'] = total_energy_consumption_mwh
return out_dict
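# Illustrative input for ParseSamplingOutput (format taken from ParseSample:
# one sample per line as "timestamp_s charge_nah current_ua voltage_uv";
# the numbers below are made up):
#
#   output = ("10.0 1000000 -150000 3800000\n"
#             "10.5 1000000 -155000 3795000\n"
#             "11.0 999000 -160000 3790000\n")
#   DS2784PowerMonitor.ParseSamplingOutput(output)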
| gpl-3.0 | 6,287,089,671,476,015,000 | 6,458,179,228,367,950,000 | 38.23622 | 79 | 0.645595 | false |
liavkoren/djangoDev | tests/admin_widgets/widgetadmin.py | 68 | 1222 | from django.contrib import admin
from . import models
class WidgetAdmin(admin.AdminSite):
pass
class CarAdmin(admin.ModelAdmin):
list_display = ['make', 'model', 'owner']
list_editable = ['owner']
class CarTireAdmin(admin.ModelAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "car":
kwargs["queryset"] = models.Car.objects.filter(owner=request.user)
return db_field.formfield(**kwargs)
return super(CarTireAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class EventAdmin(admin.ModelAdmin):
raw_id_fields = ['main_band', 'supporting_bands']
class SchoolAdmin(admin.ModelAdmin):
filter_vertical = ('students',)
filter_horizontal = ('alumni',)
site = WidgetAdmin(name='widget-admin')
site.register(models.User)
site.register(models.Car, CarAdmin)
site.register(models.CarTire, CarTireAdmin)
site.register(models.Member)
site.register(models.Band)
site.register(models.Event, EventAdmin)
site.register(models.Album)
site.register(models.Inventory)
site.register(models.Bee)
site.register(models.Advisor)
site.register(models.School, SchoolAdmin)
site.register(models.Profile)
| bsd-3-clause | -2,161,742,824,772,420,600 | -5,676,291,459,606,736,000 | 23.44 | 94 | 0.725041 | false |
reinbach/finance | api/finance/views/account.py | 1 | 3764 | import json
from flask import abort, jsonify, request, Response
from flask.views import MethodView
import config
from finance import utils, db
from finance.forms.account import AccountForm
from finance.models.account import Account
from finance.stats import STATS
class AccountAPI(MethodView):
"""Account Views"""
decorators = [
utils.requires_auth,
utils.crossdomain(
origin='*',
headers=config.HEADERS_ALLOWED
),
]
def get(self, account_id):
if account_id is None:
with STATS.all_accounts.time():
# return a list of accounts
res = [acct.jsonify() for acct in Account.query.all()]
STATS.success += 1
return Response(json.dumps(res), mimetype='application/json')
else:
with STATS.get_account.time():
# expose a single account
acct = Account.query.get(account_id)
if acct is None:
STATS.notfound += 1
return abort(404)
STATS.success += 1
return jsonify(acct.jsonify())
def post(self):
with STATS.add_account.time():
# create a new account
form = AccountForm(request.data)
if form.validate():
acct = Account(
form.name.data,
                    form.account_type_id.data,
form.description.data
)
db.session.add(acct)
db.session.commit()
STATS.success += 1
return jsonify({
'message': 'Successfully added Account',
'account_id': acct.account_id
})
STATS.validation += 1
resp = jsonify({"errors": form.errors})
resp.status_code = 400
return resp
def delete(self, account_id):
with STATS.delete_account.time():
# delete a single account
acct = Account.query.get(account_id)
if acct is None:
STATS.notfound += 1
return abort(404)
db.session.delete(acct)
db.session.commit()
STATS.success += 1
return jsonify({"message": "Successfully deleted Account"})
def put(self, account_id):
with STATS.update_account.time():
# update a single account
acct = Account.query.get(account_id)
if acct is None:
STATS.notfound += 1
return abort(404)
form = AccountForm(request.data)
if form.validate():
acct = Account.query.get(account_id)
acct.name = form.name.data
acct.account_type_id = form.account_type_id.data
acct.description = form.description.data
db.session.add(acct)
db.session.commit()
STATS.success += 1
return jsonify({
'message': 'Successfully updated Account'
})
STATS.validation += 1
resp = jsonify({'errors': form.errors})
resp.status_code = 400
return resp
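# Illustrative Flask routing for this MethodView (endpoint and URL names are
# assumptions; the real wiring lives elsewhere in the app):
#
#   account_view = AccountAPI.as_view('account_api')
#   app.add_url_rule('/accounts/', defaults={'account_id': None},
#                    view_func=account_view, methods=['GET'])
#   app.add_url_rule('/accounts/', view_func=account_view, methods=['POST'])
#   app.add_url_rule('/accounts/<int:account_id>', view_func=account_view,
#                    methods=['GET', 'PUT', 'DELETE'])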
@utils.requires_auth
@utils.crossdomain(origin='*', headers=config.HEADERS_ALLOWED)
def transactions(account_id):
"""Get transactions for account"""
with STATS.get_account_transactions.time():
acct = Account.query.get(account_id)
if acct is None:
STATS.notfound += 1
return abort(404)
res = [trx.jsonify() for trx in acct.transactions()]
STATS.success += 1
return Response(json.dumps(res), mimetype='application/json')
| bsd-3-clause | -4,936,414,759,550,588,000 | -3,557,276,142,265,882,000 | 32.607143 | 77 | 0.529224 | false |
dvliman/jaikuengine | .google_appengine/lib/django-1.4/django/core/context_processors.py | 80 | 3327 | """
A set of request processors that return dictionaries to be merged into a
template context. Each function takes the request object as its only parameter
and returns a dictionary to add to the context.
These are referenced from the setting TEMPLATE_CONTEXT_PROCESSORS and used by
RequestContext.
"""
from django.conf import settings
from django.middleware.csrf import get_token
from django.utils.functional import lazy
def csrf(request):
"""
Context processor that provides a CSRF token, or the string 'NOTPROVIDED' if
it has not been provided by either a view decorator or the middleware
"""
def _get_val():
token = get_token(request)
if token is None:
# In order to be able to provide debugging info in the
# case of misconfiguration, we use a sentinel value
# instead of returning an empty dict.
return 'NOTPROVIDED'
else:
return token
_get_val = lazy(_get_val, str)
return {'csrf_token': _get_val() }
def debug(request):
"Returns context variables helpful for debugging."
context_extras = {}
if settings.DEBUG and request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS:
context_extras['debug'] = True
from django.db import connection
context_extras['sql_queries'] = connection.queries
return context_extras
def i18n(request):
from django.utils import translation
context_extras = {}
context_extras['LANGUAGES'] = settings.LANGUAGES
context_extras['LANGUAGE_CODE'] = translation.get_language()
context_extras['LANGUAGE_BIDI'] = translation.get_language_bidi()
return context_extras
def tz(request):
from django.utils import timezone
return {'TIME_ZONE': timezone.get_current_timezone_name()}
def static(request):
"""
Adds static-related context variables to the context.
"""
return {'STATIC_URL': settings.STATIC_URL}
def media(request):
"""
Adds media-related context variables to the context.
"""
return {'MEDIA_URL': settings.MEDIA_URL}
def request(request):
return {'request': request}
# PermWrapper and PermLookupDict proxy the permissions system into objects that
# the template system can understand. They once lived here -- they have
# been moved to django.contrib.auth.context_processors.
from django.contrib.auth.context_processors import PermLookupDict as RealPermLookupDict
from django.contrib.auth.context_processors import PermWrapper as RealPermWrapper
class PermLookupDict(RealPermLookupDict):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
"`django.core.context_processors.PermLookupDict` is " \
"deprecated; use `django.contrib.auth.context_processors.PermLookupDict` " \
"instead.",
DeprecationWarning
)
super(PermLookupDict, self).__init__(*args, **kwargs)
class PermWrapper(RealPermWrapper):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn(
"`django.core.context_processors.PermWrapper` is " \
"deprecated; use `django.contrib.auth.context_processors.PermWrapper` " \
"instead.",
DeprecationWarning
)
super(PermWrapper, self).__init__(*args, **kwargs)
| apache-2.0 | 6,237,390,796,637,918,000 | -8,978,665,643,283,062,000 | 32.27 | 88 | 0.681695 | false |
uglyboxer/linear_neuron | net-p3/lib/python3.5/site-packages/scipy/spatial/__init__.py | 23 | 2970 | """
=============================================================
Spatial algorithms and data structures (:mod:`scipy.spatial`)
=============================================================
.. currentmodule:: scipy.spatial
Nearest-neighbor Queries
========================
.. autosummary::
:toctree: generated/
KDTree -- class for efficient nearest-neighbor queries
cKDTree -- class for efficient nearest-neighbor queries (faster impl.)
distance -- module containing many different distance measures
Delaunay Triangulation, Convex Hulls and Voronoi Diagrams
=========================================================
.. autosummary::
:toctree: generated/
Delaunay -- compute Delaunay triangulation of input points
ConvexHull -- compute a convex hull for input points
   Voronoi -- compute a Voronoi diagram from input points
Plotting Helpers
================
.. autosummary::
:toctree: generated/
delaunay_plot_2d -- plot 2-D triangulation
convex_hull_plot_2d -- plot 2-D convex hull
voronoi_plot_2d -- plot 2-D voronoi diagram
.. seealso:: :ref:`Tutorial <qhulltutorial>`
Simplex representation
======================
The simplices (triangles, tetrahedra, ...) appearing in the Delaunay
tessellation (N-dim simplices), convex hull facets, and Voronoi ridges
(N-1 dim simplices) are represented in the following scheme::
tess = Delaunay(points)
hull = ConvexHull(points)
voro = Voronoi(points)
# coordinates of the j-th vertex of the i-th simplex
   tess.points[tess.simplices[i, j], :] # tessellation element
hull.points[hull.simplices[i, j], :] # convex hull facet
voro.vertices[voro.ridge_vertices[i, j], :] # ridge between Voronoi cells
For Delaunay triangulations and convex hulls, the neighborhood
structure of the simplices satisfies the condition:
``tess.neighbors[i,j]`` is the neighboring simplex of the i-th
simplex, opposite to the j-th vertex. It is -1 in case of no
neighbor.
Convex hull facets also define a hyperplane equation::
(hull.equations[i,:-1] * coord).sum() + hull.equations[i,-1] == 0
Similar hyperplane equations for the Delaunay triangulation correspond
to the convex hull facets on the corresponding N+1 dimensional
paraboloid.
The Delaunay triangulation objects offer a method for locating the
simplex containing a given point, and barycentric coordinate
computations.
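For example, an illustrative point-location query (``tess`` as above)::
   tess.find_simplex(p) # index of the simplex containing point p, -1 if outside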
Functions
---------
.. autosummary::
:toctree: generated/
tsearch
distance_matrix
minkowski_distance
minkowski_distance_p
procrustes
"""
from __future__ import division, print_function, absolute_import
from .kdtree import *
from .ckdtree import *
from .qhull import *
from ._plotutils import *
from ._procrustes import procrustes
__all__ = [s for s in dir() if not s.startswith('_')]
__all__ += ['distance']
from . import distance
from numpy.testing import Tester
test = Tester().test
bench = Tester().bench
| mit | -4,572,731,162,605,192,700 | 4,583,612,227,639,177,700 | 28.117647 | 77 | 0.661616 | false |
FRESNA/vresutils | test/test_transfer.py | 1 | 1167 | from __future__ import absolute_import
import pytest
import numpy as np
from numpy.testing import assert_allclose
import scipy.sparse as sparse
from vresutils import transfer
@pytest.fixture
def orig():
return np.array(((0., 0.), (1., 1.)))
@pytest.fixture
def dest():
return np.array(((1.5, 1.), (0., 0.5), (1.5, 0.)))
class TestPoints2Points:
def test_not_surjective(self, orig, dest):
spmatrix = transfer.Points2Points(orig, dest)
assert sparse.issparse(spmatrix)
assert spmatrix.shape == (len(dest), len(orig))
matrix = spmatrix.todense()
assert_allclose(matrix, np.array(((0., 1.),
(1., 0.),
(0., 0.))))
def test_surjective(self, orig, dest):
spmatrix = transfer.Points2Points(orig, dest, surjective=True)
assert sparse.issparse(spmatrix)
assert spmatrix.shape == (len(dest), len(orig))
matrix = spmatrix.todense()
assert_allclose(matrix, np.array(((0., 2./3.),
(1., 0.),
(0., 1./3.))))
| gpl-3.0 | -7,639,589,510,940,111,000 | -6,936,948,066,102,871,000 | 28.175 | 70 | 0.537275 | false |
harmy/kbengine | kbe/res/scripts/common/Lib/test/test_dbm_dumb.py | 3 | 6028 | #! /usr/bin/env python3
"""Test script for the dumbdbm module
Original by Roger E. Masse
"""
import io
import os
import unittest
import dbm.dumb as dumbdbm
from test import support
_fname = support.TESTFN
def _delete_files():
for ext in [".dir", ".dat", ".bak"]:
try:
os.unlink(_fname + ext)
except OSError:
pass
class DumbDBMTestCase(unittest.TestCase):
_dict = {b'0': b'',
b'a': b'Python:',
b'b': b'Programming',
b'c': b'the',
b'd': b'way',
b'f': b'Guido',
b'g': b'intended',
'\u00fc'.encode('utf-8') : b'!',
}
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
def test_dumbdbm_creation(self):
f = dumbdbm.open(_fname, 'c')
self.assertEqual(list(f.keys()), [])
for key in self._dict:
f[key] = self._dict[key]
self.read_helper(f)
f.close()
def test_dumbdbm_creation_mode(self):
# On platforms without chmod, don't do anything.
if not (hasattr(os, 'chmod') and hasattr(os, 'umask')):
return
try:
old_umask = os.umask(0o002)
f = dumbdbm.open(_fname, 'c', 0o637)
f.close()
finally:
os.umask(old_umask)
expected_mode = 0o635
if os.name != 'posix':
# Windows only supports setting the read-only attribute.
# This shouldn't fail, but doesn't work like Unix either.
expected_mode = 0o666
import stat
st = os.stat(_fname + '.dat')
self.assertEqual(stat.S_IMODE(st.st_mode), expected_mode)
st = os.stat(_fname + '.dir')
self.assertEqual(stat.S_IMODE(st.st_mode), expected_mode)
def test_close_twice(self):
f = dumbdbm.open(_fname)
f[b'a'] = b'b'
self.assertEqual(f[b'a'], b'b')
f.close()
f.close()
def test_dumbdbm_modification(self):
self.init_db()
f = dumbdbm.open(_fname, 'w')
self._dict[b'g'] = f[b'g'] = b"indented"
self.read_helper(f)
f.close()
def test_dumbdbm_read(self):
self.init_db()
f = dumbdbm.open(_fname, 'r')
self.read_helper(f)
f.close()
def test_dumbdbm_keys(self):
self.init_db()
f = dumbdbm.open(_fname)
keys = self.keys_helper(f)
f.close()
def test_write_contains(self):
f = dumbdbm.open(_fname)
f[b'1'] = b'hello'
self.assertIn(b'1', f)
f.close()
def test_write_write_read(self):
# test for bug #482460
f = dumbdbm.open(_fname)
f[b'1'] = b'hello'
f[b'1'] = b'hello2'
f.close()
f = dumbdbm.open(_fname)
self.assertEqual(f[b'1'], b'hello2')
f.close()
def test_str_read(self):
self.init_db()
f = dumbdbm.open(_fname, 'r')
self.assertEqual(f['\u00fc'], self._dict['\u00fc'.encode('utf-8')])
def test_str_write_contains(self):
self.init_db()
f = dumbdbm.open(_fname)
f['\u00fc'] = b'!'
f['1'] = 'a'
f.close()
f = dumbdbm.open(_fname, 'r')
self.assertIn('\u00fc', f)
self.assertEqual(f['\u00fc'.encode('utf-8')],
self._dict['\u00fc'.encode('utf-8')])
self.assertEqual(f[b'1'], b'a')
def test_line_endings(self):
# test for bug #1172763: dumbdbm would die if the line endings
# weren't what was expected.
f = dumbdbm.open(_fname)
f[b'1'] = b'hello'
f[b'2'] = b'hello2'
f.close()
# Mangle the file by changing the line separator to Windows or Unix
with io.open(_fname + '.dir', 'rb') as file:
data = file.read()
if os.linesep == '\n':
data = data.replace(b'\n', b'\r\n')
else:
data = data.replace(b'\r\n', b'\n')
with io.open(_fname + '.dir', 'wb') as file:
file.write(data)
f = dumbdbm.open(_fname)
self.assertEqual(f[b'1'], b'hello')
self.assertEqual(f[b'2'], b'hello2')
def read_helper(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key])
def init_db(self):
f = dumbdbm.open(_fname, 'w')
for k in self._dict:
f[k] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = sorted(f.keys())
dkeys = sorted(self._dict.keys())
self.assertEqual(keys, dkeys)
return keys
# Perform randomized operations. This doesn't make assumptions about
# what *might* fail.
def test_random(self):
import random
d = {} # mirror the database
for dummy in range(5):
f = dumbdbm.open(_fname)
for dummy in range(100):
k = random.choice('abcdefghijklm')
if random.random() < 0.2:
if k in d:
del d[k]
del f[k]
else:
v = random.choice((b'a', b'b', b'c')) * random.randrange(10000)
d[k] = v
f[k] = v
self.assertEqual(f[k], v)
f.close()
f = dumbdbm.open(_fname)
expected = sorted((k.encode("latin-1"), v) for k, v in d.items())
got = sorted(f.items())
self.assertEqual(expected, got)
f.close()
def tearDown(self):
_delete_files()
def setUp(self):
_delete_files()
def test_main():
try:
support.run_unittest(DumbDBMTestCase)
finally:
_delete_files()
if __name__ == "__main__":
test_main()
| lgpl-3.0 | 5,234,426,417,583,332,000 | -1,025,748,911,887,853,800 | 27.404878 | 83 | 0.481254 | false |
fstagni/DIRAC | ConfigurationSystem/private/Refresher.py | 6 | 6648 | """ Refresh local CS (if needed)
In every gConfig
"""
__RCSID__ = "$Id$"
import threading
import thread
import time
import random
from DIRAC.ConfigurationSystem.Client.ConfigurationData import gConfigurationData
from DIRAC.ConfigurationSystem.Client.PathFinder import getGatewayURLs
from DIRAC.FrameworkSystem.Client.Logger import gLogger
from DIRAC.Core.Utilities import List, LockRing
from DIRAC.Core.Utilities.EventDispatcher import gEventDispatcher
from DIRAC.Core.Utilities.ReturnValues import S_OK, S_ERROR
def _updateFromRemoteLocation(serviceClient):
gLogger.debug("", "Trying to refresh from %s" % serviceClient.serviceURL)
localVersion = gConfigurationData.getVersion()
retVal = serviceClient.getCompressedDataIfNewer(localVersion)
if retVal['OK']:
dataDict = retVal['Value']
if localVersion < dataDict['newestVersion']:
gLogger.debug("New version available", "Updating to version %s..." % dataDict['newestVersion'])
gConfigurationData.loadRemoteCFGFromCompressedMem(dataDict['data'])
gLogger.debug("Updated to version %s" % gConfigurationData.getVersion())
gEventDispatcher.triggerEvent("CSNewVersion", dataDict['newestVersion'], threaded=True)
return S_OK()
return retVal
class Refresher(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.__automaticUpdate = False
self.__lastUpdateTime = 0
self.__url = False
self.__refreshEnabled = True
self.__timeout = 60
self.__callbacks = {'newVersion': []}
gEventDispatcher.registerEvent("CSNewVersion")
random.seed()
self.__triggeredRefreshLock = LockRing.LockRing().getLock()
def disable(self):
self.__refreshEnabled = False
def enable(self):
self.__refreshEnabled = True
if self.__lastRefreshExpired():
return self.forceRefresh()
return S_OK()
def isEnabled(self):
return self.__refreshEnabled
def addListenerToNewVersionEvent(self, functor):
gEventDispatcher.addListener("CSNewVersion", functor)
def __refreshInThread(self):
retVal = self.__refresh()
if not retVal['OK']:
gLogger.error("Error while updating the configuration", retVal['Message'])
def __lastRefreshExpired(self):
return time.time() - self.__lastUpdateTime >= gConfigurationData.getRefreshTime()
def refreshConfigurationIfNeeded(self):
if not self.__refreshEnabled or self.__automaticUpdate or not gConfigurationData.getServers():
return
self.__triggeredRefreshLock.acquire()
try:
if not self.__lastRefreshExpired():
return
self.__lastUpdateTime = time.time()
finally:
try:
self.__triggeredRefreshLock.release()
except thread.error:
pass
# Launch the refresh
thd = threading.Thread(target=self.__refreshInThread)
thd.setDaemon(1)
thd.start()
def forceRefresh(self, fromMaster=False):
if self.__refreshEnabled:
return self.__refresh(fromMaster=fromMaster)
return S_OK()
def autoRefreshAndPublish(self, sURL):
gLogger.debug("Setting configuration refresh as automatic")
if not gConfigurationData.getAutoPublish():
gLogger.debug("Slave server won't auto publish itself")
if not gConfigurationData.getName():
import DIRAC
DIRAC.abort(10, "Missing configuration name!")
self.__url = sURL
self.__automaticUpdate = True
self.setDaemon(1)
self.start()
def run(self):
while self.__automaticUpdate:
iWaitTime = gConfigurationData.getPropagationTime()
time.sleep(iWaitTime)
if self.__refreshEnabled:
if not self.__refreshAndPublish():
gLogger.error("Can't refresh configuration from any source")
def __refreshAndPublish(self):
self.__lastUpdateTime = time.time()
gLogger.info("Refreshing from master server")
from DIRAC.Core.DISET.RPCClient import RPCClient
sMasterServer = gConfigurationData.getMasterServer()
if sMasterServer:
oClient = RPCClient(sMasterServer, timeout=self.__timeout,
useCertificates=gConfigurationData.useServerCertificate(),
skipCACheck=gConfigurationData.skipCACheck())
dRetVal = _updateFromRemoteLocation(oClient)
if not dRetVal['OK']:
gLogger.error("Can't update from master server", dRetVal['Message'])
return False
if gConfigurationData.getAutoPublish():
gLogger.info("Publishing to master server...")
dRetVal = oClient.publishSlaveServer(self.__url)
if not dRetVal['OK']:
gLogger.error("Can't publish to master server", dRetVal['Message'])
return True
else:
gLogger.warn("No master server is specified in the configuration, trying to get data from other slaves")
return self.__refresh()['OK']
def __refresh(self, fromMaster=False):
self.__lastUpdateTime = time.time()
gLogger.debug("Refreshing configuration...")
gatewayList = getGatewayURLs("Configuration/Server")
updatingErrorsList = []
if gatewayList:
initialServerList = gatewayList
gLogger.debug("Using configuration gateway", str(initialServerList[0]))
elif fromMaster:
masterServer = gConfigurationData.getMasterServer()
initialServerList = [masterServer]
gLogger.debug("Refreshing from master %s" % masterServer)
else:
initialServerList = gConfigurationData.getServers()
gLogger.debug("Refreshing from list %s" % str(initialServerList))
# If no servers in the initial list, we are supposed to use the local configuration only
if not initialServerList:
return S_OK()
randomServerList = List.randomize(initialServerList)
gLogger.debug("Randomized server list is %s" % ", ".join(randomServerList))
for sServer in randomServerList:
from DIRAC.Core.DISET.RPCClient import RPCClient
oClient = RPCClient(sServer,
useCertificates=gConfigurationData.useServerCertificate(),
skipCACheck=gConfigurationData.skipCACheck())
dRetVal = _updateFromRemoteLocation(oClient)
if dRetVal['OK']:
return dRetVal
else:
updatingErrorsList.append(dRetVal['Message'])
gLogger.warn("Can't update from server", "Error while updating from %s: %s" % (sServer, dRetVal['Message']))
if dRetVal['Message'].find("Insane environment") > -1:
break
return S_ERROR("Reason(s):\n\t%s" % "\n\t".join(List.uniqueElements(updatingErrorsList)))
def daemonize(self):
self.setDaemon(1)
self.start()
gRefresher = Refresher()
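# Illustrative client-side use of the shared refresher (call sites assumed):
#
#   from DIRAC.ConfigurationSystem.private.Refresher import gRefresher
#   gRefresher.refreshConfigurationIfNeeded()  # cheap, rate-limited, threaded
#   result = gRefresher.forceRefresh()         # synchronous; returns S_OK()/S_ERROR(...)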
if __name__ == "__main__":
time.sleep(0.1)
gRefresher.daemonize()
| gpl-3.0 | -1,633,473,800,147,296,000 | -6,242,919,006,389,121,000 | 35.130435 | 116 | 0.695848 | false |
funkypawz/MakerRobot | tests/test_inlinekeyboardbutton.py | 2 | 2954 | #!/usr/bin/env python
# encoding: utf-8
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2016
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains a object that represents Tests for Telegram InlineKeyboardButton"""
import sys
if sys.version_info[0:2] == (2, 6):
import unittest2 as unittest
else:
import unittest
sys.path.append('.')
import telegram
from tests.base import BaseTest
class InlineKeyboardButtonTest(BaseTest, unittest.TestCase):
"""This object represents Tests for Telegram KeyboardButton."""
def setUp(self):
self.text = 'text'
self.url = 'url'
self.callback_data = 'callback data'
self.switch_inline_query = ''
self.json_dict = {
'text': self.text,
'url': self.url,
'callback_data': self.callback_data,
'switch_inline_query': self.switch_inline_query
}
def test_inline_keyboard_button_de_json(self):
inline_keyboard_button = telegram.InlineKeyboardButton.de_json(self.json_dict)
self.assertEqual(inline_keyboard_button.text, self.text)
self.assertEqual(inline_keyboard_button.url, self.url)
self.assertEqual(inline_keyboard_button.callback_data, self.callback_data)
self.assertEqual(inline_keyboard_button.switch_inline_query, self.switch_inline_query)
def test_inline_keyboard_button_de_json_empty(self):
inline_keyboard_button = telegram.InlineKeyboardButton.de_json(None)
self.assertFalse(inline_keyboard_button)
def test_inline_keyboard_button_de_list_empty(self):
inline_keyboard_button = telegram.InlineKeyboardButton.de_list(None)
self.assertFalse(inline_keyboard_button)
def test_inline_keyboard_button_to_json(self):
inline_keyboard_button = telegram.InlineKeyboardButton.de_json(self.json_dict)
self.assertTrue(self.is_json(inline_keyboard_button.to_json()))
def test_inline_keyboard_button_to_dict(self):
inline_keyboard_button = telegram.InlineKeyboardButton.de_json(self.json_dict).to_dict()
self.assertTrue(self.is_dict(inline_keyboard_button))
self.assertDictEqual(self.json_dict, inline_keyboard_button)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | -3,793,759,570,990,634,000 | 3,459,002,104,477,422,000 | 35.02439 | 96 | 0.709885 | false |
hep-gc/panda-autopyfactory | bin/factory.py | 1 | 6335 | #! /usr/bin/env python
#
# Simple(ish) python condor_g factory for panda pilots
#
# $Id$
#
#
# Copyright (C) 2007,2008,2009 Graeme Andrew Stewart
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from optparse import OptionParser
import logging
import logging.handlers
import time
import os
import sys
import traceback
# Need to set PANDA_URL_MAP before the Client module is loaded (which happens
# when the Factory module is loaded). Unfortunately this means that logging
# is not yet available.
if not 'APF_NOSQUID' in os.environ:
if not 'PANDA_URL_MAP' in os.environ:
os.environ['PANDA_URL_MAP'] = 'CERN,http://pandaserver.cern.ch:25085/server/panda,https://pandaserver.cern.ch:25443/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL_MAP to %s' % os.environ['PANDA_URL_MAP']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL_MAP set to %s. Not changed.' % os.environ['PANDA_URL_MAP']
if not 'PANDA_URL' in os.environ:
os.environ['PANDA_URL'] = 'http://pandaserver.cern.ch:25085/server/panda'
print >>sys.stderr, 'FACTORY DEBUG: Set PANDA_URL to %s' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found PANDA_URL set to %s. Not changed.' % os.environ['PANDA_URL']
else:
print >>sys.stderr, 'FACTORY DEBUG: Found APF_NOSQUID set. Not changing/setting panda client environment.'
from autopyfactory.Factory import factory
from autopyfactory.Exceptions import FactoryConfigurationFailure
def main():
parser = OptionParser(usage='''%prog [OPTIONS]
autopyfactory is an ATLAS pilot factory.
This program is licensed under the GPL, as set out in the LICENSE file.
Author(s):
Graeme A Stewart <g.stewart@physics.gla.ac.uk>, Peter Love <p.love@lancaster.ac.uk>
''', version="%prog $Id$")
parser.add_option("--verbose", "--debug", dest="logLevel", default=logging.INFO,
action="store_const", const=logging.DEBUG, help="Set logging level to DEBUG [default INFO]")
parser.add_option("--quiet", dest="logLevel",
action="store_const", const=logging.WARNING, help="Set logging level to WARNING [default INFO]")
parser.add_option("--test", "--dry-run", dest="dryRun", default=False,
action="store_true", help="Dry run - supress job submission")
parser.add_option("--oneshot", "--one-shot", dest="cyclesToDo", default=0,
action="store_const", const=1, help="Run one cycle only")
parser.add_option("--cycles", dest="cyclesToDo",
action="store", type="int", metavar="CYCLES", help="Run CYCLES times, then exit [default infinite]")
parser.add_option("--sleep", dest="sleepTime", default=120,
action="store", type="int", metavar="TIME", help="Sleep TIME seconds between cycles [default %default]")
parser.add_option("--conf", dest="confFiles", default="factory.conf",
action="store", metavar="FILE1[,FILE2,FILE3]", help="Load configuration from FILEs (comma separated list)")
parser.add_option("--log", dest="logfile", default="syslog", metavar="LOGFILE", action="store",
help="Send logging output to LOGFILE or SYSLOG or stdout [default <syslog>]")
(options, args) = parser.parse_args()
options.confFiles = options.confFiles.split(',')
# Setup logging
factoryLogger = logging.getLogger('main')
if options.logfile == "stdout":
logStream = logging.StreamHandler()
elif options.logfile == 'syslog':
logStream = logging.handlers.SysLogHandler('/dev/log')
else:
logStream = logging.handlers.RotatingFileHandler(filename=options.logfile, maxBytes=10000000, backupCount=5)
formatter = logging.Formatter('%(asctime)s - %(name)s: %(levelname)s %(message)s')
logStream.setFormatter(formatter)
factoryLogger.addHandler(logStream)
factoryLogger.setLevel(options.logLevel)
factoryLogger.debug('logging initialised')
# Main loop
try:
f = factory(factoryLogger, options.dryRun, options.confFiles)
cyclesDone = 0
while True:
factoryLogger.info('\nStarting factory cycle %d at %s', cyclesDone, time.asctime(time.localtime()))
f.factorySubmitCycle(cyclesDone)
factoryLogger.info('Factory cycle %d done' % cyclesDone)
cyclesDone += 1
if cyclesDone == options.cyclesToDo:
break
factoryLogger.info('Sleeping %ds' % options.sleepTime)
time.sleep(options.sleepTime)
f.updateConfig(cyclesDone)
except KeyboardInterrupt:
factoryLogger.info('Caught keyboard interrupt - exiting')
except FactoryConfigurationFailure, errMsg:
factoryLogger.error('Factory configuration failure: %s', errMsg)
except ImportError, errorMsg:
factoryLogger.error('Failed to import necessary python module: %s' % errorMsg)
except:
# TODO - make this a logger.exception() call
factoryLogger.error('''Unexpected exception! There was an exception
raised which the factory was not expecting and did not know how to
handle. You may have discovered a new bug or an unforeseen error
condition. Please report this exception to Graeme
<g.stewart@physics.gla.ac.uk>. The factory will now re-raise this
exception so that the python stack trace is printed, which will allow
it to be debugged - please send output from this message
onwards. Exploding in 5...4...3...2...1... Have a nice day!''')
# The following line prints the exception to the logging module
factoryLogger.error(traceback.format_exc(None))
raise
if __name__ == "__main__":
main()
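# Illustrative invocations (option names from the parser above):
#   python factory.py --oneshot --conf factory.conf --log stdout
#   python factory.py --cycles 10 --sleep 300 --conf site1.conf,site2.conf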
| gpl-3.0 | 2,106,426,346,081,370,600 | 755,638,734,153,696,100 | 45.580882 | 137 | 0.679084 | false |
patrick-nicholson/spark | python/pyspark/ml/classification.py | 2 | 69237 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import operator
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.param.shared import *
from pyspark.ml.regression import DecisionTreeModel, DecisionTreeRegressionModel, \
RandomForestParams, TreeEnsembleModel, TreeEnsembleParams
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaParams
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.common import inherit_doc
from pyspark.sql import DataFrame
from pyspark.sql.functions import udf, when
from pyspark.sql.types import ArrayType, DoubleType
from pyspark.storagelevel import StorageLevel
__all__ = ['LinearSVC', 'LinearSVCModel',
'LogisticRegression', 'LogisticRegressionModel',
'LogisticRegressionSummary', 'LogisticRegressionTrainingSummary',
'BinaryLogisticRegressionSummary', 'BinaryLogisticRegressionTrainingSummary',
'DecisionTreeClassifier', 'DecisionTreeClassificationModel',
'GBTClassifier', 'GBTClassificationModel',
'RandomForestClassifier', 'RandomForestClassificationModel',
'NaiveBayes', 'NaiveBayesModel',
'MultilayerPerceptronClassifier', 'MultilayerPerceptronClassificationModel',
'OneVsRest', 'OneVsRestModel']
@inherit_doc
class JavaClassificationModel(JavaPredictionModel):
"""
(Private) Java Model produced by a ``Classifier``.
Classes are indexed {0, 1, ..., numClasses - 1}.
To be mixed in with class:`pyspark.ml.JavaModel`
"""
@property
@since("2.1.0")
def numClasses(self):
"""
Number of classes (values which the label can take).
"""
return self._call_java("numClasses")
@inherit_doc
class LinearSVC(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasRawPredictionCol, HasFitIntercept, HasStandardization,
HasThreshold, HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
`Linear SVM Classifier <https://en.wikipedia.org/wiki/Support_vector_machine#Linear_SVM>`_
This binary classifier optimizes the Hinge Loss using the OWLQN optimizer.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = sc.parallelize([
... Row(label=1.0, features=Vectors.dense(1.0, 1.0, 1.0)),
... Row(label=0.0, features=Vectors.dense(1.0, 2.0, 3.0))]).toDF()
>>> svm = LinearSVC(maxIter=5, regParam=0.01)
>>> model = svm.fit(df)
>>> model.coefficients
DenseVector([0.0, -0.2792, -0.1833])
>>> model.intercept
1.0206118982229047
>>> model.numClasses
2
>>> model.numFeatures
3
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, -1.0, -1.0))]).toDF()
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.rawPrediction
DenseVector([-1.4831, 1.4831])
>>> svm_path = temp_path + "/svm"
>>> svm.save(svm_path)
>>> svm2 = LinearSVC.load(svm_path)
>>> svm2.getMaxIter()
5
>>> model_path = temp_path + "/svm_model"
>>> model.save(model_path)
>>> model2 = LinearSVCModel.load(model_path)
>>> model.coefficients[0] == model2.coefficients[0]
True
>>> model.intercept == model2.intercept
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
                 aggregationDepth=2)
"""
super(LinearSVC, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LinearSVC", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1e-6, fitIntercept=True,
standardization=True, threshold=0.0, aggregationDepth=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction",
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None,
aggregationDepth=2):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, tol=1e-6, rawPredictionCol="rawPrediction", \
fitIntercept=True, standardization=True, threshold=0.0, weightCol=None, \
                  aggregationDepth=2)
Sets params for Linear SVM Classifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return LinearSVCModel(java_model)
class LinearSVCModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
.. note:: Experimental
Model fitted by LinearSVC.
.. versionadded:: 2.2.0
"""
@property
@since("2.2.0")
def coefficients(self):
"""
Model coefficients of Linear SVM Classifier.
"""
return self._call_java("coefficients")
@property
@since("2.2.0")
def intercept(self):
"""
Model intercept of Linear SVM Classifier.
"""
return self._call_java("intercept")
@inherit_doc
class LogisticRegression(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
HasRegParam, HasTol, HasProbabilityCol, HasRawPredictionCol,
HasElasticNetParam, HasFitIntercept, HasStandardization, HasThresholds,
HasWeightCol, HasAggregationDepth, JavaMLWritable, JavaMLReadable):
"""
Logistic regression.
This class supports multinomial logistic (softmax) and binomial logistic regression.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> bdf = sc.parallelize([
... Row(label=1.0, weight=1.0, features=Vectors.dense(0.0, 5.0)),
... Row(label=0.0, weight=2.0, features=Vectors.dense(1.0, 2.0)),
... Row(label=1.0, weight=3.0, features=Vectors.dense(2.0, 1.0)),
... Row(label=0.0, weight=4.0, features=Vectors.dense(3.0, 3.0))]).toDF()
>>> blor = LogisticRegression(regParam=0.01, weightCol="weight")
>>> blorModel = blor.fit(bdf)
>>> blorModel.coefficients
DenseVector([-1.080..., -0.646...])
>>> blorModel.intercept
3.112...
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> mdf = spark.read.format("libsvm").load(data_path)
>>> mlor = LogisticRegression(regParam=0.1, elasticNetParam=1.0, family="multinomial")
>>> mlorModel = mlor.fit(mdf)
>>> mlorModel.coefficientMatrix
SparseMatrix(3, 4, [0, 1, 2, 3], [3, 2, 1], [1.87..., -2.75..., -0.50...], 1)
>>> mlorModel.interceptVector
DenseVector([0.04..., -0.42..., 0.37...])
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 1.0))]).toDF()
>>> result = blorModel.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.02..., 0.97...])
>>> result.rawPrediction
DenseVector([-3.54..., 3.54...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> blorModel.transform(test1).head().prediction
1.0
>>> blor.setParams("vector")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> lr_path = temp_path + "/lr"
>>> blor.save(lr_path)
>>> lr2 = LogisticRegression.load(lr_path)
>>> lr2.getRegParam()
0.01
>>> model_path = temp_path + "/lr_model"
>>> blorModel.save(model_path)
>>> model2 = LogisticRegressionModel.load(model_path)
>>> blorModel.coefficients[0] == model2.coefficients[0]
True
>>> blorModel.intercept == model2.intercept
True
.. versionadded:: 1.3.0
"""
threshold = Param(Params._dummy(), "threshold",
"Threshold in binary classification prediction, in range [0, 1]." +
" If threshold and thresholds are both set, they must match." +
"e.g. if threshold is p, then thresholds must be equal to [1-p, p].",
typeConverter=TypeConverters.toFloat)
family = Param(Params._dummy(), "family",
"The name of family which is a description of the label distribution to " +
"be used in the model. Supported options: auto, binomial, multinomial",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
If the threshold and thresholds Params are both set, they must be equivalent.
"""
super(LogisticRegression, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.LogisticRegression", self.uid)
self._setDefault(maxIter=100, regParam=0.0, tol=1E-6, threshold=0.5, family="auto")
kwargs = self._input_kwargs
self.setParams(**kwargs)
self._checkThresholdConsistency()
@keyword_only
@since("1.3.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True,
threshold=0.5, thresholds=None, probabilityCol="probability",
rawPredictionCol="rawPrediction", standardization=True, weightCol=None,
aggregationDepth=2, family="auto"):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, regParam=0.0, elasticNetParam=0.0, tol=1e-6, fitIntercept=True, \
threshold=0.5, thresholds=None, probabilityCol="probability", \
rawPredictionCol="rawPrediction", standardization=True, weightCol=None, \
aggregationDepth=2, family="auto")
Sets params for logistic regression.
If the threshold and thresholds Params are both set, they must be equivalent.
"""
kwargs = self._input_kwargs
self._set(**kwargs)
self._checkThresholdConsistency()
return self
def _create_model(self, java_model):
return LogisticRegressionModel(java_model)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
Clears value of :py:attr:`thresholds` if it has been set.
"""
self._set(threshold=value)
self._clear(self.thresholds)
return self
@since("1.4.0")
def getThreshold(self):
"""
Get threshold for binary classification.
If :py:attr:`thresholds` is set with length 2 (i.e., binary classification),
this returns the equivalent threshold:
:math:`\\frac{1}{1 + \\frac{thresholds(0)}{thresholds(1)}}`.
Otherwise, returns :py:attr:`threshold` if set or its default value if unset.
"""
self._checkThresholdConsistency()
if self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression getThreshold only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ",".join(ts))
return 1.0/(1.0 + ts[0]/ts[1])
else:
return self.getOrDefault(self.threshold)
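# Worked example of the equivalence above: with thresholds = [0.25, 0.75],
# the binary threshold is 1.0 / (1.0 + 0.25 / 0.75) = 0.75, so
# setThresholds([1 - p, p]) and setThreshold(p) select the same decision
# boundary.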
@since("1.5.0")
def setThresholds(self, value):
"""
Sets the value of :py:attr:`thresholds`.
Clears value of :py:attr:`threshold` if it has been set.
"""
self._set(thresholds=value)
self._clear(self.threshold)
return self
@since("1.5.0")
def getThresholds(self):
"""
If :py:attr:`thresholds` is set, return its value.
Otherwise, if :py:attr:`threshold` is set, return the equivalent thresholds for binary
classification: (1-threshold, threshold).
If neither are set, throw an error.
"""
self._checkThresholdConsistency()
if not self.isSet(self.thresholds) and self.isSet(self.threshold):
t = self.getOrDefault(self.threshold)
return [1.0-t, t]
else:
return self.getOrDefault(self.thresholds)
def _checkThresholdConsistency(self):
if self.isSet(self.threshold) and self.isSet(self.thresholds):
ts = self.getOrDefault(self.thresholds)
if len(ts) != 2:
raise ValueError("Logistic Regression threshold consistency check only applies to" +
" binary classification, but thresholds has length != 2." +
" thresholds: " + ", ".join(str(t) for t in ts))
t = 1.0/(1.0 + ts[0]/ts[1])
t2 = self.getOrDefault(self.threshold)
if abs(t2 - t) >= 1E-5:
raise ValueError("Logistic Regression found inconsistent values for threshold" +
" (%g) and thresholds (equivalent to %g)" % (t2, t))
@since("2.1.0")
def setFamily(self, value):
"""
Sets the value of :py:attr:`family`.
"""
return self._set(family=value)
@since("2.1.0")
def getFamily(self):
"""
Gets the value of :py:attr:`family` or its default value.
"""
return self.getOrDefault(self.family)
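# Note: setParams() accepts keyword arguments only; positional calls raise
# TypeError, as the doctest above shows. A sketch of valid usage (assumes
# `lr` is a LogisticRegression instance):
#
#     lr.setParams(regParam=0.05, threshold=0.4)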
class LogisticRegressionModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by LogisticRegression.
.. versionadded:: 1.3.0
"""
@property
@since("2.0.0")
def coefficients(self):
"""
Model coefficients of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("coefficients")
@property
@since("1.4.0")
def intercept(self):
"""
Model intercept of binomial logistic regression.
An exception is thrown in the case of multinomial logistic regression.
"""
return self._call_java("intercept")
@property
@since("2.1.0")
def coefficientMatrix(self):
"""
Model coefficients.
"""
return self._call_java("coefficientMatrix")
@property
@since("2.1.0")
def interceptVector(self):
"""
Model intercept.
"""
return self._call_java("interceptVector")
@property
@since("2.0.0")
def summary(self):
"""
Gets summary (e.g. accuracy/precision/recall, objective history, total iterations) of model
trained on the training set. An exception is thrown if `trainingSummary is None`.
"""
if self.hasSummary:
java_blrt_summary = self._call_java("summary")
# Note: Once multiclass is added, update this to return correct summary
return BinaryLogisticRegressionTrainingSummary(java_blrt_summary)
else:
raise RuntimeError("No training summary available for this %s" %
self.__class__.__name__)
@property
@since("2.0.0")
def hasSummary(self):
"""
Indicates whether a training summary exists for this model
instance.
"""
return self._call_java("hasSummary")
@since("2.0.0")
def evaluate(self, dataset):
"""
Evaluates the model on a test dataset.
:param dataset:
Test dataset to evaluate model on, where dataset is an
instance of :py:class:`pyspark.sql.DataFrame`
"""
if not isinstance(dataset, DataFrame):
raise ValueError("dataset must be a DataFrame but got %s." % type(dataset))
java_blr_summary = self._call_java("evaluate", dataset)
return BinaryLogisticRegressionSummary(java_blr_summary)
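# Usage sketch for evaluate() (assumes `model` is a fitted
# LogisticRegressionModel and `test_df` is a DataFrame with the training
# schema; illustrative, not a doctest):
#
#     summary = model.evaluate(test_df)
#     print(summary.areaUnderROC)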
class LogisticRegressionSummary(JavaWrapper):
"""
.. note:: Experimental
Abstraction for Logistic Regression Results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def predictions(self):
"""
DataFrame produced by the model's `transform` method.
"""
return self._call_java("predictions")
@property
@since("2.0.0")
def probabilityCol(self):
"""
Field in "predictions" which gives the probability
of each class as a vector.
"""
return self._call_java("probabilityCol")
@property
@since("2.0.0")
def labelCol(self):
"""
Field in "predictions" which gives the true label of each
instance.
"""
return self._call_java("labelCol")
@property
@since("2.0.0")
def featuresCol(self):
"""
Field in "predictions" which gives the features of each instance
as a vector.
"""
return self._call_java("featuresCol")
@inherit_doc
class LogisticRegressionTrainingSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Abstraction for multinomial Logistic Regression Training results.
Currently, the training summary ignores the training weights except
for the objective trace.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def objectiveHistory(self):
"""
Objective function (scaled loss + regularization) at each
iteration.
"""
return self._call_java("objectiveHistory")
@property
@since("2.0.0")
def totalIterations(self):
"""
Number of training iterations until termination.
"""
return self._call_java("totalIterations")
@inherit_doc
class BinaryLogisticRegressionSummary(LogisticRegressionSummary):
"""
.. note:: Experimental
Binary Logistic regression results for a given model.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def roc(self):
"""
Returns the receiver operating characteristic (ROC) curve,
which is a Dataframe having two fields (FPR, TPR) with
(0.0, 0.0) prepended and (1.0, 1.0) appended to it.
.. seealso:: `Wikipedia reference \
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("roc")
@property
@since("2.0.0")
def areaUnderROC(self):
"""
Computes the area under the receiver operating characteristic
(ROC) curve.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("areaUnderROC")
@property
@since("2.0.0")
def pr(self):
"""
Returns the precision-recall curve, which is a Dataframe
containing two fields recall, precision with (0.0, 1.0) prepended
to it.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("pr")
@property
@since("2.0.0")
def fMeasureByThreshold(self):
"""
Returns a dataframe with two fields (threshold, F-Measure),
representing the F-Measure curve with beta = 1.0.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("fMeasureByThreshold")
@property
@since("2.0.0")
def precisionByThreshold(self):
"""
Returns a dataframe with two fields (threshold, precision) curve.
Every possible probability obtained in transforming the dataset
is used as a threshold when calculating the precision.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("precisionByThreshold")
@property
@since("2.0.0")
def recallByThreshold(self):
"""
Returns a dataframe with two fields (threshold, recall) curve.
Every possible probability obtained in transforming the dataset
is used as a threshold when calculating the recall.
.. note:: This ignores instance weights (setting all to 1.0) from
`LogisticRegression.weightCol`. This will change in later Spark
versions.
"""
return self._call_java("recallByThreshold")
@inherit_doc
class BinaryLogisticRegressionTrainingSummary(BinaryLogisticRegressionSummary,
LogisticRegressionTrainingSummary):
"""
.. note:: Experimental
Binary Logistic regression training results for a given model.
.. versionadded:: 2.0.0
"""
pass
class TreeClassifierParams(object):
"""
Private class to track supported impurity measures.
.. versionadded:: 1.4.0
"""
supportedImpurities = ["entropy", "gini"]
impurity = Param(Params._dummy(), "impurity",
"Criterion used for information gain calculation (case-insensitive). " +
"Supported options: " +
", ".join(supportedImpurities), typeConverter=TypeConverters.toString)
def __init__(self):
super(TreeClassifierParams, self).__init__()
@since("1.6.0")
def setImpurity(self, value):
"""
Sets the value of :py:attr:`impurity`.
"""
return self._set(impurity=value)
@since("1.6.0")
def getImpurity(self):
"""
Gets the value of impurity or its default value.
"""
return self.getOrDefault(self.impurity)
class GBTParams(TreeEnsembleParams):
"""
Private class to track supported GBT params.
.. versionadded:: 1.4.0
"""
supportedLossTypes = ["logistic"]
@inherit_doc
class DecisionTreeClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasProbabilityCol, HasRawPredictionCol, DecisionTreeParams,
TreeClassifierParams, HasCheckpointInterval, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Decision tree <http://en.wikipedia.org/wiki/Decision_tree_learning>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> dt = DecisionTreeClassifier(maxDepth=2, labelCol="indexed")
>>> model = dt.fit(td)
>>> model.numNodes
3
>>> model.depth
1
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> model.numFeatures
1
>>> model.numClasses
2
>>> print(model.toDebugString)
DecisionTreeClassificationModel (uid=...) of depth 1 with 3 nodes...
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> result.probability
DenseVector([1.0, 0.0])
>>> result.rawPrediction
DenseVector([1.0, 0.0])
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> dtc_path = temp_path + "/dtc"
>>> dt.save(dtc_path)
>>> dt2 = DecisionTreeClassifier.load(dtc_path)
>>> dt2.getMaxDepth()
2
>>> model_path = temp_path + "/dtc_model"
>>> model.save(model_path)
>>> model2 = DecisionTreeClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
seed=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
"""
super(DecisionTreeClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.DecisionTreeClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", seed=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
seed=None)
Sets params for the DecisionTreeClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return DecisionTreeClassificationModel(java_model)
@inherit_doc
class DecisionTreeClassificationModel(DecisionTreeModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by DecisionTreeClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
This generalizes the idea of "Gini" importance to other losses,
following the explanation of Gini importance from "Random Forests" documentation
by Leo Breiman and Adele Cutler, and following the implementation from scikit-learn.
This feature importance is calculated as follows:
- importance(feature j) = sum (over nodes which split on feature j) of the gain,
where gain is scaled by the number of instances passing through node
- Normalize importances for tree to sum to 1.
.. note:: Feature importance for single decision trees can have high variance due to
correlated predictor variables. Consider using a :py:class:`RandomForestClassifier`
to determine feature importance instead.
"""
return self._call_java("featureImportances")
@inherit_doc
class RandomForestClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasSeed,
HasRawPredictionCol, HasProbabilityCol,
RandomForestParams, TreeClassifierParams, HasCheckpointInterval,
JavaMLWritable, JavaMLReadable):
"""
`Random Forest <http://en.wikipedia.org/wiki/Random_forest>`_
learning algorithm for classification.
It supports both binary and multiclass labels, as well as both continuous and categorical
features.
>>> import numpy
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> rf = RandomForestClassifier(numTrees=3, maxDepth=2, labelCol="indexed", seed=42)
>>> model = rf.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 1.0, 1.0])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> result = model.transform(test0).head()
>>> result.prediction
0.0
>>> numpy.argmax(result.probability)
0
>>> numpy.argmax(result.rawPrediction)
0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.trees
[DecisionTreeClassificationModel (uid=...) of depth..., DecisionTreeClassificationModel...]
>>> rfc_path = temp_path + "/rfc"
>>> rf.save(rfc_path)
>>> rf2 = RandomForestClassifier.load(rfc_path)
>>> rf2.getNumTrees()
3
>>> model_path = temp_path + "/rfc_model"
>>> model.save(model_path)
>>> model2 = RandomForestClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini",
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, impurity="gini", \
numTrees=20, featureSubsetStrategy="auto", seed=None, subsamplingRate=1.0)
"""
super(RandomForestClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.RandomForestClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
impurity="gini", numTrees=20, featureSubsetStrategy="auto",
subsamplingRate=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None,
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, seed=None, \
impurity="gini", numTrees=20, featureSubsetStrategy="auto", subsamplingRate=1.0)
Sets params for RandomForestClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return RandomForestClassificationModel(java_model)
class RandomForestClassificationModel(TreeEnsembleModel, JavaClassificationModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by RandomForestClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeClassificationModel(m) for m in list(self._call_java("trees"))]
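# Sketch: individual ensemble members can be inspected like standalone
# decision trees (assumes `model` is a fitted RandomForestClassificationModel):
#
#     for i, tree in enumerate(model.trees):
#         print(i, tree.depth, tree.numNodes)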
@inherit_doc
class GBTClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasMaxIter,
GBTParams, HasCheckpointInterval, HasStepSize, HasSeed, JavaMLWritable,
JavaMLReadable):
"""
`Gradient-Boosted Trees (GBTs) <http://en.wikipedia.org/wiki/Gradient_boosting>`_
learning algorithm for classification.
It supports binary labels, as well as both continuous and categorical features.
The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
Notes on Gradient Boosting vs. TreeBoost:
- This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
- Both algorithms learn tree ensembles by minimizing loss functions.
- TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
based on the loss function, whereas the original gradient boosting method does not.
- We expect to implement TreeBoost in the future:
`SPARK-4240 <https://issues.apache.org/jira/browse/SPARK-4240>`_
.. note:: Multiclass labels are not currently supported.
>>> from numpy import allclose
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.feature import StringIndexer
>>> df = spark.createDataFrame([
... (1.0, Vectors.dense(1.0)),
... (0.0, Vectors.sparse(1, [], []))], ["label", "features"])
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> si_model = stringIndexer.fit(df)
>>> td = si_model.transform(df)
>>> gbt = GBTClassifier(maxIter=5, maxDepth=2, labelCol="indexed", seed=42)
>>> model = gbt.fit(td)
>>> model.featureImportances
SparseVector(1, {0: 1.0})
>>> allclose(model.treeWeights, [1.0, 0.1, 0.1, 0.1, 0.1])
True
>>> test0 = spark.createDataFrame([(Vectors.dense(-1.0),)], ["features"])
>>> model.transform(test0).head().prediction
0.0
>>> test1 = spark.createDataFrame([(Vectors.sparse(1, [0], [1.0]),)], ["features"])
>>> model.transform(test1).head().prediction
1.0
>>> model.totalNumNodes
15
>>> print(model.toDebugString)
GBTClassificationModel (uid=...)...with 5 trees...
>>> gbtc_path = temp_path + "/gbtc"
>>> gbt.save(gbtc_path)
>>> gbt2 = GBTClassifier.load(gbtc_path)
>>> gbt2.getMaxDepth()
2
>>> model_path = temp_path + "/gbtc_model"
>>> model.save(model_path)
>>> model2 = GBTClassificationModel.load(model_path)
>>> model.featureImportances == model2.featureImportances
True
>>> model.treeWeights == model2.treeWeights
True
>>> model.trees
[DecisionTreeRegressionModel (uid=...) of depth..., DecisionTreeRegressionModel...]
.. versionadded:: 1.4.0
"""
lossType = Param(Params._dummy(), "lossType",
"Loss function which GBT tries to minimize (case-insensitive). " +
"Supported options: " + ", ".join(GBTParams.supportedLossTypes),
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, lossType="logistic",
maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
"""
super(GBTClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.GBTClassifier", self.uid)
self._setDefault(maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, subsamplingRate=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0,
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10,
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxDepth=5, maxBins=32, minInstancesPerNode=1, minInfoGain=0.0, \
maxMemoryInMB=256, cacheNodeIds=False, checkpointInterval=10, \
lossType="logistic", maxIter=20, stepSize=0.1, seed=None, subsamplingRate=1.0)
Sets params for Gradient Boosted Tree Classification.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return GBTClassificationModel(java_model)
@since("1.4.0")
def setLossType(self, value):
"""
Sets the value of :py:attr:`lossType`.
"""
return self._set(lossType=value)
@since("1.4.0")
def getLossType(self):
"""
Gets the value of lossType or its default value.
"""
return self.getOrDefault(self.lossType)
class GBTClassificationModel(TreeEnsembleModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by GBTClassifier.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def featureImportances(self):
"""
Estimate of the importance of each feature.
Each feature's importance is the average of its importance across all trees in the ensemble.
The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
(Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
and follows the implementation from scikit-learn.
.. seealso:: :py:attr:`DecisionTreeClassificationModel.featureImportances`
"""
return self._call_java("featureImportances")
@property
@since("2.0.0")
def trees(self):
"""Trees in this ensemble. Warning: These have null parent Estimators."""
return [DecisionTreeRegressionModel(m) for m in list(self._call_java("trees"))]
@inherit_doc
class NaiveBayes(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol, HasProbabilityCol,
HasRawPredictionCol, HasThresholds, HasWeightCol, JavaMLWritable, JavaMLReadable):
"""
Naive Bayes Classifiers.
It supports both Multinomial and Bernoulli NB. `Multinomial NB
<http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html>`_
can handle finitely supported discrete data. For example, by converting documents into
TF-IDF vectors, it can be used for document classification. By converting every vector
into binary (0/1) data, it can also be used as `Bernoulli NB
<http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html>`_.
The input feature values must be nonnegative.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... Row(label=0.0, weight=0.1, features=Vectors.dense([0.0, 0.0])),
... Row(label=0.0, weight=0.5, features=Vectors.dense([0.0, 1.0])),
... Row(label=1.0, weight=1.0, features=Vectors.dense([1.0, 0.0]))])
>>> nb = NaiveBayes(smoothing=1.0, modelType="multinomial", weightCol="weight")
>>> model = nb.fit(df)
>>> model.pi
DenseVector([-0.81..., -0.58...])
>>> model.theta
DenseMatrix(2, 2, [-0.91..., -0.51..., -0.40..., -1.09...], 1)
>>> test0 = sc.parallelize([Row(features=Vectors.dense([1.0, 0.0]))]).toDF()
>>> result = model.transform(test0).head()
>>> result.prediction
1.0
>>> result.probability
DenseVector([0.32..., 0.67...])
>>> result.rawPrediction
DenseVector([-1.72..., -0.99...])
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(2, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
1.0
>>> nb_path = temp_path + "/nb"
>>> nb.save(nb_path)
>>> nb2 = NaiveBayes.load(nb_path)
>>> nb2.getSmoothing()
1.0
>>> model_path = temp_path + "/nb_model"
>>> model.save(model_path)
>>> model2 = NaiveBayesModel.load(model_path)
>>> model.pi == model2.pi
True
>>> model.theta == model2.theta
True
>>> nb = nb.setThresholds([0.01, 10.00])
>>> model3 = nb.fit(df)
>>> result = model3.transform(test0).head()
>>> result.prediction
0.0
.. versionadded:: 1.5.0
"""
smoothing = Param(Params._dummy(), "smoothing", "The smoothing parameter, should be >= 0, " +
"default is 1.0", typeConverter=TypeConverters.toFloat)
modelType = Param(Params._dummy(), "modelType", "The model type which is a string " +
"(case-sensitive). Supported options: multinomial (default) and bernoulli.",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
"""
super(NaiveBayes, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.NaiveBayes", self.uid)
self._setDefault(smoothing=1.0, modelType="multinomial")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0,
modelType="multinomial", thresholds=None, weightCol=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
probabilityCol="probability", rawPredictionCol="rawPrediction", smoothing=1.0, \
modelType="multinomial", thresholds=None, weightCol=None)
Sets params for Naive Bayes.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return NaiveBayesModel(java_model)
@since("1.5.0")
def setSmoothing(self, value):
"""
Sets the value of :py:attr:`smoothing`.
"""
return self._set(smoothing=value)
@since("1.5.0")
def getSmoothing(self):
"""
Gets the value of smoothing or its default value.
"""
return self.getOrDefault(self.smoothing)
@since("1.5.0")
def setModelType(self, value):
"""
Sets the value of :py:attr:`modelType`.
"""
return self._set(modelType=value)
@since("1.5.0")
def getModelType(self):
"""
Gets the value of modelType or its default value.
"""
return self.getOrDefault(self.modelType)
class NaiveBayesModel(JavaModel, JavaClassificationModel, JavaMLWritable, JavaMLReadable):
"""
Model fitted by NaiveBayes.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pi(self):
"""
log of class priors.
"""
return self._call_java("pi")
@property
@since("2.0.0")
def theta(self):
"""
log of class conditional probabilities.
"""
return self._call_java("theta")
@inherit_doc
class MultilayerPerceptronClassifier(JavaEstimator, HasFeaturesCol, HasLabelCol, HasPredictionCol,
HasMaxIter, HasTol, HasSeed, HasStepSize, JavaMLWritable,
JavaMLReadable):
"""
Classifier trainer based on the Multilayer Perceptron.
Each layer has a sigmoid activation function, except the output layer, which uses softmax.
The number of inputs has to be equal to the size of the feature vectors.
The number of outputs has to be equal to the total number of labels.
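For example, layers=[2, 5, 2] specifies two input features, one hidden
layer of five neurons, and two output classes.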
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (0.0, Vectors.dense([0.0, 0.0])),
... (1.0, Vectors.dense([0.0, 1.0])),
... (1.0, Vectors.dense([1.0, 0.0])),
... (0.0, Vectors.dense([1.0, 1.0]))], ["label", "features"])
>>> mlp = MultilayerPerceptronClassifier(maxIter=100, layers=[2, 2, 2], blockSize=1, seed=123)
>>> model = mlp.fit(df)
>>> model.layers
[2, 2, 2]
>>> model.weights.size
12
>>> testDF = spark.createDataFrame([
... (Vectors.dense([1.0, 0.0]),),
... (Vectors.dense([0.0, 0.0]),)], ["features"])
>>> model.transform(testDF).show()
+---------+----------+
| features|prediction|
+---------+----------+
|[1.0,0.0]| 1.0|
|[0.0,0.0]| 0.0|
+---------+----------+
...
>>> mlp_path = temp_path + "/mlp"
>>> mlp.save(mlp_path)
>>> mlp2 = MultilayerPerceptronClassifier.load(mlp_path)
>>> mlp2.getBlockSize()
1
>>> model_path = temp_path + "/mlp_model"
>>> model.save(model_path)
>>> model2 = MultilayerPerceptronClassificationModel.load(model_path)
>>> model.layers == model2.layers
True
>>> model.weights == model2.weights
True
>>> mlp2 = mlp2.setInitialWeights(list(range(0, 12)))
>>> model3 = mlp2.fit(df)
>>> model3.weights != model2.weights
True
>>> model3.layers == model.layers
True
.. versionadded:: 1.6.0
"""
layers = Param(Params._dummy(), "layers", "Sizes of layers from input layer to output layer. " +
"E.g., Array(780, 100, 10) means 780 inputs, one hidden layer with 100 " +
"neurons and output layer of 10 neurons.",
typeConverter=TypeConverters.toListInt)
blockSize = Param(Params._dummy(), "blockSize", "Block size for stacking input data in " +
"matrices. Data is stacked within partitions. If block size is more than " +
"remaining data in a partition then it is adjusted to the size of this " +
"data. Recommended size is between 10 and 1000, default is 128.",
typeConverter=TypeConverters.toInt)
solver = Param(Params._dummy(), "solver", "The solver algorithm for optimization. Supported " +
"options: l-bfgs, gd.", typeConverter=TypeConverters.toString)
initialWeights = Param(Params._dummy(), "initialWeights", "The initial weights of the model.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
"""
super(MultilayerPerceptronClassifier, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.classification.MultilayerPerceptronClassifier", self.uid)
self._setDefault(maxIter=100, tol=1E-4, blockSize=128, stepSize=0.03, solver="l-bfgs")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction",
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03,
solver="l-bfgs", initialWeights=None):
"""
setParams(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
maxIter=100, tol=1e-6, seed=None, layers=None, blockSize=128, stepSize=0.03, \
solver="l-bfgs", initialWeights=None)
Sets params for MultilayerPerceptronClassifier.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MultilayerPerceptronClassificationModel(java_model)
@since("1.6.0")
def setLayers(self, value):
"""
Sets the value of :py:attr:`layers`.
"""
return self._set(layers=value)
@since("1.6.0")
def getLayers(self):
"""
Gets the value of layers or its default value.
"""
return self.getOrDefault(self.layers)
@since("1.6.0")
def setBlockSize(self, value):
"""
Sets the value of :py:attr:`blockSize`.
"""
return self._set(blockSize=value)
@since("1.6.0")
def getBlockSize(self):
"""
Gets the value of blockSize or its default value.
"""
return self.getOrDefault(self.blockSize)
@since("2.0.0")
def setStepSize(self, value):
"""
Sets the value of :py:attr:`stepSize`.
"""
return self._set(stepSize=value)
@since("2.0.0")
def getStepSize(self):
"""
Gets the value of stepSize or its default value.
"""
return self.getOrDefault(self.stepSize)
@since("2.0.0")
def setSolver(self, value):
"""
Sets the value of :py:attr:`solver`.
"""
return self._set(solver=value)
@since("2.0.0")
def getSolver(self):
"""
Gets the value of solver or its default value.
"""
return self.getOrDefault(self.solver)
@since("2.0.0")
def setInitialWeights(self, value):
"""
Sets the value of :py:attr:`initialWeights`.
"""
return self._set(initialWeights=value)
@since("2.0.0")
def getInitialWeights(self):
"""
Gets the value of initialWeights or its default value.
"""
return self.getOrDefault(self.initialWeights)
class MultilayerPerceptronClassificationModel(JavaModel, JavaPredictionModel, JavaMLWritable,
JavaMLReadable):
"""
Model fitted by MultilayerPerceptronClassifier.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def layers(self):
"""
array of layer sizes including input and output layers.
"""
return self._call_java("javaLayers")
@property
@since("2.0.0")
def weights(self):
"""
the weights of layers.
"""
return self._call_java("weights")
class OneVsRestParams(HasFeaturesCol, HasLabelCol, HasPredictionCol):
"""
Parameters for OneVsRest and OneVsRestModel.
"""
classifier = Param(Params._dummy(), "classifier", "base binary classifier")
@since("2.0.0")
def setClassifier(self, value):
"""
Sets the value of :py:attr:`classifier`.
.. note:: Only LogisticRegression and NaiveBayes are supported now.
"""
return self._set(classifier=value)
@since("2.0.0")
def getClassifier(self):
"""
Gets the value of classifier or its default value.
"""
return self.getOrDefault(self.classifier)
@inherit_doc
class OneVsRest(Estimator, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Reduction of Multiclass Classification to Binary Classification.
Performs reduction using the one-against-all strategy.
For a multiclass classification with k classes, train k models (one per class).
Each example is scored against all k models and the model with highest score
is picked to label the example.
>>> from pyspark.sql import Row
>>> from pyspark.ml.linalg import Vectors
>>> data_path = "data/mllib/sample_multiclass_classification_data.txt"
>>> df = spark.read.format("libsvm").load(data_path)
>>> lr = LogisticRegression(regParam=0.01)
>>> ovr = OneVsRest(classifier=lr)
>>> model = ovr.fit(df)
>>> model.models[0].coefficients
DenseVector([0.5..., -1.0..., 3.4..., 4.2...])
>>> model.models[1].coefficients
DenseVector([-2.1..., 3.1..., -2.6..., -2.3...])
>>> model.models[2].coefficients
DenseVector([0.3..., -3.4..., 1.0..., -1.1...])
>>> [x.intercept for x in model.models]
[-2.7..., -2.5..., -1.3...]
>>> test0 = sc.parallelize([Row(features=Vectors.dense(-1.0, 0.0, 1.0, 1.0))]).toDF()
>>> model.transform(test0).head().prediction
0.0
>>> test1 = sc.parallelize([Row(features=Vectors.sparse(4, [0], [1.0]))]).toDF()
>>> model.transform(test1).head().prediction
2.0
>>> test2 = sc.parallelize([Row(features=Vectors.dense(0.5, 0.4, 0.3, 0.2))]).toDF()
>>> model.transform(test2).head().prediction
0.0
>>> model_path = temp_path + "/ovr_model"
>>> model.save(model_path)
>>> model2 = OneVsRestModel.load(model_path)
>>> model2.transform(test0).head().prediction
0.0
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, featuresCol="features", labelCol="label", predictionCol="prediction",
classifier=None):
"""
__init__(self, featuresCol="features", labelCol="label", predictionCol="prediction", \
classifier=None)
"""
super(OneVsRest, self).__init__()
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None):
"""
setParams(self, featuresCol=None, labelCol=None, predictionCol=None, classifier=None):
Sets params for OneVsRest.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _fit(self, dataset):
labelCol = self.getLabelCol()
featuresCol = self.getFeaturesCol()
predictionCol = self.getPredictionCol()
classifier = self.getClassifier()
assert isinstance(classifier, HasRawPredictionCol),\
"Classifier %s doesn't extend from HasRawPredictionCol." % type(classifier)
numClasses = int(dataset.agg({labelCol: "max"}).head()["max("+labelCol+")"]) + 1
multiclassLabeled = dataset.select(labelCol, featuresCol)
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
multiclassLabeled.persist(StorageLevel.MEMORY_AND_DISK)
def trainSingleClass(index):
binaryLabelCol = "mc2b$" + str(index)
trainingDataset = multiclassLabeled.withColumn(
binaryLabelCol,
when(multiclassLabeled[labelCol] == float(index), 1.0).otherwise(0.0))
paramMap = dict([(classifier.labelCol, binaryLabelCol),
(classifier.featuresCol, featuresCol),
(classifier.predictionCol, predictionCol)])
return classifier.fit(trainingDataset, paramMap)
# TODO: Parallel training for all classes.
models = [trainSingleClass(i) for i in range(numClasses)]
if handlePersistence:
multiclassLabeled.unpersist()
return self._copyValues(OneVsRestModel(models=models))
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newOvr = Params.copy(self, extra)
if self.isSet(self.classifier):
newOvr.setClassifier(self.getClassifier().copy(extra))
return newOvr
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRest, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
py_stage = cls(featuresCol=featuresCol, labelCol=labelCol, predictionCol=predictionCol,
classifier=classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRest. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRest",
self.uid)
_java_obj.setClassifier(self.getClassifier()._to_java())
_java_obj.setFeaturesCol(self.getFeaturesCol())
_java_obj.setLabelCol(self.getLabelCol())
_java_obj.setPredictionCol(self.getPredictionCol())
return _java_obj
class OneVsRestModel(Model, OneVsRestParams, MLReadable, MLWritable):
"""
.. note:: Experimental
Model fitted by OneVsRest.
This stores the models resulting from training k binary classifiers: one for each class.
Each example is scored against all k models, and the model with the highest score
is picked to label the example.
.. versionadded:: 2.0.0
"""
def __init__(self, models):
super(OneVsRestModel, self).__init__()
self.models = models
def _transform(self, dataset):
# determine the input columns: these need to be passed through
origCols = dataset.columns
# add an accumulator column to store predictions of all the models
accColName = "mbc$acc" + str(uuid.uuid4())
initUDF = udf(lambda _: [], ArrayType(DoubleType()))
newDataset = dataset.withColumn(accColName, initUDF(dataset[origCols[0]]))
# persist if underlying dataset is not persistent.
handlePersistence = \
dataset.rdd.getStorageLevel() == StorageLevel(False, False, False, False)
if handlePersistence:
newDataset.persist(StorageLevel.MEMORY_AND_DISK)
# update the accumulator column with the result of prediction of models
aggregatedDataset = newDataset
for index, model in enumerate(self.models):
rawPredictionCol = model._call_java("getRawPredictionCol")
columns = origCols + [rawPredictionCol, accColName]
# add temporary column to store intermediate scores and update
tmpColName = "mbc$tmp" + str(uuid.uuid4())
updateUDF = udf(
lambda predictions, prediction: predictions + [prediction.tolist()[1]],
ArrayType(DoubleType()))
transformedDataset = model.transform(aggregatedDataset).select(*columns)
updatedDataset = transformedDataset.withColumn(
tmpColName,
updateUDF(transformedDataset[accColName], transformedDataset[rawPredictionCol]))
newColumns = origCols + [tmpColName]
# switch out the intermediate column with the accumulator column
aggregatedDataset = updatedDataset\
.select(*newColumns).withColumnRenamed(tmpColName, accColName)
if handlePersistence:
newDataset.unpersist()
# output the index of the classifier with highest confidence as prediction
labelUDF = udf(
lambda predictions: float(max(enumerate(predictions), key=operator.itemgetter(1))[0]),
DoubleType())
# output label and label metadata as prediction
return aggregatedDataset.withColumn(
self.getPredictionCol(), labelUDF(aggregatedDataset[accColName])).drop(accColName)
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This creates a deep copy of the embedded paramMap,
and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newModel = Params.copy(self, extra)
newModel.models = [model.copy(extra) for model in self.models]
return newModel
@since("2.0.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@since("2.0.0")
def save(self, path):
"""Save this ML instance to the given path, a shortcut of `write().save(path)`."""
self.write().save(path)
@classmethod
@since("2.0.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java OneVsRestModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
featuresCol = java_stage.getFeaturesCol()
labelCol = java_stage.getLabelCol()
predictionCol = java_stage.getPredictionCol()
classifier = JavaParams._from_java(java_stage.getClassifier())
models = [JavaParams._from_java(model) for model in java_stage.models()]
py_stage = cls(models=models).setPredictionCol(predictionCol).setLabelCol(labelCol)\
.setFeaturesCol(featuresCol).setClassifier(classifier)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java OneVsRestModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
java_models = [model._to_java() for model in self.models]
java_models_array = JavaWrapper._new_java_array(
java_models, sc._gateway.jvm.org.apache.spark.ml.classification.ClassificationModel)
metadata = JavaParams._new_java_obj("org.apache.spark.sql.types.Metadata")
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.classification.OneVsRestModel",
self.uid, metadata.empty(), java_models_array)
_java_obj.set("classifier", self.getClassifier()._to_java())
_java_obj.set("featuresCol", self.getFeaturesCol())
_java_obj.set("labelCol", self.getLabelCol())
_java_obj.set("predictionCol", self.getPredictionCol())
return _java_obj
if __name__ == "__main__":
import doctest
import pyspark.ml.classification
from pyspark.sql import SparkSession
globs = pyspark.ml.classification.__dict__.copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.classification tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
import tempfile
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| apache-2.0 |
yqm/sl4a | python/src/Tools/scripts/ftpmirror.py |
#! /usr/bin/env python
"""Mirror a remote ftp subtree into a local directory tree.
usage: ftpmirror [-v] [-q] [-i] [-m] [-n] [-r] [-s pat]
[-l username [-p passwd [-a account]]]
hostname[:port] [remotedir [localdir]]
-v: verbose
-q: quiet
-i: interactive mode
-m: macintosh server (NCSA telnet 2.4) (implies -n -s '*.o')
-n: don't log in
-r: remove local files/directories no longer pertinent
-l username [-p passwd [-a account]]: login info (default .netrc or anonymous)
-s pat: skip files matching pattern
hostname: remote host w/ optional port separated by ':'
remotedir: remote directory (default initial)
localdir: local directory (default current)
"""
import os
import sys
import time
import getopt
import ftplib
import netrc
from fnmatch import fnmatch
# Print usage message and exit
def usage(*args):
sys.stdout = sys.stderr
for msg in args: print msg
print __doc__
sys.exit(2)
verbose = 1 # 0 for -q, 2 for -v
interactive = 0
mac = 0
rmok = 0
nologin = 0
skippats = ['.', '..', '.mirrorinfo']
# Main program: parse command line and start processing
def main():
global verbose, interactive, mac, rmok, nologin
try:
opts, args = getopt.getopt(sys.argv[1:], 'a:bil:mnp:qrs:v')
except getopt.error, msg:
usage(msg)
login = ''
passwd = ''
account = ''
if not args: usage('hostname missing')
host = args[0]
port = 0
if ':' in host:
host, port = host.split(':', 1)
port = int(port)
try:
auth = netrc.netrc().authenticators(host)
if auth is not None:
login, account, passwd = auth
except (netrc.NetrcParseError, IOError):
pass
for o, a in opts:
if o == '-l': login = a
if o == '-p': passwd = a
if o == '-a': account = a
if o == '-v': verbose = verbose + 1
if o == '-q': verbose = 0
if o == '-i': interactive = 1
if o == '-m': mac = 1; nologin = 1; skippats.append('*.o')
if o == '-n': nologin = 1
if o == '-r': rmok = 1
if o == '-s': skippats.append(a)
remotedir = ''
localdir = ''
if args[1:]:
remotedir = args[1]
if args[2:]:
localdir = args[2]
if args[3:]: usage('too many arguments')
#
f = ftplib.FTP()
if verbose: print "Connecting to '%s%s'..." % (host,
(port and ":%d"%port or ""))
f.connect(host,port)
if not nologin:
if verbose:
print 'Logging in as %r...' % (login or 'anonymous')
f.login(login, passwd, account)
if verbose: print 'OK.'
pwd = f.pwd()
if verbose > 1: print 'PWD =', repr(pwd)
if remotedir:
if verbose > 1: print 'cwd(%s)' % repr(remotedir)
f.cwd(remotedir)
if verbose > 1: print 'OK.'
pwd = f.pwd()
if verbose > 1: print 'PWD =', repr(pwd)
#
mirrorsubdir(f, localdir)
# Core logic: mirror one subdirectory (recursively)
def mirrorsubdir(f, localdir):
pwd = f.pwd()
if localdir and not os.path.isdir(localdir):
if verbose: print 'Creating local directory', repr(localdir)
try:
makedir(localdir)
except os.error, msg:
print "Failed to establish local directory", repr(localdir)
return
infofilename = os.path.join(localdir, '.mirrorinfo')
try:
text = open(infofilename, 'r').read()
except IOError, msg:
text = '{}'
try:
info = eval(text)
except (SyntaxError, NameError):
print 'Bad mirror info in', repr(infofilename)
info = {}
subdirs = []
listing = []
if verbose: print 'Listing remote directory %r...' % (pwd,)
f.retrlines('LIST', listing.append)
filesfound = []
for line in listing:
if verbose > 1: print '-->', repr(line)
if mac:
# Mac listing has just filenames;
# trailing / means subdirectory
filename = line.strip()
mode = '-'
if filename[-1:] == '/':
filename = filename[:-1]
mode = 'd'
infostuff = ''
else:
# Parse, assuming a UNIX listing
words = line.split(None, 8)
if len(words) < 6:
if verbose > 1: print 'Skipping short line'
continue
filename = words[-1].lstrip()
i = filename.find(" -> ")
if i >= 0:
# words[0] had better start with 'l'...
if verbose > 1:
print 'Found symbolic link %r' % (filename,)
linkto = filename[i+4:]
filename = filename[:i]
infostuff = words[-5:-1]
mode = words[0]
skip = 0
for pat in skippats:
if fnmatch(filename, pat):
if verbose > 1:
print 'Skip pattern', repr(pat),
print 'matches', repr(filename)
skip = 1
break
if skip:
continue
if mode[0] == 'd':
if verbose > 1:
print 'Remembering subdirectory', repr(filename)
subdirs.append(filename)
continue
filesfound.append(filename)
if info.has_key(filename) and info[filename] == infostuff:
if verbose > 1:
print 'Already have this version of',repr(filename)
continue
fullname = os.path.join(localdir, filename)
tempname = os.path.join(localdir, '@'+filename)
if interactive:
doit = askabout('file', filename, pwd)
if not doit:
if not info.has_key(filename):
info[filename] = 'Not retrieved'
continue
try:
os.unlink(tempname)
except os.error:
pass
if mode[0] == 'l':
if verbose:
print "Creating symlink %r -> %r" % (filename, linkto)
try:
os.symlink(linkto, tempname)
except IOError, msg:
print "Can't create %r: %s" % (tempname, msg)
continue
else:
try:
fp = open(tempname, 'wb')
except IOError, msg:
print "Can't create %r: %s" % (tempname, msg)
continue
if verbose:
print 'Retrieving %r from %r as %r...' % (filename, pwd, fullname)
if verbose:
fp1 = LoggingFile(fp, 1024, sys.stdout)
else:
fp1 = fp
t0 = time.time()
try:
f.retrbinary('RETR ' + filename,
fp1.write, 8*1024)
except ftplib.error_perm, msg:
print msg
t1 = time.time()
bytes = fp.tell()
fp.close()
if fp1 != fp:
fp1.close()
try:
os.unlink(fullname)
except os.error:
pass # Ignore the error
try:
os.rename(tempname, fullname)
except os.error, msg:
print "Can't rename %r to %r: %s" % (tempname, fullname, msg)
continue
info[filename] = infostuff
writedict(info, infofilename)
if verbose and mode[0] != 'l':
dt = t1 - t0
kbytes = bytes / 1024.0
print int(round(kbytes)),
print 'Kbytes in',
print int(round(dt)),
print 'seconds',
if t1 > t0:
print '(~%d Kbytes/sec)' % int(round(kbytes/dt))
print
#
# Remove files from info that are no longer remote
deletions = 0
for filename in info.keys():
if filename not in filesfound:
if verbose:
print "Removing obsolete info entry for",
print repr(filename), "in", repr(localdir or ".")
del info[filename]
deletions = deletions + 1
if deletions:
writedict(info, infofilename)
#
# Remove local files that are no longer in the remote directory
try:
if not localdir: names = os.listdir(os.curdir)
else: names = os.listdir(localdir)
except os.error:
names = []
for name in names:
if name[0] == '.' or info.has_key(name) or name in subdirs:
continue
skip = 0
for pat in skippats:
if fnmatch(name, pat):
if verbose > 1:
print 'Skip pattern', repr(pat),
print 'matches', repr(name)
skip = 1
break
if skip:
continue
fullname = os.path.join(localdir, name)
if not rmok:
if verbose:
print 'Local file', repr(fullname),
print 'is no longer pertinent'
continue
if verbose: print 'Removing local file/dir', repr(fullname)
remove(fullname)
#
# Recursively mirror subdirectories
for subdir in subdirs:
if interactive:
doit = askabout('subdirectory', subdir, pwd)
if not doit: continue
if verbose: print 'Processing subdirectory', repr(subdir)
localsubdir = os.path.join(localdir, subdir)
pwd = f.pwd()
if verbose > 1:
print 'Remote directory now:', repr(pwd)
print 'Remote cwd', repr(subdir)
try:
f.cwd(subdir)
except ftplib.error_perm, msg:
print "Can't chdir to", repr(subdir), ":", repr(msg)
else:
if verbose: print 'Mirroring as', repr(localsubdir)
mirrorsubdir(f, localsubdir)
if verbose > 1: print 'Remote cwd ..'
f.cwd('..')
newpwd = f.pwd()
if newpwd != pwd:
print 'Ended up in wrong directory after cd + cd ..'
print 'Giving up now.'
break
else:
if verbose > 1: print 'OK.'
# Helper to remove a file or directory tree
def remove(fullname):
if os.path.isdir(fullname) and not os.path.islink(fullname):
try:
names = os.listdir(fullname)
except os.error:
names = []
ok = 1
for name in names:
if not remove(os.path.join(fullname, name)):
ok = 0
if not ok:
return 0
try:
os.rmdir(fullname)
except os.error, msg:
print "Can't remove local directory %r: %s" % (fullname, msg)
return 0
else:
try:
os.unlink(fullname)
except os.error, msg:
print "Can't remove local file %r: %s" % (fullname, msg)
return 0
return 1
# Wrapper around a file for writing to write a hash sign every block.
class LoggingFile:
def __init__(self, fp, blocksize, outfp):
self.fp = fp
self.bytes = 0
self.hashes = 0
self.blocksize = blocksize
self.outfp = outfp
def write(self, data):
self.bytes = self.bytes + len(data)
hashes = int(self.bytes) / self.blocksize
while hashes > self.hashes:
self.outfp.write('#')
self.outfp.flush()
self.hashes = self.hashes + 1
self.fp.write(data)
def close(self):
self.outfp.write('\n')
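# Usage sketch: wrap an open file so a '#' is printed for each block of
# data written (mirrors how mirrorsubdir() uses it above):
#
#     fp = open('download.bin', 'wb')
#     logfp = LoggingFile(fp, 1024, sys.stdout)
#     logfp.write(data)   # prints one hash mark per KiB accumulated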
# Ask permission to download a file.
def askabout(filetype, filename, pwd):
prompt = 'Retrieve %s %s from %s ? [ny] ' % (filetype, filename, pwd)
while 1:
reply = raw_input(prompt).strip().lower()
if reply in ['y', 'ye', 'yes']:
return 1
if reply in ['', 'n', 'no', 'nop', 'nope']:
return 0
print 'Please answer yes or no.'
# Create a directory if it doesn't exist. Recursively create the
# parent directory as well if needed.
def makedir(pathname):
if os.path.isdir(pathname):
return
dirname = os.path.dirname(pathname)
if dirname: makedir(dirname)
os.mkdir(pathname, 0777)
# Write a dictionary to a file in a way that can be read back using
# rval() but is still somewhat readable (i.e. not a single long line).
# Also creates a backup file.
def writedict(dict, filename):
dir, fname = os.path.split(filename)
tempname = os.path.join(dir, '@' + fname)
backup = os.path.join(dir, fname + '~')
try:
os.unlink(backup)
except os.error:
pass
fp = open(tempname, 'w')
fp.write('{\n')
for key, value in dict.items():
fp.write('%r: %r,\n' % (key, value))
fp.write('}\n')
fp.close()
try:
os.rename(filename, backup)
except os.error:
pass
os.rename(tempname, filename)
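# A hedged companion sketch (not part of the original): the file produced by
# writedict() is a Python dict literal, so it can be read back with eval();
# the rval() reader mentioned above is assumed to live elsewhere in the
# original package.
def readdict(filename):
    fp = open(filename, 'r')
    try:
        return eval(fp.read())
    finally:
        fp.close()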
if __name__ == '__main__':
main()
| apache-2.0 | -2,619,000,734,700,214,000 | 7,947,048,885,250,735,000 | 31.135 | 82 | 0.517504 | false |
gwpy/gwpy | examples/spectrogram/rayleigh.py | 3 | 2125 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Plotting a `Spectrogram` of the Rayleigh statistic
As described in :ref:`gwpy-example-frequencyseries-rayleigh`, the Rayleigh
statistic can be used to study non-Gaussianity in a timeseries.
We can study the time variance of these features by plotting a
time-frequency spectrogram where we calculate the Rayleigh statistic for
each time bin.
"""
__author__ = "Duncan Macleod <duncan.macleod@ligo.org>"
__currentmodule__ = 'gwpy.spectrogram'
# To demonstrate this, we can load some data from the LIGO Livingston
# interferometer around the time of the GW151226 gravitational wave detection:
from gwpy.timeseries import TimeSeries
gwdata = TimeSeries.fetch_open_data('L1', 'Dec 26 2015 03:37',
'Dec 26 2015 03:47', verbose=True)
# Next, we can calculate a Rayleigh statistic `Spectrogram` using the
# :meth:`~gwpy.timeseries.TimeSeries.rayleigh_spectrogram` method of the
# `~gwpy.timeseries.TimeSeries` and a 5-second stride with a 2-second FFT and
# 1-second overlap (50%):
rayleigh = gwdata.rayleigh_spectrogram(5, fftlength=2, overlap=1)
# and can make a plot using the :meth:`~Spectrogram.plot` method
plot = rayleigh.plot(norm='log', vmin=0.25, vmax=4)
ax = plot.gca()
ax.set_yscale('log')
ax.set_ylim(30, 1500)
ax.set_title('Sensitivity of LIGO-Livingston around GW151226')
ax.colorbar(cmap='coolwarm', label='Rayleigh statistic')
plot.show()
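# As a hedged aside (not part of the original example): `plot` is a standard
# matplotlib figure, so the result can also be written to disk, e.g.:
#
# plot.savefig('gw151226-rayleigh.png', dpi=150)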
| gpl-3.0 | -4,803,243,034,858,792,000 | -2,019,834,646,603,736,300 | 39.865385 | 79 | 0.741176 | false |
jgao54/airflow | tests/contrib/hooks/test_aws_hook.py | 5 | 8814 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import boto3
from airflow import configuration
from airflow.models.connection import Connection
from airflow.contrib.hooks.aws_hook import AwsHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
try:
from moto import mock_emr, mock_dynamodb2, mock_sts, mock_iam
except ImportError:
mock_emr = None
mock_dynamodb2 = None
mock_sts = None
mock_iam = None
class TestAwsHook(unittest.TestCase):
@mock_emr
def setUp(self):
configuration.load_test_config()
@unittest.skipIf(mock_emr is None, 'mock_emr package not present')
@mock_emr
def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
client = boto3.client('emr', region_name='us-east-1')
if len(client.list_clusters()['Clusters']):
raise ValueError('AWS not properly mocked')
hook = AwsHook(aws_conn_id='aws_default')
client_from_hook = hook.get_client_type('emr')
self.assertEqual(client_from_hook.list_clusters()['Clusters'], [])
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
@mock_dynamodb2
def test_get_resource_type_returns_a_boto3_resource_of_the_requested_type(self):
hook = AwsHook(aws_conn_id='aws_default')
resource_from_hook = hook.get_resource_type('dynamodb')
        # create a table in the mocked DynamoDB backend
table = resource_from_hook.create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 0)
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
@mock_dynamodb2
def test_get_session_returns_a_boto3_session(self):
hook = AwsHook(aws_conn_id='aws_default')
session_from_hook = hook.get_session()
resource_from_session = session_from_hook.resource('dynamodb')
table = resource_from_session.create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'name',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 0)
@mock.patch.object(AwsHook, 'get_connection')
def test_get_credentials_from_login(self, mock_get_connection):
mock_connection = Connection(login='aws_access_key_id',
password='aws_secret_access_key')
mock_get_connection.return_value = mock_connection
hook = AwsHook()
credentials_from_hook = hook.get_credentials()
self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
self.assertIsNone(credentials_from_hook.token)
@mock.patch.object(AwsHook, 'get_connection')
def test_get_credentials_from_extra(self, mock_get_connection):
mock_connection = Connection(
extra='{"aws_access_key_id": "aws_access_key_id",'
'"aws_secret_access_key": "aws_secret_access_key"}'
)
mock_get_connection.return_value = mock_connection
hook = AwsHook()
credentials_from_hook = hook.get_credentials()
self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
self.assertIsNone(credentials_from_hook.token)
@mock.patch('airflow.contrib.hooks.aws_hook._parse_s3_config',
return_value=('aws_access_key_id', 'aws_secret_access_key'))
@mock.patch.object(AwsHook, 'get_connection')
def test_get_credentials_from_extra_with_s3_config_and_profile(
self, mock_get_connection, mock_parse_s3_config
):
mock_connection = Connection(
extra='{"s3_config_format": "aws", '
'"profile": "test", '
'"s3_config_file": "aws-credentials", '
'"region_name": "us-east-1"}')
mock_get_connection.return_value = mock_connection
hook = AwsHook()
hook._get_credentials(region_name=None)
mock_parse_s3_config.assert_called_with(
'aws-credentials',
'aws',
'test'
)
@unittest.skipIf(mock_sts is None, 'mock_sts package not present')
@mock.patch.object(AwsHook, 'get_connection')
@mock_sts
def test_get_credentials_from_role_arn(self, mock_get_connection):
mock_connection = Connection(
extra='{"role_arn":"arn:aws:iam::123456:role/role_arn"}')
mock_get_connection.return_value = mock_connection
hook = AwsHook()
credentials_from_hook = hook.get_credentials()
self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
self.assertEqual(credentials_from_hook.secret_key,
'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
self.assertEqual(credentials_from_hook.token,
'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
'3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')
@unittest.skipIf(mock_sts is None, 'mock_sts package not present')
@mock.patch.object(AwsHook, 'get_connection')
@mock_sts
def test_get_credentials_from_role_arn_with_external_id(self, mock_get_connection):
mock_connection = Connection(
extra='{"role_arn":"arn:aws:iam::123456:role/role_arn",'
' "external_id":"external_id"}')
mock_get_connection.return_value = mock_connection
hook = AwsHook()
credentials_from_hook = hook.get_credentials()
self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
self.assertEqual(credentials_from_hook.secret_key,
'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
self.assertEqual(credentials_from_hook.token,
'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
'3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')
@unittest.skipIf(mock_iam is None, 'mock_iam package not present')
@mock_iam
def test_expand_role(self):
conn = boto3.client('iam', region_name='us-east-1')
conn.create_role(RoleName='test-role', AssumeRolePolicyDocument='some policy')
hook = AwsHook()
arn = hook.expand_role('test-role')
expect_arn = conn.get_role(RoleName='test-role').get('Role').get('Arn')
self.assertEqual(arn, expect_arn)
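
    # A hedged sketch (not part of the original suite): AwsHook.expand_role
    # is expected to pass through names that already contain a '/', i.e.
    # full ARNs; the assertion below encodes that assumption.
    #
    # @unittest.skipIf(mock_iam is None, 'mock_iam package not present')
    # @mock_iam
    # def test_expand_role_passthrough_sketch(self):
    #     hook = AwsHook()
    #     arn = 'arn:aws:iam::123456:role/test-role'
    #     self.assertEqual(hook.expand_role(arn), arn)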
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 115,164,699,807,792,240 | -8,070,996,176,292,980,000 | 39.063636 | 90 | 0.621511 | false |
heran7/edx-platform | common/lib/sample-post.py | 29 | 2491 | # A simple script demonstrating how to have an external program post problem
# responses to an edx server.
#
# ***** NOTE *****
# This is not intended as a stable public API. In fact, it is almost certainly
# going to change. If you use this for some reason, be prepared to change your
# code.
#
# We will be working to define a stable public API for external programs. We
# don't have have one yet (Feb 2013).
import requests
import sys
import getpass
def prompt(msg, default=None, safe=False):
d = ' [{0}]'.format(default) if default is not None else ''
prompt = 'Enter {msg}{default}: '.format(msg=msg, default=d)
if not safe:
print prompt
x = sys.stdin.readline().strip()
else:
x = getpass.getpass(prompt=prompt)
if x == '' and default is not None:
return default
return x
server = 'https://www.edx.org'
course_id = 'HarvardX/PH207x/2012_Fall'
location = 'i4x://HarvardX/PH207x/problem/ex_practice_2'
#server = prompt('Server (no trailing slash)', 'http://127.0.0.1:8000')
#course_id = prompt('Course id', 'MITx/7012x/2013_Spring')
#location = prompt('problem location', 'i4x://MITx/7012x/problem/example_upload_answer')
value = prompt('value to upload')
username = prompt('username on server', 'victor@edx.org')
password = prompt('password', 'abc123', safe=True)
print "get csrf cookie"
session = requests.Session()
r = session.get(server + '/')
r.raise_for_status()
# print session.cookies
# for some reason, the server expects a header containing the csrf cookie, not just the
# cookie itself.
session.headers['X-CSRFToken'] = session.cookies['csrftoken']
# for https, need a referer header
session.headers['Referer'] = server + '/'
login_url = '/'.join([server, 'login'])
print "log in"
r = session.post(login_url, {'email': username, 'password': password, 'remember': 'false'})
#print "request headers: ", r.request.headers
#print "response headers: ", r.headers
r.raise_for_status()
url = '/'.join([server, 'courses', course_id, 'modx', location, 'problem_check'])
data = {'input_{0}_2_1'.format(location.replace('/','-').replace(':','').replace('--','-')): value}
#data = {'input_i4x-MITx-7012x-problem-example_upload_answer_2_1': value}
print "Posting to '{0}': {1}".format(url, data)
r = session.post(url, data)
r.raise_for_status()
print ("To see the uploaded answer, go to {server}/courses/{course_id}/jump_to/{location}"
.format(server=server, course_id=course_id, location=location))
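# A hedged follow-up check (not part of the original script); status_code is
# standard `requests` API, while the response body format is server-specific.
print "POST returned status", r.status_code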
| agpl-3.0 | -7,591,974,262,895,487,000 | -516,487,302,601,604,160 | 34.084507 | 100 | 0.679245 | false |
gandalfcode/gandalf | tests/paper_tests/binaryorbit.py | 1 | 3711 | #==============================================================================
# freefalltest.py
# Run the freefall collapse test using initial conditions specified in the
# file 'freefall.dat'.
#==============================================================================
from gandalf.analysis.facade import *
from gandalf.analysis.data_fetcher import *
from gandalf.analysis.compute import particle_data
from gandalf.analysis.SimBuffer import SimBuffer, BufferException
import time
import matplotlib.pyplot as plt
import numpy as np
import math
from matplotlib import rc
from mpl_toolkits.axes_grid1 import AxesGrid
#--------------------------------------------------------------------------------------------------
rc('font', **{'family': 'sans-serif', 'weight' : 'bold', 'size' : 16})
rc('text', usetex=True)
# Binary parameters
m1 = 0.5
m2 = 0.5
abin = 1.0
ebin = 0.5
etot0 = -0.5*m1*m2/abin
period = 2.0*math.pi*math.sqrt(abin*abin*abin/(m1 + m2))
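# Sanity check (added; not part of the original script): with G = 1 code
# units, Kepler's third law gives period = 2*pi*sqrt(abin**3/(m1 + m2)),
# so abin = 1 and m1 + m2 = 1 must yield a period of exactly 2*pi.
assert abs(period - 2.0*math.pi) < 1e-9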
xmin = -0.6
xmax = 2.1
ymin = -0.85
ymax = 0.85
xsize = xmax - xmin
ysize = ymax - ymin
CreateTimeData('x',particle_data,quantity='x')
CreateTimeData('y',particle_data,quantity='y')
# Leapfrog KDK
kdksim = newsim('binaryorbit.dat')
kdksim.SetParam('nbody','lfkdk')
setupsim()
run()
x_kdk = get_time_data("t","x")
y_kdk = get_time_data("t","y")
# Leapfrog DKD
dkdsim = newsim('binaryorbit.dat')
dkdsim.SetParam('nbody','lfdkd')
setupsim()
run()
x_dkd = get_time_data("t","x")
y_dkd = get_time_data("t","y")
# 4th-order Hermite
hermite4sim = newsim('binaryorbit.dat')
hermite4sim.SetParam('nbody','hermite4')
setupsim()
run()
x_hermite4 = get_time_data("t","x")
y_hermite4 = get_time_data("t","y")
# 4th-order Hermite TS
hermite4tssim = newsim('binaryorbit.dat')
hermite4tssim.SetParam('nbody','hermite4ts')
hermite4tssim.SetParam('Npec',5)
setupsim()
run()
x_4ts = get_time_data("t","x")
y_4ts = get_time_data("t","y")
# 6th-order Hermite
#hermite6tssim = newsim('binaryorbit.dat')
#hermite6tssim.SetParam('nbody','hermite6ts')
#hermite6tssim.SetParam('Npec',5)
#setupsim()
#run()
#x_6ts = get_time_data("t","x")
#y_6ts = get_time_data("t","y")
# Create matplotlib figure object with shared x-axis
#--------------------------------------------------------------------------------------------------
#fig, axarr = plt.subplots(2, 1, sharex='col', sharey='row', figsize=(10,4))
fig, axarr = plt.subplots(4, 1, figsize=(6,11), sharex='col', sharey='row')
fig.subplots_adjust(hspace=0.001, wspace=0.001)
fig.subplots_adjust(bottom=0.06, top=0.98, left=0.14, right=0.98)
axarr[0].set_ylabel(r"$y$")
axarr[0].set_ylim([ymin, ymax])
axarr[0].set_xlim([xmin, xmax])
axarr[0].plot(x_kdk.y_data, y_kdk.y_data, color="black", linestyle='-', label='Leapfrog KDK', lw=1.0)
axarr[0].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(a) Leapfrog-KDK", fontsize=12)
axarr[1].set_ylabel(r"$y$")
axarr[1].set_ylim([ymin, ymax])
axarr[1].plot(x_dkd.y_data, y_dkd.y_data, color="black", linestyle='-', label='Leapfrog DKD', lw=1.0)
axarr[1].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(b) Leapfrog-DKD", fontsize=12)
axarr[2].set_ylabel(r"$y$")
axarr[2].set_ylim([ymin, ymax])
axarr[2].plot(x_hermite4.y_data, y_hermite4.y_data, color="black", linestyle='-', label='4H', lw=1.0)
axarr[2].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(c) 4th-order Hermite", fontsize=12)
axarr[3].set_xlabel(r"$x$")
axarr[3].set_ylabel(r"$y$")
axarr[3].set_ylim([ymin, ymax])
axarr[3].plot(x_4ts.y_data, y_4ts.y_data, color="black", linestyle='-', label='4TS', lw=1.0)
axarr[3].text(xmin + 0.02*xsize, ymax - 0.1*ysize, "(d) 4th-order Hermite TS", fontsize=12)
plt.show()
fig.savefig('binaryorbit.pdf', dpi=50)
# Prevent program from closing before showing plot window
block()
| gpl-2.0 | -952,220,859,976,447,600 | -7,018,341,765,465,065,000 | 27.992188 | 101 | 0.618971 | false |
nwjs/chromium.src | tools/grit/grit/gather/rc.py | 11 | 11211 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Support for gathering resources from RC files.
'''
from __future__ import print_function
import re
from grit import exception
from grit import lazy_re
from grit import tclib
from grit.gather import regexp
# Find portions that need unescaping in resource strings. We need to be
# careful that a \\n is matched _first_ as a \\ rather than matching as
# a \ followed by a \n.
# TODO(joi) Handle ampersands if we decide to change them into <ph>
# TODO(joi) May need to handle other control characters than \n
_NEED_UNESCAPE = lazy_re.compile(r'""|\\\\|\\n|\\t')
# Find portions that need escaping to encode string as a resource string.
_NEED_ESCAPE = lazy_re.compile(r'"|\n|\t|\\|\&nbsp\;')
# How to escape certain characters
_ESCAPE_CHARS = {
'"' : '""',
'\n' : '\\n',
'\t' : '\\t',
'\\' : '\\\\',
  '&nbsp;' : '\xa0'
}
# How to unescape certain strings
_UNESCAPE_CHARS = dict([[value, key] for key, value in _ESCAPE_CHARS.items()])
class Section(regexp.RegexpGatherer):
'''A section from a resource file.'''
@staticmethod
def Escape(text):
'''Returns a version of 'text' with characters escaped that need to be
for inclusion in a resource section.'''
def Replace(match):
return _ESCAPE_CHARS[match.group()]
return _NEED_ESCAPE.sub(Replace, text)
@staticmethod
def UnEscape(text):
'''Returns a version of 'text' with escaped characters unescaped.'''
def Replace(match):
return _UNESCAPE_CHARS[match.group()]
return _NEED_UNESCAPE.sub(Replace, text)
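  # A hedged round-trip illustration (not part of the original module):
  #
  #   Section.Escape('a "b"\tc')      -> 'a ""b""\\tc'
  #   Section.UnEscape('a ""b""\\tc') -> 'a "b"\tc'
  #
  # i.e. Escape and UnEscape are inverses for the characters listed in
  # _ESCAPE_CHARS / _UNESCAPE_CHARS.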
def _RegExpParse(self, rexp, text_to_parse):
'''Overrides _RegExpParse to add shortcut group handling. Otherwise
the same.
'''
super(Section, self)._RegExpParse(rexp, text_to_parse)
if not self.is_skeleton and len(self.GetTextualIds()) > 0:
group_name = self.GetTextualIds()[0]
for c in self.GetCliques():
c.AddToShortcutGroup(group_name)
def ReadSection(self):
rc_text = self._LoadInputFile()
out = ''
begin_count = 0
assert self.extkey
first_line_re = re.compile(r'\s*' + self.extkey + r'\b')
for line in rc_text.splitlines(True):
if out or first_line_re.match(line):
out += line
# we stop once we reach the END for the outermost block.
begin_count_was = begin_count
if len(out) > 0 and line.strip() == 'BEGIN':
begin_count += 1
elif len(out) > 0 and line.strip() == 'END':
begin_count -= 1
if begin_count_was == 1 and begin_count == 0:
break
if len(out) == 0:
raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))
self.text_ = out.strip()
class Dialog(Section):
'''A resource section that contains a dialog resource.'''
# A typical dialog resource section looks like this:
#
# IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
# STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
# CAPTION "About"
# FONT 8, "System", 0, 0, 0x0
# BEGIN
# ICON IDI_KLONK,IDC_MYICON,14,9,20,20
# LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
# SS_NOPREFIX
# LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
# DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
# CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
# BS_AUTORADIOBUTTON,46,51,84,10
# END
# We are using a sorted set of keys, and we assume that the
# group name used for descriptions (type) will come after the "text"
# group in alphabetical order. We also assume that there cannot be
# more than one description per regular expression match.
# If that's not the case some descriptions will be clobbered.
dialog_re_ = lazy_re.compile(r'''
# The dialog's ID in the first line
(?P<id1>[A-Z0-9_]+)\s+DIALOG(EX)?
|
# The caption of the dialog
(?P<type1>CAPTION)\s+"(?P<text1>.*?([^"]|""))"\s
|
# Lines for controls that have text and an ID
\s+(?P<type2>[A-Z]+)\s+"(?P<text2>.*?([^"]|"")?)"\s*,\s*(?P<id2>[A-Z0-9_]+)\s*,
|
# Lines for controls that have text only
\s+(?P<type3>[A-Z]+)\s+"(?P<text3>.*?([^"]|"")?)"\s*,
|
# Lines for controls that reference other resources
\s+[A-Z]+\s+[A-Z0-9_]+\s*,\s*(?P<id3>[A-Z0-9_]*[A-Z][A-Z0-9_]*)
|
# This matches "NOT SOME_STYLE" so that it gets consumed and doesn't get
# matched by the next option (controls that have only an ID and then just
# numbers)
\s+NOT\s+[A-Z][A-Z0-9_]+
|
# Lines for controls that have only an ID and then just numbers
\s+[A-Z]+\s+(?P<id4>[A-Z0-9_]*[A-Z][A-Z0-9_]*)\s*,
''', re.MULTILINE | re.VERBOSE)
def Parse(self):
'''Knows how to parse dialog resource sections.'''
self.ReadSection()
self._RegExpParse(self.dialog_re_, self.text_)
class Menu(Section):
'''A resource section that contains a menu resource.'''
# A typical menu resource section looks something like this:
#
# IDC_KLONK MENU
# BEGIN
# POPUP "&File"
# BEGIN
# MENUITEM "E&xit", IDM_EXIT
# MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
# POPUP "gonk"
# BEGIN
# MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
# END
# END
# POPUP "&Help"
# BEGIN
# MENUITEM "&About ...", IDM_ABOUT
# END
# END
# Description used for the messages generated for menus, to explain to
# the translators how to handle them.
MENU_MESSAGE_DESCRIPTION = (
'This message represents a menu. Each of the items appears in sequence '
'(some possibly within sub-menus) in the menu. The XX01XX placeholders '
'serve to separate items. Each item contains an & (ampersand) character '
'in front of the keystroke that should be used as a shortcut for that item '
'in the menu. Please make sure that no two items in the same menu share '
'the same shortcut.'
)
# A dandy regexp to suck all the IDs and translateables out of a menu
# resource
menu_re_ = lazy_re.compile(r'''
# Match the MENU ID on the first line
^(?P<id1>[A-Z0-9_]+)\s+MENU
|
# Match the translateable caption for a popup menu
POPUP\s+"(?P<text1>.*?([^"]|""))"\s
|
# Match the caption & ID of a MENUITEM
MENUITEM\s+"(?P<text2>.*?([^"]|""))"\s*,\s*(?P<id2>[A-Z0-9_]+)
''', re.MULTILINE | re.VERBOSE)
def Parse(self):
'''Knows how to parse menu resource sections. Because it is important that
menu shortcuts are unique within the menu, we return each menu as a single
message with placeholders to break up the different menu items, rather than
    return a single message per menu item. We also add an automatic description
with instructions for the translators.'''
self.ReadSection()
self.single_message_ = tclib.Message(description=self.MENU_MESSAGE_DESCRIPTION)
self._RegExpParse(self.menu_re_, self.text_)
class Version(Section):
'''A resource section that contains a VERSIONINFO resource.'''
# A typical version info resource can look like this:
#
# VS_VERSION_INFO VERSIONINFO
# FILEVERSION 1,0,0,1
# PRODUCTVERSION 1,0,0,1
# FILEFLAGSMASK 0x3fL
# #ifdef _DEBUG
# FILEFLAGS 0x1L
# #else
# FILEFLAGS 0x0L
# #endif
# FILEOS 0x4L
# FILETYPE 0x2L
# FILESUBTYPE 0x0L
# BEGIN
# BLOCK "StringFileInfo"
# BEGIN
# BLOCK "040904e4"
# BEGIN
# VALUE "CompanyName", "TODO: <Company name>"
# VALUE "FileDescription", "TODO: <File description>"
# VALUE "FileVersion", "1.0.0.1"
# VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
# VALUE "InternalName", "res_format_test.dll"
# VALUE "OriginalFilename", "res_format_test.dll"
# VALUE "ProductName", "TODO: <Product name>"
# VALUE "ProductVersion", "1.0.0.1"
# END
# END
# BLOCK "VarFileInfo"
# BEGIN
# VALUE "Translation", 0x409, 1252
# END
# END
#
#
# In addition to the above fields, VALUE fields named "Comments" and
# "LegalTrademarks" may also be translateable.
version_re_ = lazy_re.compile(r'''
# Match the ID on the first line
^(?P<id1>[A-Z0-9_]+)\s+VERSIONINFO
|
# Match all potentially translateable VALUE sections
\s+VALUE\s+"
(
CompanyName|FileDescription|LegalCopyright|
ProductName|Comments|LegalTrademarks
)",\s+"(?P<text1>.*?([^"]|""))"\s
''', re.MULTILINE | re.VERBOSE)
def Parse(self):
'''Knows how to parse VERSIONINFO resource sections.'''
self.ReadSection()
self._RegExpParse(self.version_re_, self.text_)
# TODO(joi) May need to override the Translate() method to change the
# "Translation" VALUE block to indicate the correct language code.
class RCData(Section):
'''A resource section that contains some data .'''
# A typical rcdataresource section looks like this:
#
# IDR_BLAH RCDATA { 1, 2, 3, 4 }
dialog_re_ = lazy_re.compile(r'''
^(?P<id1>[A-Z0-9_]+)\s+RCDATA\s+(DISCARDABLE)?\s+\{.*?\}
''', re.MULTILINE | re.VERBOSE | re.DOTALL)
def Parse(self):
'''Implementation for resource types w/braces (not BEGIN/END)
'''
rc_text = self._LoadInputFile()
out = ''
begin_count = 0
openbrace_count = 0
assert self.extkey
first_line_re = re.compile(r'\s*' + self.extkey + r'\b')
for line in rc_text.splitlines(True):
if out or first_line_re.match(line):
out += line
# We stop once the braces balance (could happen in one line).
begin_count_was = begin_count
if len(out) > 0:
openbrace_count += line.count('{')
begin_count += line.count('{')
begin_count -= line.count('}')
if ((begin_count_was == 1 and begin_count == 0) or
(openbrace_count > 0 and begin_count == 0)):
break
if len(out) == 0:
raise exception.SectionNotFound('%s in file %s' % (self.extkey, self.rc_file))
self.text_ = out
self._RegExpParse(self.dialog_re_, out)
class Accelerators(Section):
'''An ACCELERATORS table.
'''
# A typical ACCELERATORS section looks like this:
#
# IDR_ACCELERATOR1 ACCELERATORS
# BEGIN
# "^C", ID_ACCELERATOR32770, ASCII, NOINVERT
# "^V", ID_ACCELERATOR32771, ASCII, NOINVERT
# VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT
# END
accelerators_re_ = lazy_re.compile(r'''
# Match the ID on the first line
^(?P<id1>[A-Z0-9_]+)\s+ACCELERATORS\s+
|
# Match accelerators specified as VK_XXX
\s+VK_[A-Z0-9_]+,\s*(?P<id2>[A-Z0-9_]+)\s*,
|
# Match accelerators specified as e.g. "^C"
\s+"[^"]*",\s+(?P<id3>[A-Z0-9_]+)\s*,
''', re.MULTILINE | re.VERBOSE)
def Parse(self):
'''Knows how to parse ACCELERATORS resource sections.'''
self.ReadSection()
self._RegExpParse(self.accelerators_re_, self.text_)
| bsd-3-clause | 836,820,075,483,443,300 | 1,876,702,480,425,192,400 | 31.685131 | 89 | 0.611631 | false |
sriprasanna/django-1.3.1 | django/contrib/gis/gdal/prototypes/generation.py | 321 | 3766 | """
This module contains functions that generate ctypes prototypes for the
GDAL routines.
"""
from ctypes import c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.prototypes.errcheck import \
check_arg_errcode, check_errcode, check_geom, check_geom_offset, \
check_pointer, check_srs, check_str_arg, check_string, check_const_string
class gdal_char_p(c_char_p):
pass
def double_output(func, argtypes, errcheck=False, strarg=False):
"Generates a ctypes function that returns a double value."
func.argtypes = argtypes
func.restype = c_double
if errcheck: func.errcheck = check_arg_errcode
if strarg: func.errcheck = check_str_arg
return func
def geom_output(func, argtypes, offset=None):
"""
Generates a function that returns a Geometry either by reference
or directly (if the return_geom keyword is set to True).
"""
# Setting the argument types
func.argtypes = argtypes
if not offset:
# When a geometry pointer is directly returned.
func.restype = c_void_p
func.errcheck = check_geom
else:
# Error code returned, geometry is returned by-reference.
func.restype = c_int
def geomerrcheck(result, func, cargs):
return check_geom_offset(result, func, cargs, offset)
func.errcheck = geomerrcheck
return func
def int_output(func, argtypes):
"Generates a ctypes function that returns an integer value."
func.argtypes = argtypes
func.restype = c_int
return func
def srs_output(func, argtypes):
"""
Generates a ctypes prototype for the given function with
the given C arguments that returns a pointer to an OGR
Spatial Reference System.
"""
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_srs
return func
def const_string_output(func, argtypes, offset=None):
func.argtypes = argtypes
if offset:
func.restype = c_int
else:
func.restype = c_char_p
def _check_const(result, func, cargs):
return check_const_string(result, func, cargs, offset=offset)
func.errcheck = _check_const
return func
def string_output(func, argtypes, offset=-1, str_result=False):
"""
Generates a ctypes prototype for the given function with the
given argument types that returns a string from a GDAL pointer.
The `const` flag indicates whether the allocated pointer should
    be freed via the GDAL library routine VSIFree -- but this applies
    only when `str_result` is True.
"""
func.argtypes = argtypes
if str_result:
# Use subclass of c_char_p so the error checking routine
# can free the memory at the pointer's address.
func.restype = gdal_char_p
else:
# Error code is returned
func.restype = c_int
# Dynamically defining our error-checking function with the
# given offset.
def _check_str(result, func, cargs):
return check_string(result, func, cargs,
offset=offset, str_result=str_result)
func.errcheck = _check_str
return func
def void_output(func, argtypes, errcheck=True):
"""
For functions that don't only return an error code that needs to
be examined.
"""
if argtypes: func.argtypes = argtypes
if errcheck:
# `errcheck` keyword may be set to False for routines that
# return void, rather than a status code.
func.restype = c_int
func.errcheck = check_errcode
else:
func.restype = None
return func
def voidptr_output(func, argtypes):
"For functions that return c_void_p."
func.argtypes = argtypes
func.restype = c_void_p
func.errcheck = check_pointer
return func
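# A hedged usage sketch (not part of this module): the generators above are
# applied to raw ctypes function pointers from the GDAL shared library, e.g.
#
#   from ctypes import CDLL, c_void_p
#   lgdal = CDLL('libgdal.so')  # library name is platform-specific
#   geom_area = double_output(lgdal.OGR_G_Area, [c_void_p])
#
# after which geom_area(geom_ptr) returns a Python float, with ctypes
# performing the argument and error checks declared here.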
| bsd-3-clause | -6,385,481,027,871,848,000 | -4,138,950,159,320,018,400 | 30.647059 | 77 | 0.668083 | false |
xkmato/yowsup | yowsup/layers/interface/interface.py | 27 | 3897 | from yowsup.layers import YowLayer, YowLayerEvent
from yowsup.layers.protocol_iq.protocolentities import IqProtocolEntity
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.auth import YowAuthenticationProtocolLayer
from yowsup.layers.protocol_receipts.protocolentities import OutgoingReceiptProtocolEntity
from yowsup.layers.protocol_acks.protocolentities import IncomingAckProtocolEntity
import inspect
class ProtocolEntityCallback(object):
def __init__(self, entityType):
self.entityType = entityType
def __call__(self, fn):
fn.callback = self.entityType
return fn
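# A hedged usage sketch (not part of this module): a subclass of the layer
# below marks handlers with the decorator, and they are wired up by entity
# tag in YowInterfaceLayer.__init__, e.g.:
#
# class EchoLayer(YowInterfaceLayer):
#     @ProtocolEntityCallback("message")
#     def onMessage(self, messageProtocolEntity):
#         receipt = OutgoingReceiptProtocolEntity(
#             messageProtocolEntity.getId(), messageProtocolEntity.getFrom())
#         self.toLower(receipt)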
class YowInterfaceLayer(YowLayer):
def __init__(self):
super(YowInterfaceLayer, self).__init__()
self.callbacks = {}
self.iqRegistry = {}
# self.receiptsRegistry = {}
members = inspect.getmembers(self, predicate=inspect.ismethod)
for m in members:
if hasattr(m[1], "callback"):
fname = m[0]
fn = m[1]
self.callbacks[fn.callback] = getattr(self, fname)
def _sendIq(self, iqEntity, onSuccess = None, onError = None):
assert iqEntity.getTag() == "iq", "Expected *IqProtocolEntity in _sendIq, got %s" % iqEntity.getTag()
self.iqRegistry[iqEntity.getId()] = (iqEntity, onSuccess, onError)
self.toLower(iqEntity)
# def _sendReceipt(self, outgoingReceiptProtocolEntity, onAck = None):
# assert outgoingReceiptProtocolEntity.__class__ == OutgoingReceiptProtocolEntity,\
    #         "Expected OutgoingReceiptProtocolEntity in _sendReceipt, got %s" % outgoingReceiptProtocolEntity.__class__
# self.receiptsRegistry[outgoingReceiptProtocolEntity.getId()] = (outgoingReceiptProtocolEntity, onAck)
# self.toLower(outgoingReceiptProtocolEntity)
# def processReceiptsRegistry(self, incomingAckProtocolEntity):
# '''
# entity: IncomingAckProtocolEntity
# '''
#
# if incomingAckProtocolEntity.__class__ != IncomingAckProtocolEntity:
# return False
#
# receipt_id = incomingAckProtocolEntity.getId()
# if receipt_id in self.receiptsRegistry:
# originalReceiptEntity, ackClbk = self.receiptsRegistry[receipt_id]
# del self.receiptsRegistry[receipt_id]
#
# if ackClbk:
# ackClbk(incomingAckProtocolEntity, originalReceiptEntity)
#
# return True
#
# return False
def processIqRegistry(self, entity):
"""
:type entity: IqProtocolEntity
"""
if entity.getTag() == "iq":
iq_id = entity.getId()
if iq_id in self.iqRegistry:
originalIq, successClbk, errorClbk = self.iqRegistry[iq_id]
del self.iqRegistry[iq_id]
if entity.getType() == IqProtocolEntity.TYPE_RESULT and successClbk:
successClbk(entity, originalIq)
elif entity.getType() == IqProtocolEntity.TYPE_ERROR and errorClbk:
errorClbk(entity, originalIq)
return True
return False
def getOwnJid(self, full = True):
return self.getLayerInterface(YowAuthenticationProtocolLayer).getUsername(full)
def connect(self):
self.getLayerInterface(YowNetworkLayer).connect()
def disconnect(self):
disconnectEvent = YowLayerEvent(YowNetworkLayer.EVENT_STATE_DISCONNECT)
self.broadcastEvent(disconnectEvent)
def send(self, data):
self.toLower(data)
def receive(self, entity):
if not self.processIqRegistry(entity):
entityType = entity.getTag()
if entityType in self.callbacks:
self.callbacks[entityType](entity)
else:
self.toUpper(entity)
def __str__(self):
return "Interface Layer"
| gpl-3.0 | 6,913,453,337,735,876,000 | -2,186,289,408,288,188,200 | 36.471154 | 120 | 0.645625 | false |
leiferikb/bitpop | src/third_party/libxml/src/genUnicode.py | 337 | 12985 | #!/usr/bin/python -u
#
# Original script modified in November 2003 to take advantage of
# the character-validation range routines, and updated to the
# current Unicode information (Version 4.0.1)
#
# NOTE: there is an 'alias' facility for blocks which are not present in
# the current release, but are needed for ABI compatibility. This
# must be accomplished MANUALLY! Please see the comments below under
# 'blockAliases'
#
import sys
import string
import time
webpage = "http://www.unicode.org/Public/4.0-Update1/UCD-4.0.1.html"
sources = "Blocks-4.0.1.txt UnicodeData-4.0.1.txt"
#
# blockAliases is a small hack - it is used for mapping block names which
# were were used in the 3.1 release, but are missing or changed in the current
# release. The format is "OldBlockName:NewBlockName1[,NewBlockName2[,...]]"
blockAliases = []
blockAliases.append("CombiningMarksforSymbols:CombiningDiacriticalMarksforSymbols")
blockAliases.append("Greek:GreekandCoptic")
blockAliases.append("PrivateUse:PrivateUseArea,SupplementaryPrivateUseArea-A," +
"SupplementaryPrivateUseArea-B")
# minTableSize gives the minimum number of ranges which must be present
# before a range table is produced. If there are less than this
# number, inline comparisons are generated
minTableSize = 8
(blockfile, catfile) = string.split(sources)
#
# Now process the "blocks" file, reducing it to a dictionary
# indexed by blockname, containing a tuple with the applicable
# block range
#
BlockNames = {}
try:
blocks = open(blockfile, "r")
except:
print "Missing %s, aborting ..." % blockfile
sys.exit(1)
for line in blocks.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
range = string.strip(fields[0])
(start, end) = string.split(range, "..")
name = string.strip(fields[1])
name = string.replace(name, ' ', '')
except:
print "Failed to process line: %s" % (line)
continue
start = "0x" + start
end = "0x" + end
try:
BlockNames[name].append((start, end))
except:
BlockNames[name] = [(start, end)]
blocks.close()
print "Parsed %d blocks descriptions" % (len(BlockNames.keys()))
for block in blockAliases:
alias = string.split(block,':')
alist = string.split(alias[1],',')
for comp in alist:
if BlockNames.has_key(comp):
if alias[0] not in BlockNames:
BlockNames[alias[0]] = []
for r in BlockNames[comp]:
BlockNames[alias[0]].append(r)
else:
print "Alias %s: %s not in Blocks" % (alias[0], comp)
continue
#
# Next process the Categories file. This is more complex, since
# the file is in code sequence, and we need to invert it. We use
# a dictionary with index category-name, with each entry containing
# all the ranges (codepoints) of that category. Note that category
# names comprise two parts - the general category, and the "subclass"
# within that category. Therefore, both "general category" (which is
# the first character of the 2-character category-name) and the full
# (2-character) name are entered into this dictionary.
#
try:
data = open(catfile, "r")
except:
print "Missing %s, aborting ..." % catfile
sys.exit(1)
nbchar = 0
Categories = {}
for line in data.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
point = string.strip(fields[0])
value = 0
while point != '':
value = value * 16
if point[0] >= '0' and point[0] <= '9':
value = value + ord(point[0]) - ord('0')
elif point[0] >= 'A' and point[0] <= 'F':
value = value + 10 + ord(point[0]) - ord('A')
elif point[0] >= 'a' and point[0] <= 'f':
value = value + 10 + ord(point[0]) - ord('a')
point = point[1:]
name = fields[2]
except:
print "Failed to process line: %s" % (line)
continue
nbchar = nbchar + 1
# update entry for "full name"
try:
Categories[name].append(value)
except:
try:
Categories[name] = [value]
except:
print "Failed to process line: %s" % (line)
# update "general category" name
try:
Categories[name[0]].append(value)
except:
try:
Categories[name[0]] = [value]
except:
print "Failed to process line: %s" % (line)
data.close()
print "Parsed %d chars, generating %d categories" % (nbchar, len(Categories.keys()))
#
# The data is now all read. Time to process it into a more useful form.
#
# reduce the number list into ranges
for cat in Categories.keys():
list = Categories[cat]
start = -1
prev = -1
end = -1
ranges = []
for val in list:
if start == -1:
start = val
prev = val
continue
elif val == prev + 1:
prev = val
continue
elif prev == start:
ranges.append((prev, prev))
start = val
prev = val
continue
else:
ranges.append((start, prev))
start = val
prev = val
continue
if prev == start:
ranges.append((prev, prev))
else:
ranges.append((start, prev))
Categories[cat] = ranges
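# Worked example (added comment, not in the original script): the reduction
# above turns a sorted code-point list such as
#   [0x41, 0x42, 0x43, 0x61]
# into the equivalent range list
#   [(0x41, 0x43), (0x61, 0x61)]
# which is the form consumed by the table emitters below.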
#
# Assure all data is in alphabetic order, since we will be doing binary
# searches on the tables.
#
bkeys = BlockNames.keys()
bkeys.sort()
ckeys = Categories.keys()
ckeys.sort()
#
# Generate the resulting files
#
try:
header = open("include/libxml/xmlunicode.h", "w")
except:
print "Failed to open include/libxml/xmlunicode.h"
sys.exit(1)
try:
output = open("xmlunicode.c", "w")
except:
print "Failed to open xmlunicode.c"
sys.exit(1)
date = time.asctime(time.localtime(time.time()))
header.write(
"""/*
* Summary: Unicode character APIs
* Description: API for the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Author: Daniel Veillard
*/
#ifndef __XML_UNICODE_H__
#define __XML_UNICODE_H__
#include <libxml/xmlversion.h>
#ifdef LIBXML_UNICODE_ENABLED
#ifdef __cplusplus
extern "C" {
#endif
""" % (webpage, date, sources));
output.write(
"""/*
* xmlunicode.c: this module implements the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Daniel Veillard <veillard@redhat.com>
*/
#define IN_LIBXML
#include "libxml.h"
#ifdef LIBXML_UNICODE_ENABLED
#include <string.h>
#include <libxml/xmlversion.h>
#include <libxml/xmlunicode.h>
#include <libxml/chvalid.h>
typedef int (xmlIntFunc)(int); /* just to keep one's mind untwisted */
typedef struct {
const char *rangename;
xmlIntFunc *func;
} xmlUnicodeRange;
typedef struct {
xmlUnicodeRange *table;
int numentries;
} xmlUnicodeNameTable;
static xmlIntFunc *xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname);
static xmlUnicodeRange xmlUnicodeBlocks[] = {
""" % (webpage, date, sources));
flag = 0
for block in bkeys:
name = string.replace(block, '-', '')
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIs%s}' % (block, name))
output.write('};\n\n')
output.write('static xmlUnicodeRange xmlUnicodeCats[] = {\n')
flag = 0
for name in ckeys:
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIsCat%s}' % (name, name))
output.write('};\n\n')
#
# For any categories with more than minTableSize ranges we generate
# a range table suitable for xmlCharInRange
#
for name in ckeys:
if len(Categories[name]) > minTableSize:
numshort = 0
numlong = 0
ranges = Categories[name]
sptr = "NULL"
lptr = "NULL"
for range in ranges:
(low, high) = range
if high < 0x10000:
if numshort == 0:
pline = "static const xmlChSRange xml%sS[] = {" % name
sptr = "xml%sS" % name
else:
pline += ", "
numshort += 1
else:
if numlong == 0:
if numshort > 0:
output.write(pline + " };\n")
pline = "static const xmlChLRange xml%sL[] = {" % name
lptr = "xml%sL" % name
else:
pline += ", "
numlong += 1
if len(pline) > 60:
output.write(pline + "\n")
pline = " "
pline += "{%s, %s}" % (hex(low), hex(high))
output.write(pline + " };\nstatic xmlChRangeGroup xml%sG = {%s,%s,%s,%s};\n\n"
% (name, numshort, numlong, sptr, lptr))
output.write(
"""static xmlUnicodeNameTable xmlUnicodeBlockTbl = {xmlUnicodeBlocks, %s};
static xmlUnicodeNameTable xmlUnicodeCatTbl = {xmlUnicodeCats, %s};
/**
* xmlUnicodeLookup:
* @tptr: pointer to the name table
* @name: name to be found
*
* binary table lookup for user-supplied name
*
* Returns pointer to range function if found, otherwise NULL
*/
static xmlIntFunc
*xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname) {
int low, high, mid, cmp;
xmlUnicodeRange *sptr;
if ((tptr == NULL) || (tname == NULL)) return(NULL);
low = 0;
high = tptr->numentries - 1;
sptr = tptr->table;
while (low <= high) {
mid = (low + high) / 2;
if ((cmp=strcmp(tname, sptr[mid].rangename)) == 0)
return (sptr[mid].func);
if (cmp < 0)
high = mid - 1;
else
low = mid + 1;
}
return (NULL);
}
""" % (len(BlockNames), len(Categories)) )
for block in bkeys:
name = string.replace(block, '-', '')
header.write("XMLPUBFUN int XMLCALL xmlUCSIs%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIs%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Block\n"%
(block))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIs%s(int code) {\n return(" % name)
flag = 0
for (start, end) in BlockNames[block]:
if flag:
output.write(" ||\n ")
else:
flag = 1
output.write("((code >= %s) && (code <= %s))" % (start, end))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsBlock\t(int code, const char *block);\n\n")
output.write(
"""/**
* xmlUCSIsBlock:
* @code: UCS code point
* @block: UCS block name
*
* Check whether the character is part of the UCS Block
*
* Returns 1 if true, 0 if false and -1 on unknown block
*/
int
xmlUCSIsBlock(int code, const char *block) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeBlockTbl, block);
if (func == NULL)
return (-1);
return (func(code));
}
""")
for name in ckeys:
ranges = Categories[name]
header.write("XMLPUBFUN int XMLCALL xmlUCSIsCat%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIsCat%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Category\n"%
(name))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIsCat%s(int code) {\n" % name)
if len(Categories[name]) > minTableSize:
output.write(" return(xmlCharInRange((unsigned int)code, &xml%sG)"
% name)
else:
start = 1
for range in ranges:
(begin, end) = range;
if start:
output.write(" return(");
start = 0
else:
output.write(" ||\n ");
if (begin == end):
output.write("(code == %s)" % (hex(begin)))
else:
output.write("((code >= %s) && (code <= %s))" % (
hex(begin), hex(end)))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsCat\t(int code, const char *cat);\n")
output.write(
"""/**
* xmlUCSIsCat:
* @code: UCS code point
* @cat: UCS Category name
*
* Check whether the character is part of the UCS Category
*
* Returns 1 if true, 0 if false and -1 on unknown category
*/
int
xmlUCSIsCat(int code, const char *cat) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeCatTbl, cat);
if (func == NULL)
return (-1);
return (func(code));
}
#define bottom_xmlunicode
#include "elfgcchack.h"
#endif /* LIBXML_UNICODE_ENABLED */
""")
header.write("""
#ifdef __cplusplus
}
#endif
#endif /* LIBXML_UNICODE_ENABLED */
#endif /* __XML_UNICODE_H__ */
""");
header.close()
output.close()
| gpl-3.0 | 2,775,788,591,743,521,000 | 2,242,675,844,318,133,500 | 26.165272 | 89 | 0.602695 | false |
guewen/odoo | openerp/addons/base/res/res_currency.py | 17 | 12786 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
import time
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools import float_round, float_is_zero, float_compare
from openerp.tools.translate import _
CURRENCY_DISPLAY_PATTERN = re.compile(r'(\w+)\s*(?:\((.*)\))?')
class res_currency(osv.osv):
def _current_rate(self, cr, uid, ids, name, arg, context=None):
return self._get_current_rate(cr, uid, ids, context=context)
def _current_rate_silent(self, cr, uid, ids, name, arg, context=None):
return self._get_current_rate(cr, uid, ids, raise_on_no_rate=False, context=context)
def _get_current_rate(self, cr, uid, ids, raise_on_no_rate=True, context=None):
if context is None:
context = {}
res = {}
date = context.get('date') or time.strftime('%Y-%m-%d')
# Convert False values to None ...
currency_rate_type = context.get('currency_rate_type_id') or None
# ... and use 'is NULL' instead of '= some-id'.
operator = '=' if currency_rate_type else 'is'
for id in ids:
cr.execute('SELECT rate FROM res_currency_rate '
'WHERE currency_id = %s '
'AND name <= %s '
'AND currency_rate_type_id ' + operator + ' %s '
'ORDER BY name desc LIMIT 1',
(id, date, currency_rate_type))
if cr.rowcount:
res[id] = cr.fetchone()[0]
elif not raise_on_no_rate:
res[id] = 0
else:
currency = self.browse(cr, uid, id, context=context)
                raise osv.except_osv(_('Error!'), _("No currency rate associated for currency '%s' for the given period") % (currency.name,))
return res
_name = "res.currency"
_description = "Currency"
_columns = {
# Note: 'code' column was removed as of v6.0, the 'name' should now hold the ISO code.
'name': fields.char('Currency', size=32, required=True, help="Currency Code (ISO 4217)"),
'symbol': fields.char('Symbol', size=4, help="Currency sign, to be used when printing amounts."),
'rate': fields.function(_current_rate, string='Current Rate', digits=(12,6),
help='The rate of the currency to the currency of rate 1.'),
# Do not use for computation ! Same as rate field with silent failing
'rate_silent': fields.function(_current_rate_silent, string='Current Rate', digits=(12,6),
help='The rate of the currency to the currency of rate 1 (0 if no rate defined).'),
'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),
'accuracy': fields.integer('Computational Accuracy'),
'rounding': fields.float('Rounding Factor', digits=(12,6)),
'active': fields.boolean('Active'),
'company_id':fields.many2one('res.company', 'Company'),
'date': fields.date('Date'),
'base': fields.boolean('Base'),
        'position': fields.selection([('after','After Amount'),('before','Before Amount')], 'Symbol Position', help="Determines where the currency symbol should be placed, before or after the amount.")
}
_defaults = {
'active': 1,
'position' : 'after',
'rounding': 0.01,
'accuracy': 4,
'company_id': False,
}
_sql_constraints = [
# this constraint does not cover all cases due to SQL NULL handling for company_id,
# so it is complemented with a unique index (see below). The constraint and index
# share the same prefix so that IntegrityError triggered by the index will be caught
# and reported to the user with the constraint's error message.
('unique_name_company_id', 'unique (name, company_id)', 'The currency code must be unique per company!'),
]
_order = "name"
def init(self, cr):
# CONSTRAINT/UNIQUE INDEX on (name,company_id)
# /!\ The unique constraint 'unique_name_company_id' is not sufficient, because SQL92
# only support field names in constraint definitions, and we need a function here:
# we need to special-case company_id to treat all NULL company_id as equal, otherwise
# we would allow duplicate "global" currencies (all having company_id == NULL)
cr.execute("""SELECT indexname FROM pg_indexes WHERE indexname = 'res_currency_unique_name_company_id_idx'""")
if not cr.fetchone():
cr.execute("""CREATE UNIQUE INDEX res_currency_unique_name_company_id_idx
ON res_currency
(name, (COALESCE(company_id,-1)))""")
def read(self, cr, user, ids, fields=None, context=None, load='_classic_read'):
res = super(res_currency, self).read(cr, user, ids, fields, context, load)
currency_rate_obj = self.pool.get('res.currency.rate')
values = res
if not isinstance(values, list):
values = [values]
for r in values:
if r.__contains__('rate_ids'):
rates=r['rate_ids']
if rates:
currency_date = currency_rate_obj.read(cr, user, rates[0], ['name'])['name']
r['date'] = currency_date
return res
def name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
results = super(res_currency,self)\
.name_search(cr, user, name, args, operator=operator, context=context, limit=limit)
if not results:
name_match = CURRENCY_DISPLAY_PATTERN.match(name)
if name_match:
results = super(res_currency,self)\
.name_search(cr, user, name_match.group(1), args, operator=operator, context=context, limit=limit)
return results
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
reads = self.read(cr, uid, ids, ['name','symbol'], context=context, load='_classic_write')
return [(x['id'], tools.ustr(x['name'])) for x in reads]
def round(self, cr, uid, currency, amount):
"""Return ``amount`` rounded according to ``currency``'s
rounding rules.
:param browse_record currency: currency for which we are rounding
:param float amount: the amount to round
:return: rounded float
"""
return float_round(amount, precision_rounding=currency.rounding)
def compare_amounts(self, cr, uid, currency, amount1, amount2):
"""Compare ``amount1`` and ``amount2`` after rounding them according to the
        given currency's precision.
An amount is considered lower/greater than another amount if their rounded
value is different. This is not the same as having a non-zero difference!
For example 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0.
However 0.006 and 0.002 are considered different (returns 1) because
they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
:param browse_record currency: currency for which we are rounding
:param float amount1: first amount to compare
:param float amount2: second amount to compare
:return: (resp.) -1, 0 or 1, if ``amount1`` is (resp.) lower than,
equal to, or greater than ``amount2``, according to
``currency``'s rounding.
"""
return float_compare(amount1, amount2, precision_rounding=currency.rounding)
def is_zero(self, cr, uid, currency, amount):
"""Returns true if ``amount`` is small enough to be treated as
zero according to ``currency``'s rounding rules.
Warning: ``is_zero(amount1-amount2)`` is not always equivalent to
``compare_amounts(amount1,amount2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param browse_record currency: currency for which we are rounding
:param float amount: amount to compare with currency's zero
"""
return float_is_zero(amount, precision_rounding=currency.rounding)
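
    # A hedged illustration (not part of the original module) of the rounding
    # semantics documented above, in terms of the helpers these methods wrap:
    #
    #   float_compare(0.006, 0.002, precision_rounding=0.01) ->  1
    #   float_compare(1.432, 1.431, precision_rounding=0.01) ->  0
    #   float_is_zero(0.004, precision_rounding=0.01)        -> True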
def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
if context is None:
context = {}
ctx = context.copy()
ctx.update({'currency_rate_type_id': ctx.get('currency_rate_type_from')})
from_currency = self.browse(cr, uid, from_currency.id, context=ctx)
ctx.update({'currency_rate_type_id': ctx.get('currency_rate_type_to')})
to_currency = self.browse(cr, uid, to_currency.id, context=ctx)
if from_currency.rate == 0 or to_currency.rate == 0:
date = context.get('date', time.strftime('%Y-%m-%d'))
if from_currency.rate == 0:
currency_symbol = from_currency.symbol
else:
currency_symbol = to_currency.symbol
raise osv.except_osv(_('Error'), _('No rate found \n' \
'for the currency: %s \n' \
'at the date: %s') % (currency_symbol, date))
return to_currency.rate/from_currency.rate
def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount,
round=True, currency_rate_type_from=False, currency_rate_type_to=False, context=None):
if not context:
context = {}
if not from_currency_id:
from_currency_id = to_currency_id
if not to_currency_id:
to_currency_id = from_currency_id
xc = self.browse(cr, uid, [from_currency_id,to_currency_id], context=context)
from_currency = (xc[0].id == from_currency_id and xc[0]) or xc[1]
to_currency = (xc[0].id == to_currency_id and xc[0]) or xc[1]
if (to_currency_id == from_currency_id) and (currency_rate_type_from == currency_rate_type_to):
if round:
return self.round(cr, uid, to_currency, from_amount)
else:
return from_amount
else:
context.update({'currency_rate_type_from': currency_rate_type_from, 'currency_rate_type_to': currency_rate_type_to})
rate = self._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
if round:
return self.round(cr, uid, to_currency, from_amount * rate)
else:
return from_amount * rate
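
    # A hedged usage sketch (not part of the original module): business code
    # typically converts amounts like this (the ids are illustrative):
    #
    #   cur_obj = self.pool.get('res.currency')
    #   amount_eur = cur_obj.compute(cr, uid, usd_id, eur_id, 100.0,
    #                                context={'date': '2014-01-01'})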
class res_currency_rate_type(osv.osv):
_name = "res.currency.rate.type"
_description = "Currency Rate Type"
_columns = {
'name': fields.char('Name', size=64, required=True, translate=True),
}
class res_currency_rate(osv.osv):
_name = "res.currency.rate"
_description = "Currency Rate"
_columns = {
'name': fields.datetime('Date', required=True, select=True),
'rate': fields.float('Rate', digits=(12, 6), help='The rate of the currency to the currency of rate 1'),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'currency_rate_type_id': fields.many2one('res.currency.rate.type', 'Currency Rate Type', help="Allow you to define your own currency rate types, like 'Average' or 'Year to Date'. Leave empty if you simply want to use the normal 'spot' rate type"),
}
_defaults = {
'name': lambda *a: time.strftime('%Y-%m-%d'),
}
_order = "name desc"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -3,543,029,577,060,721,000 | -5,383,214,504,276,762,000 | 47.988506 | 255 | 0.601439 | false |
debugger06/MiroX | osx/build/bdist.macosx-10.5-fat/lib.macosx-10.5-fat-2.7/miro/iconcache.py | 3 | 12195 | # Miro - an RSS based video player application
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
# Participatory Culture Foundation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
#
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
import os
import logging
import collections
from miro import httpclient
from miro import eventloop
from miro.database import DDBObject, ObjectNotFoundError
from miro.download_utils import next_free_filename, get_file_url_path
from miro.util import unicodify
from miro.plat.utils import unicode_to_filename
from miro import app
from miro import prefs
from miro import fileutil
RUNNING_MAX = 3
class IconCacheUpdater:
def __init__(self):
self.idle = collections.deque()
self.vital = collections.deque()
self.running_count = 0
self.in_shutdown = False
def request_update(self, item, is_vital=False):
if is_vital:
item.dbItem.confirm_db_thread()
if (item.filename and fileutil.access(item.filename, os.R_OK)
and item.url == item.dbItem.get_thumbnail_url()):
is_vital = False
if self.running_count < RUNNING_MAX:
eventloop.add_idle(item.request_icon, "Icon Request")
self.running_count += 1
else:
if is_vital:
self.vital.append(item)
else:
self.idle.append(item)
def update_finished(self):
if self.in_shutdown:
self.running_count -= 1
return
if len(self.vital) > 0:
item = self.vital.popleft()
elif len(self.idle) > 0:
item = self.idle.popleft()
else:
self.running_count -= 1
return
eventloop.add_idle(item.request_icon, "Icon Request")
@eventloop.as_idle
def clear_vital(self):
self.vital = collections.deque()
@eventloop.as_idle
def shutdown(self):
self.in_shutdown = True
# FIXME - should create an IconCacheUpdater at startup, NOT at
# module import time.
icon_cache_updater = IconCacheUpdater()
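# Editor's note: hedged sketch, NOT part of the original file. Callers hand
# IconCache objects to the module-level updater; at most RUNNING_MAX fetches
# run concurrently and the remainder wait on the vital/idle deques.
def _example_queue_icon_updates(icon_caches, vital=False):
    for ic in icon_caches:
        # vital requests are drained before idle ones in update_finished()
        icon_cache_updater.request_update(ic, is_vital=vital)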
class IconCache(DDBObject):
def setup_new(self, dbItem):
self.etag = None
self.modified = None
self.filename = None
self.url = None
self.updating = False
self.needsUpdate = False
self.dbItem = dbItem
self.removed = False
self.request_update(is_vital=dbItem.ICON_CACHE_VITAL)
@classmethod
def orphaned_view(cls):
"""Downloaders with no items associated with them."""
return cls.make_view("id NOT IN (SELECT icon_cache_id from item "
"UNION select icon_cache_id from channel_guide "
"UNION select icon_cache_id from feed)")
@classmethod
def all_filenames(cls):
return [r[0] for r in cls.select(["filename"], 'filename IS NOT NULL')]
def icon_changed(self, needs_save=True):
self.signal_change(needs_save=needs_save)
if hasattr(self.dbItem, 'icon_changed'):
self.dbItem.icon_changed()
else:
self.dbItem.signal_change(needs_save=False)
def remove(self):
self.removed = True
if self.filename:
self.remove_file(self.filename)
DDBObject.remove(self)
def reset(self):
if self.filename:
self.remove_file(self.filename)
self.filename = None
self.url = None
self.etag = None
self.modified = None
self.removed = False
self.updating = False
self.needsUpdate = False
self.icon_changed()
def remove_file(self, filename):
try:
fileutil.remove(filename)
except OSError:
pass
def error_callback(self, url, error=None):
self.dbItem.confirm_db_thread()
if self.removed:
icon_cache_updater.update_finished()
return
# Don't clear the cache on an error.
if self.url != url:
self.url = url
self.etag = None
self.modified = None
self.icon_changed()
self.updating = False
if self.needsUpdate:
self.needsUpdate = False
self.request_update(True)
icon_cache_updater.update_finished()
def update_icon_cache(self, url, info):
self.dbItem.confirm_db_thread()
if self.removed:
icon_cache_updater.update_finished()
return
needs_save = False
needsChange = False
        if info is None or info['status'] not in (200, 304):
self.error_callback(url, "bad response")
return
try:
# Our cache is good. Hooray!
if info['status'] == 304:
return
needsChange = True
# We have to update it, and if we can't write to the file, we
# should pick a new filename.
if ((self.filename and
not fileutil.access(self.filename, os.R_OK | os.W_OK))):
self.filename = None
cachedir = app.config.get(prefs.ICON_CACHE_DIRECTORY)
try:
fileutil.makedirs(cachedir)
except OSError:
pass
try:
# Write to a temp file.
if self.filename:
tmp_filename = self.filename + ".part"
else:
tmp_filename = os.path.join(cachedir, info["filename"]) + ".part"
tmp_filename, output = next_free_filename(tmp_filename)
output.write(info["body"])
output.close()
except IOError:
self.remove_file(tmp_filename)
return
except ValueError:
logging.warn('update_icon_cache: next_free_filename failed '
'#1, candidate = %r', tmp_filename)
return
filename = unicode(info["filename"])
filename = unicode_to_filename(filename, cachedir)
filename = os.path.join(cachedir, filename)
needs_save = True
try:
filename, fp = next_free_filename(filename)
except ValueError:
logging.warn('update_icon_cache: next_free_filename failed '
'#2, candidate = %r', filename)
return
if self.filename:
filename = self.filename
self.filename = None
self.remove_file(filename)
# we need to move the file here--so we close the file
# pointer and then move the file.
fp.close()
try:
self.remove_file(filename)
fileutil.rename(tmp_filename, filename)
except (IOError, OSError):
logging.exception("iconcache: fileutil.move failed")
filename = None
self.filename = filename
etag = unicodify(info.get("etag"))
modified = unicodify(info.get("modified"))
if self.etag != etag:
needs_save = True
self.etag = etag
if self.modified != modified:
needs_save = True
self.modified = modified
if self.url != url:
needs_save = True
self.url = url
finally:
if needsChange:
self.icon_changed(needs_save=needs_save)
self.updating = False
if self.needsUpdate:
self.needsUpdate = False
self.request_update(True)
icon_cache_updater.update_finished()
def request_icon(self):
if self.removed:
icon_cache_updater.update_finished()
return
self.dbItem.confirm_db_thread()
if self.updating:
self.needsUpdate = True
icon_cache_updater.update_finished()
return
if hasattr(self.dbItem, "get_thumbnail_url"):
url = self.dbItem.get_thumbnail_url()
else:
url = self.url
# Only verify each icon once per run unless the url changes
if (url == self.url and self.filename
and fileutil.access(self.filename, os.R_OK)):
icon_cache_updater.update_finished()
return
self.updating = True
        # Local paths and file:// URLs are resolved later by get_filename(),
        # so skip the HTTP fetch for them.
if url is None or url.startswith(u"/") or url.startswith(u"file://"):
self.error_callback(url)
return
# Last try, get the icon from HTTP.
httpclient.grab_url(url, lambda info: self.update_icon_cache(url, info),
lambda error: self.error_callback(url, error))
def request_update(self, is_vital=False):
if hasattr(self, "updating") and hasattr(self, "dbItem"):
if self.removed:
return
icon_cache_updater.request_update(self, is_vital=is_vital)
def setup_restored(self):
self.removed = False
self.updating = False
self.needsUpdate = False
def is_valid(self):
self.dbItem.confirm_db_thread()
return self.filename and fileutil.exists(self.filename)
def get_filename(self):
self.dbItem.confirm_db_thread()
if self.url and self.url.startswith(u"file://"):
return get_file_url_path(self.url)
elif self.url and self.url.startswith(u"/"):
return unicode_to_filename(self.url)
else:
return self.filename
def make_icon_cache(obj):
if obj.icon_cache_id is not None:
try:
icon_cache = IconCache.get_by_id(obj.icon_cache_id)
except ObjectNotFoundError:
logging.warn("Icon Cache Not in database for %s (id: %s)",
obj, obj.icon_cache_id)
else:
icon_cache.dbItem = obj
icon_cache.request_update()
return icon_cache
return IconCache(obj)
class IconCacheOwnerMixin(object):
"""Mixin class for objects that own IconCache instances
(currently, Feed, Item and ChannelGuide).
"""
def setup_new_icon_cache(self):
self._icon_cache = IconCache(self)
self.icon_cache_id = self._icon_cache.id
# the icon_cache attribute is fetched lazily
def _icon_cache_getter(self):
try:
return self._icon_cache
except AttributeError:
self._icon_cache = make_icon_cache(self)
if self.icon_cache_id != self._icon_cache.id:
self.icon_cache_id = self._icon_cache.id
self.signal_change()
return self._icon_cache
icon_cache = property(_icon_cache_getter)
def remove_icon_cache(self):
if self.icon_cache_id is not None:
self.icon_cache.remove()
self._icon_cache = self.icon_cache_id = None
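# Editor's note: hedged sketch, NOT part of Miro. A minimal owner class
# showing the contract IconCacheOwnerMixin expects: an 'icon_cache_id'
# attribute plus DDBObject-style persistence; the class name is hypothetical.
class _ExampleIconOwner(IconCacheOwnerMixin):
    ICON_CACHE_VITAL = False
    def __init__(self):
        self.icon_cache_id = None
        # creates the IconCache row and records its id on this object
        self.setup_new_icon_cache()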
| gpl-2.0 | -8,910,286,071,940,640,000 | 6,639,874,407,141,681,000 | 32.595041 | 85 | 0.583436 | false |
pchauncey/ansible | lib/ansible/modules/network/f5/bigip_gtm_datacenter.py | 26 | 11100 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
    manipulate the data center definitions in a BIG-IP.
version_added: "2.2"
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
enabled:
description:
      - Whether the data center should be enabled. At least one of C(state) and
        C(enabled) is required.
choices:
- yes
- no
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: true
state:
description:
- The state of the datacenter on the BIG-IP. When C(present), guarantees
that the data center exists. When C(absent) removes the data center
from the BIG-IP. C(enabled) will enable the data center and C(disabled)
        will ensure the data center is disabled. At least one of C(state) and
        C(enabled) is required.
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: "big-ip"
name: "New York"
location: "222 West 23rd"
delegate_to: localhost
'''
RETURN = '''
contact:
description: The contact that was set on the datacenter
returned: changed
type: string
sample: "admin@root.local"
description:
description: The description that was set for the datacenter
returned: changed
type: string
sample: "Datacenter in NYC"
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
location:
description: The location that is set for the datacenter
returned: changed
type: string
sample: "222 West 23rd"
name:
description: Name of the datacenter being manipulated
returned: changed
type: string
sample: "foo"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import F5ModuleError, f5_argument_spec
class BigIpGtmDatacenter(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def create(self):
params = dict()
check_mode = self.params['check_mode']
contact = self.params['contact']
description = self.params['description']
location = self.params['location']
name = self.params['name']
partition = self.params['partition']
enabled = self.params['enabled']
# Specifically check for None because a person could supply empty
# values which would technically still be valid
if contact is not None:
params['contact'] = contact
if description is not None:
params['description'] = description
if location is not None:
params['location'] = location
        # Map the tri-state 'enabled' option onto the API's enabled/disabled
        # flags, mirroring the logic in update() below.
        if enabled is not None:
            if enabled:
                params['enabled'] = True
            else:
                params['disabled'] = True
params['name'] = name
params['partition'] = partition
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
d = self.api.tm.gtm.datacenters.datacenter
d.create(**params)
if not self.exists():
raise F5ModuleError("Failed to create the datacenter")
return True
def read(self):
"""Read information and transform it
The values that are returned by BIG-IP in the f5-sdk can have encoding
attached to them as well as be completely missing in some cases.
Therefore, this method will transform the data from the BIG-IP into a
format that is more easily consumable by the rest of the class and the
parameters that are supported by the module.
"""
p = dict()
name = self.params['name']
partition = self.params['partition']
r = self.api.tm.gtm.datacenters.datacenter.load(
name=name,
partition=partition
)
if hasattr(r, 'servers'):
# Deliberately using sets to suppress duplicates
p['servers'] = set([str(x) for x in r.servers])
if hasattr(r, 'contact'):
p['contact'] = str(r.contact)
if hasattr(r, 'location'):
p['location'] = str(r.location)
if hasattr(r, 'description'):
p['description'] = str(r.description)
if r.enabled:
p['enabled'] = True
else:
p['enabled'] = False
p['name'] = name
return p
def update(self):
changed = False
params = dict()
current = self.read()
check_mode = self.params['check_mode']
contact = self.params['contact']
description = self.params['description']
location = self.params['location']
name = self.params['name']
partition = self.params['partition']
enabled = self.params['enabled']
if contact is not None:
if 'contact' in current:
if contact != current['contact']:
params['contact'] = contact
else:
params['contact'] = contact
if description is not None:
if 'description' in current:
if description != current['description']:
params['description'] = description
else:
params['description'] = description
if location is not None:
if 'location' in current:
if location != current['location']:
params['location'] = location
else:
params['location'] = location
if enabled is not None:
if current['enabled'] != enabled:
if enabled is True:
params['enabled'] = True
params['disabled'] = False
else:
params['disabled'] = True
params['enabled'] = False
if params:
changed = True
if check_mode:
return changed
self.cparams = camel_dict_to_snake_dict(params)
else:
return changed
r = self.api.tm.gtm.datacenters.datacenter.load(
name=name,
partition=partition
)
r.update(**params)
r.refresh()
return True
def delete(self):
params = dict()
check_mode = self.params['check_mode']
params['name'] = self.params['name']
params['partition'] = self.params['partition']
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
dc = self.api.tm.gtm.datacenters.datacenter.load(**params)
dc.delete()
if self.exists():
raise F5ModuleError("Failed to delete the datacenter")
return True
def present(self):
changed = False
if self.exists():
changed = self.update()
else:
changed = self.create()
return changed
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def exists(self):
name = self.params['name']
partition = self.params['partition']
return self.api.tm.gtm.datacenters.datacenter.exists(
name=name,
partition=partition
)
def flush(self):
        result = dict()
        # default so 'changed' is defined even when no branch below runs
        changed = False
state = self.params['state']
enabled = self.params['enabled']
if state is None and enabled is None:
raise F5ModuleError("Neither 'state' nor 'enabled' set")
try:
if state == "present":
changed = self.present()
# Ensure that this field is not returned to the user since it
# is not a valid parameter to the module.
if 'disabled' in self.cparams:
del self.cparams['disabled']
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
contact=dict(required=False, default=None),
description=dict(required=False, default=None),
enabled=dict(required=False, type='bool', default=None),
location=dict(required=False, default=None),
name=dict(required=True)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpGtmDatacenter(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
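# Editor's note: hedged example, NOT part of the module. A companion playbook
# task to the EXAMPLES block above, showing the enabled/state interplay:
#
# - name: Disable data center "New York"
#   bigip_gtm_datacenter:
#     server: "big-ip"
#     name: "New York"
#     enabled: no
#   delegate_to: localhost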
| gpl-3.0 | -8,509,495,500,302,107,000 | -3,382,752,801,186,099,700 | 28.679144 | 79 | 0.594324 | false |
agiliq/django | django/contrib/auth/tests/test_middleware.py | 56 | 1409 | from django.contrib.auth.middleware import SessionAuthenticationMiddleware
from django.contrib.auth.models import User
from django.http import HttpRequest
from django.test import TestCase
class TestSessionAuthenticationMiddleware(TestCase):
def setUp(self):
self.user_password = 'test_password'
self.user = User.objects.create_user('test_user',
'test@example.com',
self.user_password)
def test_changed_password_invalidates_session(self):
"""
Tests that changing a user's password invalidates the session.
"""
verification_middleware = SessionAuthenticationMiddleware()
self.assertTrue(self.client.login(
username=self.user.username,
password=self.user_password,
))
request = HttpRequest()
request.session = self.client.session
request.user = self.user
verification_middleware.process_request(request)
self.assertIsNotNone(request.user)
self.assertFalse(request.user.is_anonymous())
# After password change, user should be anonymous
request.user.set_password('new_password')
request.user.save()
verification_middleware.process_request(request)
self.assertIsNotNone(request.user)
self.assertTrue(request.user.is_anonymous())
| bsd-3-clause | 3,161,447,702,693,594,000 | 9,180,600,812,606,134,000 | 39.257143 | 74 | 0.652945 | false |
weidongxu84/info-gatherer | django/contrib/admindocs/views.py | 77 | 15064 | import inspect
import os
import re
from django import template
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
domain = 'example.com'
name = 'my site'
@staff_member_required
def doc_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
return render_to_response('admin_doc/index.html', {
'root_path': urlresolvers.reverse('admin:index'),
}, context_instance=RequestContext(request))
@staff_member_required
def bookmarklets(request):
admin_root = urlresolvers.reverse('admin:index')
return render_to_response('admin_doc/bookmarklets.html', {
'root_path': admin_root,
'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
}, context_instance=RequestContext(request))
@staff_member_required
def template_tag_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
tags = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_tag_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'tags': tags
}, context_instance=RequestContext(request))
@staff_member_required
def template_filter_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
load_all_installed_template_libraries()
filters = []
app_libs = template.libraries.items()
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = None
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
return render_to_response('admin_doc/template_filter_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'filters': filters
}, context_instance=RequestContext(request))
@staff_member_required
def view_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
if settings.ADMIN_FOR:
settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
else:
settings_modules = [settings]
views = []
for settings_mod in settings_modules:
urlconf = import_module(settings_mod.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for (func, regex) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'url': simplify_regex(regex),
})
return render_to_response('admin_doc/view_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'views': views
}, context_instance=RequestContext(request))
@staff_member_required
def view_detail(request, view):
if not utils.docutils_is_available:
return missing_docutils_page(request)
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
return render_to_response('admin_doc/view_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': view,
'summary': title,
'body': body,
'meta': metadata,
}, context_instance=RequestContext(request))
@staff_member_required
def model_index(request):
if not utils.docutils_is_available:
return missing_docutils_page(request)
m_list = [m._meta for m in models.get_models()]
return render_to_response('admin_doc/model_index.html', {
'root_path': urlresolvers.reverse('admin:index'),
'models': m_list
}, context_instance=RequestContext(request))
@staff_member_required
def model_detail(request, app_label, model_name):
if not utils.docutils_is_available:
return missing_docutils_page(request)
# Get the model class.
try:
app_mod = models.get_app(app_label)
except ImproperlyConfigured:
raise Http404(_("App %r not found") % app_label)
model = None
for m in models.get_models(app_mod):
if m._meta.object_name.lower() == model_name:
model = m
break
if model is None:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
})
fields.append({
'name' : "%s.count" % field.name,
'data_type' : 'Integer',
'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
accessor = rel.get_accessor_name()
        fields.append({
            'name': "%s.all" % accessor,
            'data_type': 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name': "%s.count" % accessor,
            'data_type': 'Integer',
            'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.module_name),
        })
return render_to_response('admin_doc/model_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': '%s.%s' % (opts.app_label, opts.object_name),
'summary': _("Fields on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
}, context_instance=RequestContext(request))
@staff_member_required
def template_detail(request, template):
templates = []
for site_settings_module in settings.ADMIN_FOR:
settings_mod = import_module(site_settings_module)
if Site._meta.installed:
site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
else:
site_obj = GenericSite()
for dir in settings_mod.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: os.path.exists(template_file) and open(template_file).read() or '',
'site_id': settings_mod.SITE_ID,
'site': site_obj,
'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
})
return render_to_response('admin_doc/template_detail.html', {
'root_path': urlresolvers.reverse('admin:index'),
'name': template,
'templates': templates,
}, context_instance=RequestContext(request))
####################
# Helper functions #
####################
def missing_docutils_page(request):
"""Display an error message for people without docutils"""
return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(mod.__file__))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
lib = template.get_library(library_name)
except template.InvalidTemplateLibrary, e:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
"""
Return a list of views from a list of urlpatterns.
Each object in the returned list is a two-tuple: (view_func, regex)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
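# Editor's note: hedged sketch, NOT part of Django; added for illustration.
def _example_simplify_regex():
    # named groups collapse to "<name>", unnamed groups to "<var>"
    return simplify_regex(r'^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$')
    # expected result: '/<sport_slug>/athletes/<athlete_slug>/'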
| mit | -8,688,425,509,579,085,000 | -1,879,740,174,351,267,300 | 38.434555 | 180 | 0.594264 | false |
asayler/tutamen-pytutamen | pytutamen/base.py | 1 | 5612 | # -*- coding: utf-8 -*-
# Andy Sayler
# 2015
# pytutamen Package
# Tutamen Client Library
### Imports ###
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import os
import os.path
import requests
# Editor's note: the constructor below references config.ClientConfig; the
# sibling config-module import appears to be missing from this snapshot.
from pytutamen import config
### Constants ###
_API_BASE = 'api'
_API_VERSION = 'v1'
_TOKENS_DELIMINATOR = ':'
_TOKENS_HEADER = 'tutamen-tokens'
### Exceptions ###
class ServerConnectionException(Exception):
pass
class ClientException(Exception):
pass
### Objects ###
class ServerConnection(object):
def __init__(self, server_url=None, server_name=None, server_ca_crt_path=None,
account_uid=None, client_uid=None, no_client_crt=False,
conf=None, conf_path=None):
# Check Args
if not server_url:
raise(ServerConnectionException("server_url required"))
if not server_name:
raise(ServerConnectionException("server_name required"))
# Call Parent
super().__init__()
# Setup Properties
self._url_server = server_url
self._server_name = server_name
self._path_ca = server_ca_crt_path
self._session = None
# Setup Conf
if not conf:
conf = config.ClientConfig(conf_path=conf_path)
self._conf = conf
# Get UIDs
if not account_uid:
account_uid = conf.defaults_get_account_uid()
if not account_uid:
raise(ACServerConnectionException("Missing Default Account UID"))
self._account_uid = account_uid
if not client_uid:
client_uid = conf.defaults_get_client_uid()
if not client_uid:
raise(ACServerConnectionException("Missing Default Client UID"))
self._client_uid = client_uid
# Get Certs
if not no_client_crt:
client_key_path = conf.path_client_key(account_uid, client_uid)
if not os.path.isfile(client_key_path):
raise(ServerConnectionException("Missing Client Key"))
self._client_key_path = client_key_path
client_crt_path = conf.path_client_crt(account_uid, client_uid, server_name)
if not os.path.isfile(client_crt_path):
raise(ServerConnectionException("Missing Client Cert"))
self._client_crt_path = client_crt_path
else:
self._client_key_path = None
self._client_crt_path = None
def open(self):
if not self._session:
ses = requests.Session()
if self._path_ca:
ses.verify = self._path_ca
else:
ses.verify = True
if self._client_crt_path and self._client_key_path:
ses.cert = (self._client_crt_path, self._client_key_path)
self._session = ses
def close(self):
if self._session:
self._session.close()
self._session = None
def __enter__(self):
self.open()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
return False
@property
def is_open(self):
return bool(self._session)
@property
def server_name(self):
return self._server_name
@property
def url_srv(self):
return self._url_server
@property
def url_api(self):
return "{}/{}/{}".format(self.url_srv, _API_BASE, _API_VERSION)
def _tokens_to_header(self, tokens=None):
if tokens is None:
tokens = []
tokens_str = ""
for token in tokens:
tokens_str += token.strip()
tokens_str += _TOKENS_DELIMINATOR
tokens_str = tokens_str.strip(_TOKENS_DELIMINATOR)
header = {_TOKENS_HEADER: tokens_str}
return header
def http_post(self, endpoint, json=None, tokens=None, auth=None):
url = "{:s}/{:s}/".format(self.url_api, endpoint)
header = self._tokens_to_header(tokens)
res = self._session.post(url, json=json, headers=header, auth=auth)
res.raise_for_status()
return res.json()
def http_put(self, endpoint, json=None, tokens=None, auth=None):
url = "{:s}/{:s}/".format(self.url_api, endpoint)
header = self._tokens_to_header(tokens)
res = self._session.put(url, json=json, headers=header, auth=auth)
res.raise_for_status()
return res.json()
def http_get(self, endpoint=None, tokens=None, auth=None):
url = "{:s}/{:s}/".format(self.url_api, endpoint)
header = self._tokens_to_header(tokens)
res = self._session.get(url, headers=header, auth=auth)
res.raise_for_status()
return res.json()
def http_delete(self, endpoint=None, tokens=None, auth=None):
url = "{:s}/{:s}/".format(self.url_api, endpoint)
header = self._tokens_to_header(tokens)
res = self._session.delete(url, headers=header, auth=auth)
res.raise_for_status()
return res.json()
class ObjectClient(object):
def __init__(self, connection):
# Check Args
if not isinstance(connection, ServerConnection):
raise(TypeError("'connection' must of an instance of {}".format(ServerConnection)))
# Call Parent
super().__init__()
# Setup Properties
self._connection = connection
@property
def connection(self):
return self._connection
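# Editor's note: hedged usage sketch, NOT part of the original file. The
# server URL/name and the 'collections' endpoint are hypothetical; the
# context-manager support on ServerConnection handles open()/close().
def _example_list_collections(server_url, server_name, tokens=None):
    with ServerConnection(server_url=server_url, server_name=server_name,
                          no_client_crt=True) as conn:
        # tokens are joined with ':' into the tutamen-tokens header
        return conn.http_get(endpoint="collections", tokens=tokens)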
| lgpl-2.1 | -8,484,623,489,865,426,000 | -7,377,583,424,831,631,000 | 27.343434 | 95 | 0.590164 | false |
pombredanne/https-git.fedorahosted.org-git-kobo | kobo/http.py | 1 | 4351 | # -*- coding: utf-8 -*-
# based on: http://code.activestate.com/recipes/146306/
import httplib
import mimetypes
import os
from kobo.shortcuts import random_string
class POSTTransport(object):
"""
POST transport.
USAGE:
>>> import kobo.http
t = kobo.http.POSTTransport()
t.add_variable("foo", "bar")
t.add_file("foofile", "/tmp/some_file")
t.send_to_host("somehost", "/cgi-bin/upload")
"""
def __init__(self):
self._variables = []
self._files = []
self._boundary = random_string(32)
self.last_response = None
def get_content_type(self, file_name):
"""Guess the mime type of a file.
@param file_name: file name
@type file_name: str
@return: MIME type
@rtype: str
"""
return mimetypes.guess_type(file_name)[0] or "application/octet-stream"
def add_variable(self, key, value):
"""Add a variable to the POST request."""
self._variables.append((str(key), str(value)))
def add_file(self, key, file_name):
"""
Add a file to the POST request.
@param key: key
@type key: str
@param file_name: file name
@type file_name: str
"""
if type(file_name) is not str:
raise TypeError("Invalid type of 'file_name': %s" % type(file_name))
if not os.path.isfile(file_name):
raise OSError("Not a file: %s" % file_name)
self._files.append((str(key), str(file_name)))
def flush_data(self):
"""Flush variables and files from the request."""
self._variables = []
self._files = []
def send_to_host(self, host, selector, port=None, secure=False, flush=True):
"""
Send the POST request to a host.
@param host: host address
@type host: str
@param selector: selector/path string
@type selector: str
@param port: port number
@type port: int
@param secure: use https
@type secure: bool
@param flush: flush data after send
@type flush: bool
@return: (response status code, response data body)
@rtype: (int, str)
"""
content_length = 0
variables = []
for key, value in self._variables:
variables.extend((
"--%s" % self._boundary,
'Content-Disposition: form-data; name="%s"' % key,
"",
value,
))
variables_data = "\r\n".join(variables)
content_length += len(variables_data)
content_length += 2 # '\r\n'
files = []
for key, file_name in self._files:
file_data = "\r\n".join((
"--%s" % self._boundary,
'Content-Disposition: form-data; name="%s"; filename="%s"' % (key, os.path.basename(file_name)),
"Content-Type: %s" % self.get_content_type(file_name),
"",
"", # this adds extra newline before file data
))
files.append((file_name, file_data))
content_length += len(file_data)
content_length += os.path.getsize(file_name)
content_length += 2 # '\r\n'
footer_data = "\r\n".join(("--%s--" % self._boundary, ""))
content_length += len(footer_data)
content_type = "multipart/form-data; boundary=" + self._boundary
if secure:
request = httplib.HTTPSConnection(host, port)
else:
request = httplib.HTTPConnection(host, port)
request.putrequest("POST", selector)
request.putheader("content-type", content_type)
request.putheader("content-length", str(content_length))
request.endheaders()
request.send(variables_data)
request.send("\r\n")
for file_name, file_data in files:
request.send(file_data)
file_obj = open(file_name, "r")
while 1:
chunk = file_obj.read(1024**2)
if not chunk:
break
request.send(chunk)
request.send("\r\n")
request.send(footer_data)
response = request.getresponse()
if flush:
self.flush_data()
return response.status, response.read()
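# Editor's note: hedged usage sketch, NOT part of the original module; the
# host and paths are hypothetical. Mirrors the docstring example above.
def _example_upload(path):
    t = POSTTransport()
    t.add_variable("description", "nightly log upload")
    t.add_file("logfile", path)
    # returns (HTTP status code, response body)
    return t.send_to_host("uploads.example.com", "/cgi-bin/upload")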
| lgpl-2.1 | 148,699,286,725,510,500 | 5,266,782,358,022,172,000 | 29.215278 | 112 | 0.535969 | false |
leiferikb/bitpop | src/third_party/WebKit/Tools/Scripts/webkitpy/tool/multicommandtool.py | 51 | 13189 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# MultiCommandTool provides a framework for writing svn-like/git-like tools
# which are called with the following format:
# tool-name [global options] command-name [command options]
import logging
import sys
from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
from webkitpy.tool.grammar import pluralize
_log = logging.getLogger(__name__)
class TryAgain(Exception):
pass
class Command(object):
name = None
show_in_main_help = False
def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
self.help_text = help_text
self.long_help = long_help
self.argument_names = argument_names
self.required_arguments = self._parse_required_arguments(argument_names)
self.options = options
self.requires_local_commits = requires_local_commits
self._tool = None
# option_parser can be overriden by the tool using set_option_parser
# This default parser will be used for standalone_help printing.
self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)
def _exit(self, code):
sys.exit(code)
# This design is slightly awkward, but we need the
# the tool to be able to create and modify the option_parser
# before it knows what Command to run.
def set_option_parser(self, option_parser):
self.option_parser = option_parser
self._add_options_to_parser()
def _add_options_to_parser(self):
options = self.options or []
for option in options:
self.option_parser.add_option(option)
# The tool calls bind_to_tool on each Command after adding it to its list.
def bind_to_tool(self, tool):
# Command instances can only be bound to one tool at a time.
if self._tool and tool != self._tool:
raise Exception("Command already bound to tool!")
self._tool = tool
@staticmethod
def _parse_required_arguments(argument_names):
required_args = []
if not argument_names:
return required_args
split_args = argument_names.split(" ")
for argument in split_args:
if argument[0] == '[':
# For now our parser is rather dumb. Do some minimal validation that
# we haven't confused it.
if argument[-1] != ']':
raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
else:
required_args.append(argument)
return required_args
def name_with_arguments(self):
usage_string = self.name
if self.options:
usage_string += " [options]"
if self.argument_names:
usage_string += " " + self.argument_names
return usage_string
def parse_args(self, args):
return self.option_parser.parse_args(args)
def check_arguments_and_execute(self, options, args, tool=None):
if len(args) < len(self.required_arguments):
_log.error("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
pluralize("argument", len(self.required_arguments)),
pluralize("argument", len(args)),
"'%s'" % " ".join(args),
" ".join(self.required_arguments),
tool.name(),
self.name))
return 1
return self.execute(options, args, tool) or 0
def standalone_help(self):
help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
if self.long_help:
help_text += "%s\n\n" % self.long_help
help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
return help_text
def execute(self, options, args, tool):
raise NotImplementedError, "subclasses must implement"
# main() exists so that Commands can be turned into stand-alone scripts.
# Other parts of the code will likely require modification to work stand-alone.
def main(self, args=sys.argv):
(options, args) = self.parse_args(args)
# Some commands might require a dummy tool
return self.check_arguments_and_execute(options, args)
# FIXME: This should just be rolled into Command. help_text and argument_names do not need to be instance variables.
class AbstractDeclarativeCommand(Command):
help_text = None
argument_names = None
long_help = None
def __init__(self, options=None, **kwargs):
Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
class HelpPrintingOptionParser(OptionParser):
def __init__(self, epilog_method=None, *args, **kwargs):
self.epilog_method = epilog_method
OptionParser.__init__(self, *args, **kwargs)
def error(self, msg):
self.print_usage(sys.stderr)
error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
# This method is overriden to add this one line to the output:
error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
self.exit(1, error_message)
# We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
# and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
def format_epilog(self, epilog):
if self.epilog_method:
return "\n%s\n" % self.epilog_method()
return ""
class HelpCommand(AbstractDeclarativeCommand):
name = "help"
help_text = "Display information about this program or its subcommands"
argument_names = "[COMMAND]"
def __init__(self):
options = [
make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
]
AbstractDeclarativeCommand.__init__(self, options)
self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.
def _help_epilog(self):
# Only show commands which are relevant to this checkout's SCM system. Might this be confusing to some users?
if self.show_all_commands:
epilog = "All %prog commands:\n"
relevant_commands = self._tool.commands[:]
else:
epilog = "Common %prog commands:\n"
relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands)
longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
command_help_texts = map(lambda command: " %s %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
epilog += "%s\n" % "".join(command_help_texts)
epilog += "See '%prog help --all-commands' to list all commands.\n"
epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().
# FIXME: This is a hack so that we don't show --all-commands as a global option:
def _remove_help_options(self):
for option in self.options:
self.option_parser.remove_option(option.get_opt_string())
def execute(self, options, args, tool):
if args:
command = self._tool.command_by_name(args[0])
if command:
print command.standalone_help()
return 0
self.show_all_commands = options.show_all_commands
self._remove_help_options()
self.option_parser.print_help()
return 0
class MultiCommandTool(object):
global_options = None
def __init__(self, name=None, commands=None):
self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
# Allow the unit tests to disable command auto-discovery.
self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
self.help_command = self.command_by_name(HelpCommand.name)
# Require a help command, even if the manual test list doesn't include one.
if not self.help_command:
self.help_command = HelpCommand()
self.commands.append(self.help_command)
for command in self.commands:
command.bind_to_tool(self)
@classmethod
def _add_all_subclasses(cls, class_to_crawl, seen_classes):
for subclass in class_to_crawl.__subclasses__():
if subclass not in seen_classes:
seen_classes.add(subclass)
cls._add_all_subclasses(subclass, seen_classes)
@classmethod
def _find_all_commands(cls):
commands = set()
cls._add_all_subclasses(Command, commands)
return sorted(commands)
def name(self):
return self._name
def _create_option_parser(self):
usage = "Usage: %prog [options] COMMAND [ARGS]"
return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)
@staticmethod
def _split_command_name_from_args(args):
# Assume the first argument which doesn't start with "-" is the command name.
command_index = 0
for arg in args:
if arg[0] != "-":
break
command_index += 1
else:
return (None, args[:])
command = args[command_index]
return (command, args[:command_index] + args[command_index + 1:])
def command_by_name(self, command_name):
for command in self.commands:
if command_name == command.name:
return command
return None
def path(self):
raise NotImplementedError, "subclasses must implement"
def command_completed(self):
pass
def should_show_in_main_help(self, command):
return command.show_in_main_help
def should_execute_command(self, command):
return True
def _add_global_options(self, option_parser):
global_options = self.global_options or []
for option in global_options:
option_parser.add_option(option)
def handle_global_options(self, options):
pass
def main(self, argv=sys.argv):
(command_name, args) = self._split_command_name_from_args(argv[1:])
option_parser = self._create_option_parser()
self._add_global_options(option_parser)
command = self.command_by_name(command_name) or self.help_command
if not command:
option_parser.error("%s is not a recognized command" % command_name)
command.set_option_parser(option_parser)
(options, args) = command.parse_args(args)
self.handle_global_options(options)
(should_execute, failure_reason) = self.should_execute_command(command)
if not should_execute:
_log.error(failure_reason)
return 0 # FIXME: Should this really be 0?
while True:
try:
result = command.check_arguments_and_execute(options, args, self)
break
except TryAgain, e:
pass
self.command_completed()
return result
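# Editor's note: hedged sketch, NOT part of WebKit. The smallest useful
# Command subclass; the name and behaviour are hypothetical.
class _ExampleEchoCommand(AbstractDeclarativeCommand):
    name = "echo"
    show_in_main_help = True
    help_text = "Print the arguments back to stdout"
    argument_names = "[WORDS]"
    def execute(self, options, args, tool):
        print " ".join(args)  # Python 2 print, matching this module
        return 0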
| gpl-3.0 | -5,378,686,390,860,808,000 | -3,276,025,753,589,647,400 | 40.344828 | 146 | 0.646372 | false |
2Guys1Python/Project-Cacophonum | data/states/player_menu.py | 1 | 2099 | """
This is the state where the player can look at
his inventory, equip items and check stats.
Most of the logic is in menugui.MenuGUI()
"""
import pygame as pg
from .. import tools, setup, menugui
from .. import constants as c
class Player_Menu(object):
def __init__(self, game_data, level):
inventory = game_data['player inventory']
conductors = game_data['conductors']
self.get_image = tools.get_image
self.allow_input = False
self.font = pg.font.Font(setup.FONTS[c.MAIN_FONT], 18)
self.background = self.make_background()
self.gui = menugui.MenuGui(level, inventory, conductors)
def make_background(self):
"""
Makes the generic black/blue background.
"""
background = pg.sprite.Sprite()
surface = pg.Surface(c.SCREEN_SIZE).convert()
surface.fill(c.BLACK_BLUE)
text = "Z = confirm, X = cancel, <v^> = navigation"
text_image = self.font.render(text, True, c.WHITE)
text_rect = text_image.get_rect(x=15, y=620)
text_rect.centerx = surface.get_rect().centerx
surface.blit(text_image, text_rect)
background.image = surface
background.rect = background.image.get_rect()
return background
'''
def make_sprite(self, key, coordx, coordy, x=40, y=25):
"""
Get the image for the player.
"""
spritesheet = setup.GFX[key]
surface = pg.Surface((32, 32))
surface.set_colorkey(c.BLACK)
image = self.get_image(coordx, coordy, 32, 32, spritesheet)
rect = image.get_rect()
surface.blit(image, rect)
surface = pg.transform.scale(surface, (192, 192))
rect = surface.get_rect(left=x, top=y)
sprite = pg.sprite.Sprite()
sprite.image = surface
sprite.rect = rect
return sprite
'''
def update(self, surface, keys):
self.gui.update(keys)
self.draw(surface)
def draw(self, surface):
surface.blit(self.background.image, self.background.rect)
self.gui.draw(surface)
| mit | 2,512,298,740,869,411,000 | -2,152,626,080,815,917,000 | 30.80303 | 67 | 0.608861 | false |
ojengwa/django-1 | django/apps/config.py | 121 | 8077 | import os
from importlib import import_module
from django.core.exceptions import AppRegistryNotReady, ImproperlyConfigured
from django.utils._os import upath
from django.utils.module_loading import module_has_submodule
MODELS_MODULE_NAME = 'models'
class AppConfig(object):
"""
Class representing a Django application and its configuration.
"""
def __init__(self, app_name, app_module):
# Full Python path to the application eg. 'django.contrib.admin'.
self.name = app_name
# Root module for the application eg. <module 'django.contrib.admin'
# from 'django/contrib/admin/__init__.pyc'>.
self.module = app_module
# The following attributes could be defined at the class level in a
# subclass, hence the test-and-set pattern.
# Last component of the Python path to the application eg. 'admin'.
# This value must be unique across a Django project.
if not hasattr(self, 'label'):
self.label = app_name.rpartition(".")[2]
# Human-readable name for the application eg. "Admin".
if not hasattr(self, 'verbose_name'):
self.verbose_name = self.label.title()
# Filesystem path to the application directory eg.
# u'/usr/lib/python2.7/dist-packages/django/contrib/admin'. Unicode on
# Python 2 and a str on Python 3.
if not hasattr(self, 'path'):
self.path = self._path_from_module(app_module)
# Module containing models eg. <module 'django.contrib.admin.models'
# from 'django/contrib/admin/models.pyc'>. Set by import_models().
# None if the application doesn't have a models module.
self.models_module = None
# Mapping of lower case model names to model classes. Initially set to
# None to prevent accidental access before import_models() runs.
self.models = None
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.label)
def _path_from_module(self, module):
"""Attempt to determine app's filesystem path from its module."""
# See #21874 for extended discussion of the behavior of this method in
# various cases.
# Convert paths to list because Python 3.3 _NamespacePath does not
# support indexing.
paths = list(getattr(module, '__path__', []))
if len(paths) != 1:
filename = getattr(module, '__file__', None)
if filename is not None:
paths = [os.path.dirname(filename)]
if len(paths) > 1:
raise ImproperlyConfigured(
"The app module %r has multiple filesystem locations (%r); "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module, paths))
elif not paths:
raise ImproperlyConfigured(
"The app module %r has no filesystem location, "
"you must configure this app with an AppConfig subclass "
"with a 'path' class attribute." % (module,))
return upath(paths[0])
@classmethod
def create(cls, entry):
"""
Factory that creates an app config from an entry in INSTALLED_APPS.
"""
try:
# If import_module succeeds, entry is a path to an app module,
# which may specify an app config class with default_app_config.
# Otherwise, entry is a path to an app config class or an error.
module = import_module(entry)
except ImportError:
# Track that importing as an app module failed. If importing as an
# app config class fails too, we'll trigger the ImportError again.
module = None
mod_path, _, cls_name = entry.rpartition('.')
# Raise the original exception when entry cannot be a path to an
# app config class.
if not mod_path:
raise
else:
try:
# If this works, the app module specifies an app config class.
entry = module.default_app_config
except AttributeError:
# Otherwise, it simply uses the default app config class.
return cls(entry, module)
else:
mod_path, _, cls_name = entry.rpartition('.')
# If we're reaching this point, we must attempt to load the app config
# class located at <mod_path>.<cls_name>
mod = import_module(mod_path)
try:
cls = getattr(mod, cls_name)
except AttributeError:
if module is None:
# If importing as an app module failed, that error probably
# contains the most informative traceback. Trigger it again.
import_module(entry)
else:
raise
# Check for obvious errors. (This check prevents duck typing, but
# it could be removed if it became a problem in practice.)
if not issubclass(cls, AppConfig):
raise ImproperlyConfigured(
"'%s' isn't a subclass of AppConfig." % entry)
# Obtain app name here rather than in AppClass.__init__ to keep
# all error checking for entries in INSTALLED_APPS in one place.
try:
app_name = cls.name
except AttributeError:
raise ImproperlyConfigured(
"'%s' must supply a name attribute." % entry)
# Ensure app_name points to a valid module.
app_module = import_module(app_name)
# Entry is a path to an app config class.
return cls(app_name, app_module)
def check_models_ready(self):
"""
Raises an exception if models haven't been imported yet.
"""
if self.models is None:
raise AppRegistryNotReady(
"Models for app '%s' haven't been imported yet." % self.label)
def get_model(self, model_name):
"""
Returns the model with the given case-insensitive model_name.
Raises LookupError if no model exists with this name.
"""
self.check_models_ready()
try:
return self.models[model_name.lower()]
except KeyError:
raise LookupError(
"App '%s' doesn't have a '%s' model." % (self.label, model_name))
def get_models(self, include_auto_created=False,
include_deferred=False, include_swapped=False):
"""
Returns an iterable of models.
By default, the following models aren't included:
- auto-created models for many-to-many relations without
an explicit intermediate table,
- models created to satisfy deferred attribute queries,
- models that have been swapped out.
Set the corresponding keyword argument to True to include such models.
Keyword arguments aren't documented; they're a private API.
"""
self.check_models_ready()
for model in self.models.values():
if model._deferred and not include_deferred:
continue
if model._meta.auto_created and not include_auto_created:
continue
if model._meta.swapped and not include_swapped:
continue
yield model
def import_models(self, all_models):
# Dictionary of models for this app, primarily maintained in the
# 'all_models' attribute of the Apps this AppConfig is attached to.
# Injected as a parameter because it gets populated when models are
# imported, which might happen before populate() imports models.
self.models = all_models
if module_has_submodule(self.module, MODELS_MODULE_NAME):
models_module_name = '%s.%s' % (self.name, MODELS_MODULE_NAME)
self.models_module = import_module(models_module_name)
def ready(self):
"""
Override this method in subclasses to run code when Django starts.
"""
| bsd-3-clause | 6,042,700,086,889,867,000 | -6,564,103,056,930,004,000 | 38.788177 | 81 | 0.601709 | false |
justajeffy/arsenalsuite | python/blur/examples/path_lister.py | 11 | 2216 | #!/usr/bin/python
from PyQt4.QtCore import *
from PyQt4.QtSql import *
from PyQt4.QtGui import *
from blur.Stone import *
from blur.Classes import *
from blur.Classesui import *
import blur.email, blur.jabber
import sys, time, re, os
from math import ceil
import traceback
try:
import popen2
except: pass
app = QApplication(sys.argv)
initConfig( "/etc/db.ini", "/var/log/path_lister.log" )
blur.RedirectOutputToLog()
blurqt_loader()
VERBOSE_DEBUG = False
if VERBOSE_DEBUG:
Database.instance().setEchoMode( Database.EchoUpdate | Database.EchoDelete )# | Database.EchoSelect )
FreezerCore.instance().reconnect()
class ProjectChooserDialog( QDialog ):
def __init__(self):
QDialog.__init__(self)
# Project Chooser Widget
self.projectCombo = ProjectCombo(self)
self.projectCombo.setShowSpecialItem(False)
# Ok | Cancel buttons
dbb = QDialogButtonBox( QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self )
self.connect( dbb, SIGNAL( 'accepted()' ), self.accept )
self.connect( dbb, SIGNAL( 'rejected()' ), self.reject )
# Layout
l = QVBoxLayout(self)
l.addWidget( self.projectCombo )
l.addWidget( dbb )
def project(self):
return self.projectCombo.project()
project = None
regenPaths = len(sys.argv) > 2 and sys.argv[2]
if len(sys.argv) > 1:
project = Project.recordByName( sys.argv[1] )
if not project or not project.isRecord():
d = ProjectChooserDialog()
if d.exec_() == QDialog.Accepted:
project = d.project()
if project.isRecord():
storageLocations = project.projectStorages()
def print_paths( asset, tabs = 0 ):
for c in asset.children():
print (' ' * tabs), c.name(), c.assetTemplate().name(), c.pathTemplate().name()
for storage in storageLocations:
path = c.path(storage)
pt = c.pathTracker(storage)
if not path.isEmpty() or pt.isRecord():
print (' ' * tabs) + ' ', path
gen_path = pt.generatePathFromTemplate(storage)
if path != gen_path:
if regenPaths:
print "Changing path to match template: ", gen_path
pt.setPath(gen_path)
pt.commit()
else:
print "Path doesnt match template: ", gen_path
print_paths( c, tabs + 1 )
print_paths( project )
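# Illustrative invocation, not part of the original script; it assumes an
# Arsenal database reachable via the /etc/db.ini consumed by initConfig above:
#
#   ./path_lister.py ProjectName        # list template vs. actual paths
#   ./path_lister.py ProjectName regen  # also rewrite mismatched paths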
| gpl-2.0 | 235,504,604,891,032,540 | -7,777,117,748,299,685,000 | 23.898876 | 102 | 0.686372 | false |
Astoriane/jCrunchyroll | lib/crunchy-xml-decoder/unidecode/x00e.py | 252 | 4092 | data = (
'[?]', # 0x00
'k', # 0x01
'kh', # 0x02
'kh', # 0x03
'kh', # 0x04
'kh', # 0x05
'kh', # 0x06
'ng', # 0x07
'cch', # 0x08
'ch', # 0x09
'ch', # 0x0a
'ch', # 0x0b
'ch', # 0x0c
'y', # 0x0d
'd', # 0x0e
't', # 0x0f
'th', # 0x10
'th', # 0x11
'th', # 0x12
'n', # 0x13
'd', # 0x14
't', # 0x15
'th', # 0x16
'th', # 0x17
'th', # 0x18
'n', # 0x19
'b', # 0x1a
'p', # 0x1b
'ph', # 0x1c
'f', # 0x1d
'ph', # 0x1e
'f', # 0x1f
'ph', # 0x20
'm', # 0x21
'y', # 0x22
'r', # 0x23
'R', # 0x24
'l', # 0x25
'L', # 0x26
'w', # 0x27
's', # 0x28
's', # 0x29
's', # 0x2a
'h', # 0x2b
'l', # 0x2c
'`', # 0x2d
'h', # 0x2e
'~', # 0x2f
'a', # 0x30
'a', # 0x31
'aa', # 0x32
'am', # 0x33
'i', # 0x34
'ii', # 0x35
'ue', # 0x36
'uue', # 0x37
'u', # 0x38
'uu', # 0x39
'\'', # 0x3a
'[?]', # 0x3b
'[?]', # 0x3c
'[?]', # 0x3d
'[?]', # 0x3e
'Bh.', # 0x3f
'e', # 0x40
'ae', # 0x41
'o', # 0x42
'ai', # 0x43
'ai', # 0x44
'ao', # 0x45
'+', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'M', # 0x4d
'', # 0x4e
' * ', # 0x4f
'0', # 0x50
'1', # 0x51
'2', # 0x52
'3', # 0x53
'4', # 0x54
'5', # 0x55
'6', # 0x56
'7', # 0x57
'8', # 0x58
'9', # 0x59
' // ', # 0x5a
' /// ', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'[?]', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'[?]', # 0x6a
'[?]', # 0x6b
'[?]', # 0x6c
'[?]', # 0x6d
'[?]', # 0x6e
'[?]', # 0x6f
'[?]', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'k', # 0x81
'kh', # 0x82
'[?]', # 0x83
'kh', # 0x84
'[?]', # 0x85
'[?]', # 0x86
'ng', # 0x87
'ch', # 0x88
'[?]', # 0x89
's', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'ny', # 0x8d
'[?]', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'd', # 0x94
'h', # 0x95
'th', # 0x96
'th', # 0x97
'[?]', # 0x98
'n', # 0x99
'b', # 0x9a
'p', # 0x9b
'ph', # 0x9c
'f', # 0x9d
'ph', # 0x9e
'f', # 0x9f
'[?]', # 0xa0
'm', # 0xa1
'y', # 0xa2
'r', # 0xa3
'[?]', # 0xa4
'l', # 0xa5
'[?]', # 0xa6
'w', # 0xa7
'[?]', # 0xa8
'[?]', # 0xa9
's', # 0xaa
'h', # 0xab
'[?]', # 0xac
'`', # 0xad
'', # 0xae
'~', # 0xaf
'a', # 0xb0
'', # 0xb1
'aa', # 0xb2
'am', # 0xb3
'i', # 0xb4
'ii', # 0xb5
'y', # 0xb6
'yy', # 0xb7
'u', # 0xb8
'uu', # 0xb9
'[?]', # 0xba
'o', # 0xbb
'l', # 0xbc
'ny', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'e', # 0xc0
'ei', # 0xc1
'o', # 0xc2
'ay', # 0xc3
'ai', # 0xc4
'[?]', # 0xc5
'+', # 0xc6
'[?]', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'M', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'0', # 0xd0
'1', # 0xd1
'2', # 0xd2
'3', # 0xd3
'4', # 0xd4
'5', # 0xd5
'6', # 0xd6
'7', # 0xd7
'8', # 0xd8
'9', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'hn', # 0xdc
'hm', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
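# Illustrative lookup, not part of the upstream table. unidecode splits the
# code-point space into 256-entry tables like the one above (this module
# covers U+0E00..U+0EFF); the low byte of the code point indexes into `data`.
if __name__ == '__main__':
    ch = u'\u0e01'  # THAI CHARACTER KO KAI
    assert (ord(ch) >> 8) == 0x0E  # handled by this x00e table
    print(data[ord(ch) & 0xFF])    # -> 'k'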
| lgpl-3.0 | -9,131,557,175,929,021,000 | -1,788,665,775,922,247,400 | 14.922179 | 18 | 0.294477 | false |
evangelistalab/forte | forte/solvers/active_space_solver.py | 1 | 6737 | from forte.core import flog
from forte.solvers.solver import Feature, Solver
from forte.forte import ForteOptions
from forte import to_state_nroots_map
from forte import forte_options
from forte.forte import make_active_space_ints, make_active_space_solver
class ActiveSpaceSolver(Solver):
"""
A class to diagonalize the Hamiltonian in a subset of molecular orbitals.
Multi-state computations need a dict[StateInfo,list(float)] object,
which can be cumbersome to prepare for all computations.
Therefore, this class accepts various inputs, for example:
>>> # single state (one eigenstate of state ``state``)
>>> state = StateInfo(...)
>>> _parse_states(state)
>>> # list of states (one eigenstate of ``state_1`` and one of ``state_2``)
>>> state_1 = StateInfo(...)
>>> state_2 = StateInfo(...)
>>> _parse_states([state_1,state_2])
>>> # dict of states (5 eigenstate of ``state_1`` and 3 of ``state_2``)
>>> state_info_1 = StateInfo(...)
>>> state_info_2 = StateInfo(...)
    >>> _parse_states({state_info_1: 5, state_info_2: 3})
>>> # dict of states with weights (5 eigenstate of ``state_1`` and 3 of ``state_2``)
>>> state_info_1 = StateInfo(...)
>>> state_info_2 = StateInfo(...)
    >>> _parse_states({state_info_1: [1.0, 1.0, 0.5, 0.5, 0.5], state_info_2: [0.25, 0.25, 0.25]})
"""
def __init__(
self,
input_nodes,
states,
type,
mo_spaces=None,
e_convergence=1.0e-10,
r_convergence=1.0e-6,
options=None,
cbh=None
):
"""
Initialize an ActiveSpaceSolver object
Parameters
----------
input_nodes: Solver
The solver that will provide the molecular orbitals
states: StateInfo, or list(StateInfo), or dict[StateInfo,int], or dict[StateInfo,list(float)]
The state(s) to be computed passed in one of the following ways:
1. A single state
2. A list of single states (will compute one level for each type of state)
3. A dictionary that maps StateInfo objects to the number of states to compute
4. A dictionary that maps StateInfo objects to a list of weights for the states to compute
If explicit weights are passed, these are used in procedures that average properties
over states (e.g., state-averaged CASSCF)
type: {'FCI','ACI','CAS','DETCI','ASCI','PCI'}
The type of solver
mo_spaces: dict[str,list(int)]
A dictionary that specifies the number of MOs per irrep that belong to a given orbital space.
The available spaces are: frozen_docc, restricted_docc, active, restricted_uocc, frozen_uocc, and gas1-gas6.
Please consult the manual for the meaning of these orbital spaces.
A convenience function is provided by the Input class [mo_spaces()] to facilitate the creation of this dict.
Note that an empty dict implies that all orbitals are treated as active.
e_convergence: float
energy convergence criterion
r_convergence: float
residual convergence criterion
options: dict()
Additional options passed to control the active space solver
cbh: CallbackHandler
A callback object used to inject code into the HF class
"""
# initialize the base class
super().__init__(
input_nodes=input_nodes,
needs=[Feature.MODEL, Feature.ORBITALS],
provides=[Feature.MODEL, Feature.ORBITALS, Feature.RDMS],
options=options,
cbh=cbh
)
self._data = self.input_nodes[0].data
# parse the states parameter
self._states = self._parse_states(states)
self._type = type.upper()
self._mo_space_info_map = {} if mo_spaces is None else mo_spaces
self._e_convergence = e_convergence
self._r_convergence = r_convergence
def __repr__(self):
"""
return a string representation of this object
"""
return f'{self._type}(mo_space={self._mo_space_info_map},e_convergence={self._e_convergence},r_convergence={self._r_convergence})'
def __str__(self):
"""
return a string representation of this object
"""
return repr(self)
@property
def e_convergence(self):
return self._e_convergence
@property
def r_convergence(self):
return self._r_convergence
def _run(self):
"""Run an active space solver computation"""
# compute the guess orbitals
if not self.input_nodes[0].executed:
flog('info', 'ActiveSpaceSolver: MOs not available in mo_solver. Calling mo_solver run()')
self.input_nodes[0].run()
else:
flog('info', 'ActiveSpaceSolver: MOs read from mo_solver object')
# make the state_map
state_map = to_state_nroots_map(self._states)
# make the mo_space_info object
self.make_mo_space_info(self._mo_space_info_map)
# prepare the options
options = {'E_CONVERGENCE': self.e_convergence, 'R_CONVERGENCE': self.r_convergence}
# values from self._options (user specified) replace those from options
full_options = {**options, **self._options}
flog('info', 'ActiveSpaceSolver: adding options')
local_options = ForteOptions(forte_options)
local_options.set_from_dict(full_options)
flog('info', 'ActiveSpaceSolver: getting integral from the model object')
self.ints = self.model.ints(self.data, local_options)
# Make an active space integral object
flog('info', 'ActiveSpaceSolver: making active space integrals')
self.as_ints = make_active_space_ints(self.mo_space_info, self.ints, "ACTIVE", ["RESTRICTED_DOCC"])
# create an active space solver object and compute the energy
flog('info', 'ActiveSpaceSolver: creating active space solver object')
self._active_space_solver = make_active_space_solver(
self._type, state_map, self.scf_info, self.mo_space_info, self.as_ints, local_options
)
flog('info', 'ActiveSpaceSolver: calling compute_energy() on active space solver object')
state_energies_list = self._active_space_solver.compute_energy()
flog('info', 'ActiveSpaceSolver: compute_energy() done')
flog('info', f'ActiveSpaceSolver: active space energy = {state_energies_list}')
self._results.add('active space energy', state_energies_list, 'Active space energy', 'Eh')
return self
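# Illustrative construction sketch, not part of the original module. `scf`
# stands for an upstream solver node that provides MODEL and ORBITALS (e.g.
# an HF solver), and `state` for a forte StateInfo object; both are assumed
# to be built elsewhere.
#
#   fci = ActiveSpaceSolver(scf, states=state, type='FCI',
#                           mo_spaces={'restricted_docc': [2], 'active': [6]})
#   fci.run()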
| lgpl-3.0 | -243,353,181,779,070,560 | -5,247,143,463,595,696,000 | 39.584337 | 138 | 0.620751 | false |
tombstone/models | research/maskgan/models/rnn.py | 5 | 7132 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple RNN model definitions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import tensorflow as tf
# ZoneoutWrapper.
from regularization import zoneout
FLAGS = tf.app.flags.FLAGS
def generator(hparams,
inputs,
targets,
targets_present,
is_training,
is_validating,
reuse=None):
"""Define the Generator graph.
  G will now impute tokens that have been masked from the input sequence.
"""
tf.logging.warning(
      'Unidirectional generative model is not a useful model for this MaskGAN '
'because future context is needed. Use only for debugging purposes.')
init_scale = 0.05
initializer = tf.random_uniform_initializer(-init_scale, init_scale)
with tf.variable_scope('gen', reuse=reuse, initializer=initializer):
def lstm_cell():
return tf.contrib.rnn.LayerNormBasicLSTMCell(
hparams.gen_rnn_size, reuse=reuse)
attn_cell = lstm_cell
if FLAGS.zoneout_drop_prob > 0.0:
def attn_cell():
return zoneout.ZoneoutWrapper(
lstm_cell(),
zoneout_drop_prob=FLAGS.zoneout_drop_prob,
is_training=is_training)
cell_gen = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.gen_num_layers)],
state_is_tuple=True)
initial_state = cell_gen.zero_state(FLAGS.batch_size, tf.float32)
with tf.variable_scope('rnn'):
sequence, logits, log_probs = [], [], []
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
softmax_w = tf.get_variable('softmax_w',
[hparams.gen_rnn_size, FLAGS.vocab_size])
softmax_b = tf.get_variable('softmax_b', [FLAGS.vocab_size])
rnn_inputs = tf.nn.embedding_lookup(embedding, inputs)
for t in xrange(FLAGS.sequence_length):
if t > 0:
tf.get_variable_scope().reuse_variables()
# Input to the model is the first token to provide context. The
# model will then predict token t > 0.
if t == 0:
# Always provide the real input at t = 0.
state_gen = initial_state
rnn_inp = rnn_inputs[:, t]
# If the target at the last time-step was present, read in the real.
# If the target at the last time-step was not present, read in the fake.
else:
real_rnn_inp = rnn_inputs[:, t]
fake_rnn_inp = tf.nn.embedding_lookup(embedding, fake)
# Use teacher forcing.
if (is_training and
FLAGS.gen_training_strategy == 'cross_entropy') or is_validating:
rnn_inp = real_rnn_inp
else:
# Note that targets_t-1 == inputs_(t)
rnn_inp = tf.where(targets_present[:, t - 1], real_rnn_inp,
fake_rnn_inp)
# RNN.
rnn_out, state_gen = cell_gen(rnn_inp, state_gen)
logit = tf.matmul(rnn_out, softmax_w) + softmax_b
# Real sample.
real = targets[:, t]
# Fake sample.
categorical = tf.contrib.distributions.Categorical(logits=logit)
fake = categorical.sample()
log_prob = categorical.log_prob(fake)
# Output for Generator will either be generated or the target.
# If present: Return real.
# If not present: Return fake.
output = tf.where(targets_present[:, t], real, fake)
# Append to lists.
sequence.append(output)
logits.append(logit)
log_probs.append(log_prob)
# Produce the RNN state had the model operated only
# over real data.
real_state_gen = initial_state
for t in xrange(FLAGS.sequence_length):
tf.get_variable_scope().reuse_variables()
rnn_inp = rnn_inputs[:, t]
# RNN.
rnn_out, real_state_gen = cell_gen(rnn_inp, real_state_gen)
final_state = real_state_gen
return (tf.stack(sequence, axis=1), tf.stack(logits, axis=1), tf.stack(
log_probs, axis=1), initial_state, final_state)
def discriminator(hparams, sequence, is_training, reuse=None):
"""Define the Discriminator graph.
  Args:
    hparams: Hyperparameters for the MaskGAN.
    sequence: int Tensor of token ids, shape [FLAGS.batch_size, FLAGS.sequence_length].
    is_training: Boolean, whether the graph is built for training.
    reuse: Boolean, whether to reuse variables in the 'dis' scope.
  Returns:
    predictions: per-timestep logits, shape [FLAGS.batch_size, FLAGS.sequence_length].
"""
tf.logging.warning(
      'Unidirectional discriminative model is not a useful model for this '
'MaskGAN because future context is needed. Use only for debugging '
'purposes.')
sequence = tf.cast(sequence, tf.int32)
if FLAGS.dis_share_embedding:
assert hparams.dis_rnn_size == hparams.gen_rnn_size, (
'If you wish to share Discriminator/Generator embeddings, they must be'
' same dimension.')
with tf.variable_scope('gen/rnn', reuse=True):
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.gen_rnn_size])
with tf.variable_scope('dis', reuse=reuse):
def lstm_cell():
return tf.contrib.rnn.LayerNormBasicLSTMCell(
hparams.dis_rnn_size, reuse=reuse)
attn_cell = lstm_cell
if FLAGS.zoneout_drop_prob > 0.0:
def attn_cell():
return zoneout.ZoneoutWrapper(
lstm_cell(),
zoneout_drop_prob=FLAGS.zoneout_drop_prob,
is_training=is_training)
cell_dis = tf.contrib.rnn.MultiRNNCell(
[attn_cell() for _ in range(hparams.dis_num_layers)],
state_is_tuple=True)
state_dis = cell_dis.zero_state(FLAGS.batch_size, tf.float32)
with tf.variable_scope('rnn') as vs:
predictions = []
if not FLAGS.dis_share_embedding:
embedding = tf.get_variable('embedding',
[FLAGS.vocab_size, hparams.dis_rnn_size])
rnn_inputs = tf.nn.embedding_lookup(embedding, sequence)
for t in xrange(FLAGS.sequence_length):
if t > 0:
tf.get_variable_scope().reuse_variables()
rnn_in = rnn_inputs[:, t]
rnn_out, state_dis = cell_dis(rnn_in, state_dis)
# Prediction is linear output for Discriminator.
pred = tf.contrib.layers.linear(rnn_out, 1, scope=vs)
predictions.append(pred)
predictions = tf.stack(predictions, axis=1)
return tf.squeeze(predictions, axis=2)
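# Illustrative call sketch, not part of the original file. `hparams` is
# assumed to expose gen_rnn_size, gen_num_layers, dis_rnn_size and
# dis_num_layers (the attributes read above), and the input tensors follow
# the [batch_size, sequence_length] shapes implied by the slicing.
#
#   seq, logits, log_probs, init_state, final_state = generator(
#       hparams, inputs, targets, targets_present,
#       is_training=True, is_validating=False)
#   dis_logits = discriminator(hparams, seq, is_training=True)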
| apache-2.0 | 5,208,368,767,128,752,000 | 2,065,634,398,712,808,200 | 32.800948 | 80 | 0.6182 | false |
linktlh/Toontown-journey | toontown/suit/DistributedSuit.py | 3 | 25748 | import copy
from direct.directnotify import DirectNotifyGlobal
from direct.directtools.DirectGeometry import CLAMP
from direct.distributed.ClockDelta import *
from direct.fsm import ClassicFSM, State
from direct.fsm import State
from direct.interval.IntervalGlobal import *
from direct.task import Task
import math
from pandac.PandaModules import *
import random
import DistributedSuitBase
import DistributedSuitPlanner
import Suit
import SuitBase
import SuitDialog
import SuitTimings
from otp.avatar import DistributedAvatar
from otp.otpbase import OTPLocalizer
from toontown.battle import BattleProps
from toontown.battle import DistributedBattle
from toontown.chat.ChatGlobals import *
from toontown.distributed.DelayDeletable import DelayDeletable
from toontown.nametag import NametagGlobals
from toontown.nametag.NametagGlobals import *
from toontown.suit.SuitLegList import *
from toontown.toonbase import ToontownGlobals
STAND_OUTSIDE_DOOR = 2.5
BATTLE_IGNORE_TIME = 6
BATTLE_WAIT_TIME = 3
CATCHUP_SPEED_MULTIPLIER = 3
ALLOW_BATTLE_DETECT = 1
class DistributedSuit(DistributedSuitBase.DistributedSuitBase, DelayDeletable):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedSuit')
ENABLE_EXPANDED_NAME = 0
def __init__(self, cr):
try:
self.DistributedSuit_initialized
return
except:
self.DistributedSuit_initialized = 1
DistributedSuitBase.DistributedSuitBase.__init__(self, cr)
self.spDoId = None
self.pathEndpointStart = 0
self.pathEndpointEnd = 0
self.minPathLen = 0
self.maxPathLen = 0
self.pathPositionIndex = 0
self.pathPositionTimestamp = 0.0
self.pathState = 0
self.path = None
self.localPathState = 0
self.currentLeg = -1
self.pathStartTime = 0.0
self.legList = None
self.initState = None
self.finalState = None
self.buildingSuit = 0
self.fsm = ClassicFSM.ClassicFSM('DistributedSuit', [
State.State('Off',
self.enterOff,
self.exitOff, [
'FromSky',
'FromSuitBuilding',
'Walk',
'Battle',
'neutral',
'ToToonBuilding',
'ToSuitBuilding',
'ToCogHQ',
'FromCogHQ',
'ToSky',
'FlyAway',
'DanceThenFlyAway',
'WalkToStreet',
'WalkFromStreet']),
State.State('FromSky',
self.enterFromSky,
self.exitFromSky, [
'Walk',
'Battle',
'neutral',
'ToSky',
'WalkFromStreet']),
State.State('FromSuitBuilding',
self.enterFromSuitBuilding,
self.exitFromSuitBuilding, [
'WalkToStreet',
'Walk',
'Battle',
'neutral',
'ToSky']),
State.State('WalkToStreet',
self.enterWalkToStreet,
self.exitWalkToStreet, [
'Walk',
'Battle',
'neutral',
'ToSky',
'ToToonBuilding',
'ToSuitBuilding',
'ToCogHQ',
'WalkFromStreet']),
State.State('WalkFromStreet',
self.enterWalkFromStreet,
self.exitWalkFromStreet, [
'ToToonBuilding',
'ToSuitBuilding',
'ToCogHQ',
'Battle',
'neutral',
'ToSky']),
State.State('Walk',
self.enterWalk,
self.exitWalk, [
'WaitForBattle',
'Battle',
'neutral',
'WalkFromStreet',
'ToSky',
'ToCogHQ',
'Walk']),
State.State('Battle',
self.enterBattle,
self.exitBattle, [
'Walk',
'ToToonBuilding',
'ToCogHQ',
'ToSuitBuilding',
'ToSky']),
State.State('neutral',
self.enterNeutral,
self.exitNeutral, []),
State.State('WaitForBattle',
self.enterWaitForBattle,
self.exitWaitForBattle, [
'Battle',
'neutral',
'Walk',
'WalkToStreet',
'WalkFromStreet',
'ToToonBuilding',
'ToCogHQ',
'ToSuitBuilding',
'ToSky']),
State.State('ToToonBuilding',
self.enterToToonBuilding,
self.exitToToonBuilding, [
'neutral',
'Battle']),
State.State('ToSuitBuilding',
self.enterToSuitBuilding,
self.exitToSuitBuilding, [
'neutral',
'Battle']),
State.State('ToCogHQ',
self.enterToCogHQ,
self.exitToCogHQ, [
'neutral',
'Battle']),
State.State('FromCogHQ',
self.enterFromCogHQ,
self.exitFromCogHQ, [
'neutral',
'Battle',
'Walk']),
State.State('ToSky',
self.enterToSky,
self.exitToSky, [
'Battle']),
State.State('FlyAway',
self.enterFlyAway,
self.exitFlyAway,
[]),
State.State('DanceThenFlyAway',
self.enterDanceThenFlyAway,
self.exitDanceThenFlyAway,
[])],
'Off', 'Off')
self.fsm.enterInitialState()
self.soundSequenceList = []
self.__currentDialogue = None
return
def generate(self):
DistributedSuitBase.DistributedSuitBase.generate(self)
def disable(self):
for soundSequence in self.soundSequenceList:
soundSequence.finish()
self.soundSequenceList = []
self.notify.debug('DistributedSuit %d: disabling' % self.getDoId())
self.resumePath(0)
self.stopPathNow()
self.setState('Off')
DistributedSuitBase.DistributedSuitBase.disable(self)
def delete(self):
try:
self.DistributedSuit_deleted
except:
self.DistributedSuit_deleted = 1
self.notify.debug('DistributedSuit %d: deleting' % self.getDoId())
del self.fsm
DistributedSuitBase.DistributedSuitBase.delete(self)
def setPathEndpoints(self, start, end, minPathLen, maxPathLen):
if self.pathEndpointStart == start and self.pathEndpointEnd == end and self.minPathLen == minPathLen and self.maxPathLen == maxPathLen and self.path != None:
return
self.pathEndpointStart = start
self.pathEndpointEnd = end
self.minPathLen = minPathLen
self.maxPathLen = maxPathLen
self.path = None
self.pathLength = 0
self.currentLeg = -1
self.legList = None
if self.maxPathLen == 0:
return
if not self.verifySuitPlanner():
return
self.startPoint = self.sp.pointIndexes[self.pathEndpointStart]
self.endPoint = self.sp.pointIndexes[self.pathEndpointEnd]
path = self.sp.genPath(self.startPoint, self.endPoint, self.minPathLen, self.maxPathLen)
self.setPath(path)
self.makeLegList()
return
def verifySuitPlanner(self):
if self.sp == None and self.spDoId != 0:
self.notify.warning('Suit %d does not have a suit planner! Expected SP doId %s.' % (self.doId, self.spDoId))
self.sp = self.cr.doId2do.get(self.spDoId, None)
if self.sp == None:
return 0
return 1
def setPathPosition(self, index, timestamp):
if not self.verifySuitPlanner():
return
if self.path == None:
self.setPathEndpoints(self.pathEndpointStart, self.pathEndpointEnd, self.minPathLen, self.maxPathLen)
self.pathPositionIndex = index
self.pathPositionTimestamp = globalClockDelta.networkToLocalTime(timestamp)
if self.legList != None:
self.pathStartTime = self.pathPositionTimestamp - self.legList.getStartTime(self.pathPositionIndex)
return
def setPathState(self, state):
self.pathState = state
self.resumePath(state)
def debugSuitPosition(self, elapsed, currentLeg, x, y, timestamp):
now = globalClock.getFrameTime()
chug = globalClock.getRealTime() - now
messageAge = now - globalClockDelta.networkToLocalTime(timestamp, now)
if messageAge < -(chug + 0.5) or messageAge > chug + 1.0:
print 'Apparently out of sync with AI by %0.2f seconds. Suggest resync!' % messageAge
return
localElapsed = now - self.pathStartTime
timeDiff = localElapsed - (elapsed + messageAge)
if abs(timeDiff) > 0.2:
print "%s (%d) appears to be %0.2f seconds out of sync along its path. Suggest '~cogs sync'." % (self.getName(), self.getDoId(), timeDiff)
return
if self.legList == None:
print "%s (%d) doesn't have a legList yet." % (self.getName(), self.getDoId())
return
netPos = Point3(x, y, 0.0)
leg = self.legList.getLeg(currentLeg)
calcPos = leg.getPosAtTime(elapsed - leg.getStartTime())
calcPos.setZ(0.0)
calcDelta = Vec3(netPos - calcPos)
diff = calcDelta.length()
if diff > 4.0:
print '%s (%d) is %0.2f feet from the AI computed path!' % (self.getName(), self.getDoId(), diff)
print 'Probably your DNA files are out of sync.'
return
localPos = Point3(self.getX(), self.getY(), 0.0)
localDelta = Vec3(netPos - localPos)
diff = localDelta.length()
if diff > 10.0:
print '%s (%d) in state %s is %0.2f feet from its correct position!' % (self.getName(),
self.getDoId(),
self.fsm.getCurrentState().getName(),
diff)
print 'Should be at (%0.2f, %0.2f), but is at (%0.2f, %0.2f).' % (x,
y,
localPos[0],
localPos[1])
return
print '%s (%d) is in the correct position.' % (self.getName(), self.getDoId())
return
def denyBattle(self):
DistributedSuitBase.DistributedSuitBase.denyBattle(self)
self.disableBattleDetect()
def resumePath(self, state):
if self.localPathState != state:
self.localPathState = state
if state == 0:
self.stopPathNow()
elif state == 1:
self.moveToNextLeg(None)
elif state == 2:
self.stopPathNow()
if self.sp != None:
self.setState('Off')
self.setState('FlyAway')
elif state == 3:
pass
elif state == 4:
self.stopPathNow()
if self.sp != None:
self.setState('Off')
self.setState('DanceThenFlyAway')
else:
self.notify.error('No such state as: ' + str(state))
return
def moveToNextLeg(self, task):
if self.legList == None:
self.notify.warning('Suit %d does not have a path!' % self.getDoId())
return Task.done
now = globalClock.getFrameTime()
elapsed = now - self.pathStartTime
nextLeg = self.legList.getLegIndexAtTime(elapsed, self.currentLeg)
numLegs = self.legList.getNumLegs()
if self.currentLeg != nextLeg:
self.currentLeg = nextLeg
self.doPathLeg(self.legList.getLeg(nextLeg), elapsed - self.legList.getStartTime(nextLeg))
nextLeg += 1
if nextLeg < numLegs:
nextTime = self.legList.getStartTime(nextLeg)
delay = nextTime - elapsed
name = self.taskName('move')
taskMgr.remove(name)
taskMgr.doMethodLater(delay, self.moveToNextLeg, name)
return Task.done
def doPathLeg(self, leg, time):
self.fsm.request(leg.getTypeName(), [leg, time])
return 0
def stopPathNow(self):
name = self.taskName('move')
taskMgr.remove(name)
self.currentLeg = -1
def calculateHeading(self, a, b):
xdelta = b[0] - a[0]
ydelta = b[1] - a[1]
if ydelta == 0:
if xdelta > 0:
return -90
else:
return 90
elif xdelta == 0:
if ydelta > 0:
return 0
else:
return 180
else:
angle = math.atan2(ydelta, xdelta)
return rad2Deg(angle) - 90
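    # Note: the -90 degree offset converts atan2's math convention (0 degrees
    # along +X) into Panda3D's heading convention, where H = 0 faces +Y; e.g.
    # a step purely along +X comes out as H = -90, matching the special case
    # handled above.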
def beginBuildingMove(self, moveIn, doneEvent, suit = 0):
doorPt = Point3(0)
buildingPt = Point3(0)
streetPt = Point3(0)
if self.virtualPos:
doorPt.assign(self.virtualPos)
else:
doorPt.assign(self.getPos())
if moveIn:
streetPt = self.prevPointPos()
else:
streetPt = self.currPointPos()
dx = doorPt[0] - streetPt[0]
dy = doorPt[1] - streetPt[1]
buildingPt = Point3(doorPt[0] + dx, doorPt[1] + dy, doorPt[2])
if moveIn:
if suit:
moveTime = SuitTimings.toSuitBuilding
else:
moveTime = SuitTimings.toToonBuilding
return self.beginMove(doneEvent, buildingPt, time=moveTime)
else:
return self.beginMove(doneEvent, doorPt, buildingPt, time=SuitTimings.fromSuitBuilding)
return None
def setSPDoId(self, doId):
self.spDoId = doId
self.sp = self.cr.doId2do.get(doId, None)
if self.sp == None and self.spDoId != 0:
self.notify.warning('Suit %s created before its suit planner, %d' % (self.doId, self.spDoId))
return
def d_requestBattle(self, pos, hpr):
self.cr.playGame.getPlace().setState('WaitForBattle')
self.sendUpdate('requestBattle', [pos[0],
pos[1],
pos[2],
hpr[0],
hpr[1],
hpr[2]])
def __handleToonCollision(self, collEntry):
if not base.localAvatar.wantBattles:
return
toonId = base.localAvatar.getDoId()
self.notify.debug('Distributed suit: requesting a Battle with ' + 'toon: %d' % toonId)
self.d_requestBattle(self.getPos(), self.getHpr())
self.setState('WaitForBattle')
def setAnimState(self, state):
self.setState(state)
def enterFromSky(self, leg, time):
self.enableBattleDetect('fromSky', self.__handleToonCollision)
self.loop('neutral', 0)
if not self.verifySuitPlanner():
return
a = leg.getPosA()
b = leg.getPosB()
h = self.calculateHeading(a, b)
self.setPosHprScale(a[0], a[1], a[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = self.beginSupaFlyMove(a, 1, 'fromSky')
self.mtrack.start(time)
def exitFromSky(self):
self.disableBattleDetect()
self.mtrack.finish()
del self.mtrack
self.detachPropeller()
def enterWalkToStreet(self, leg, time):
self.enableBattleDetect('walkToStreet', self.__handleToonCollision)
self.loop('walk', 0)
a = leg.getPosA()
b = leg.getPosB()
delta = Vec3(b - a)
length = delta.length()
delta *= (length - STAND_OUTSIDE_DOOR) / length
a1 = Point3(b - delta)
self.enableRaycast(1)
h = self.calculateHeading(a, b)
self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b, startPos=a1), name=self.taskName('walkToStreet'))
self.mtrack.start(time)
def exitWalkToStreet(self):
self.disableBattleDetect()
self.enableRaycast(0)
self.mtrack.finish()
del self.mtrack
def enterWalkFromStreet(self, leg, time):
self.enableBattleDetect('walkFromStreet', self.__handleToonCollision)
self.loop('walk', 0)
a = leg.getPosA()
b = leg.getPosB()
delta = Vec3(b - a)
length = delta.length()
delta *= (length - STAND_OUTSIDE_DOOR) / length
b1 = Point3(a + delta)
self.enableRaycast(1)
h = self.calculateHeading(a, b)
self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a), name=self.taskName('walkFromStreet'))
self.mtrack.start(time)
def exitWalkFromStreet(self):
self.disableBattleDetect()
self.enableRaycast(0)
self.mtrack.finish()
del self.mtrack
def enterWalk(self, leg, time):
self.enableBattleDetect('bellicose', self.__handleToonCollision)
self.loop('walk', 0)
a = leg.getPosA()
b = leg.getPosB()
h = self.calculateHeading(a, b)
pos = leg.getPosAtTime(time)
self.setPosHprScale(pos[0], pos[1], pos[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b, startPos=a), name=self.taskName('bellicose'))
self.mtrack.start(time)
def exitWalk(self):
self.disableBattleDetect()
self.mtrack.pause()
del self.mtrack
def enterToSky(self, leg, time):
self.enableBattleDetect('toSky', self.__handleToonCollision)
if not self.verifySuitPlanner():
return
a = leg.getPosA()
b = leg.getPosB()
h = self.calculateHeading(a, b)
self.setPosHprScale(b[0], b[1], b[2], h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = self.beginSupaFlyMove(b, 0, 'toSky')
self.mtrack.start(time)
def exitToSky(self):
self.disableBattleDetect()
self.mtrack.finish()
del self.mtrack
self.detachPropeller()
def enterFromSuitBuilding(self, leg, time):
self.enableBattleDetect('fromSuitBuilding', self.__handleToonCollision)
self.loop('walk', 0)
if not self.verifySuitPlanner():
return
a = leg.getPosA()
b = leg.getPosB()
delta = Vec3(b - a)
length = delta.length()
delta2 = delta * (self.sp.suitWalkSpeed * leg.getLegTime()) / length
delta *= (length - STAND_OUTSIDE_DOOR) / length
b1 = Point3(b - delta)
a1 = Point3(b1 - delta2)
self.enableRaycast(1)
h = self.calculateHeading(a, b)
self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a1), name=self.taskName('fromSuitBuilding'))
self.mtrack.start(time)
def exitFromSuitBuilding(self):
self.disableBattleDetect()
self.mtrack.finish()
del self.mtrack
def enterToToonBuilding(self, leg, time):
self.loop('neutral', 0)
def exitToToonBuilding(self):
pass
def enterToSuitBuilding(self, leg, time):
self.loop('walk', 0)
if not self.verifySuitPlanner():
return
a = leg.getPosA()
b = leg.getPosB()
delta = Vec3(b - a)
length = delta.length()
delta2 = delta * (self.sp.suitWalkSpeed * leg.getLegTime()) / length
delta *= (length - STAND_OUTSIDE_DOOR) / length
a1 = Point3(a + delta)
b1 = Point3(a1 + delta2)
self.enableRaycast(1)
h = self.calculateHeading(a, b)
self.setHprScale(h, 0.0, 0.0, 1.0, 1.0, 1.0)
self.mtrack = Sequence(LerpPosInterval(self, leg.getLegTime(), b1, startPos=a1), name=self.taskName('toSuitBuilding'))
self.mtrack.start(time)
def exitToSuitBuilding(self):
self.mtrack.finish()
del self.mtrack
def enterToCogHQ(self, leg, time):
self.loop('neutral', 0)
def exitToCogHQ(self):
pass
def enterFromCogHQ(self, leg, time):
self.loop('neutral', 0)
self.detachNode()
def exitFromCogHQ(self):
self.reparentTo(render)
def enterBattle(self):
DistributedSuitBase.DistributedSuitBase.enterBattle(self)
self.resumePath(0)
def enterNeutral(self):
self.notify.debug('DistributedSuit: Neutral (entering a Door)')
self.resumePath(0)
self.loop('neutral', 0)
def exitNeutral(self):
pass
def enterWaitForBattle(self):
DistributedSuitBase.DistributedSuitBase.enterWaitForBattle(self)
self.resumePath(0)
def enterFlyAway(self):
self.enableBattleDetect('flyAway', self.__handleToonCollision)
if not self.verifySuitPlanner():
return
b = Point3(self.getPos())
self.mtrack = self.beginSupaFlyMove(b, 0, 'flyAway')
self.mtrack.start()
def exitFlyAway(self):
self.disableBattleDetect()
self.mtrack.finish()
del self.mtrack
self.detachPropeller()
def enterDanceThenFlyAway(self):
self.enableBattleDetect('danceThenFlyAway', self.__handleToonCollision)
if not self.verifySuitPlanner():
return
danceTrack = self.actorInterval('victory')
b = Point3(self.getPos())
flyMtrack = self.beginSupaFlyMove(b, 0, 'flyAway')
self.mtrack = Sequence(danceTrack, flyMtrack, name=self.taskName('danceThenFlyAway'))
self.mtrack.start()
def exitDanceThenFlyAway(self):
self.disableBattleDetect()
self.mtrack.finish()
del self.mtrack
self.detachPropeller()
def playCurrentDialogue(self, dialogue, chatFlags, interrupt = 1):
if interrupt and self.__currentDialogue is not None:
self.__currentDialogue.stop()
self.__currentDialogue = dialogue
if dialogue:
base.playSfx(dialogue, node=self)
elif chatFlags & CFSpeech != 0:
if self.nametag.getNumChatPages() > 0:
self.playDialogueForString(self.nametag.getChatText())
if self.soundChatBubble != None:
base.playSfx(self.soundChatBubble, node=self)
elif self.nametag.getStompChatText():
self.playDialogueForString(self.nametag.getStompChatText(), self.nametag.CHAT_STOMP_DELAY)
def playDialogueForString(self, chatString, delay = 0.0):
if len(chatString) == 0:
return
searchString = chatString.lower()
if searchString.find(OTPLocalizer.DialogSpecial) >= 0:
type = 'special'
elif searchString.find(OTPLocalizer.DialogExclamation) >= 0:
type = 'exclamation'
elif searchString.find(OTPLocalizer.DialogQuestion) >= 0:
type = 'question'
elif random.randint(0, 1):
type = 'statementA'
else:
type = 'statementB'
stringLength = len(chatString)
if stringLength <= OTPLocalizer.DialogLength1:
length = 1
elif stringLength <= OTPLocalizer.DialogLength2:
length = 2
elif stringLength <= OTPLocalizer.DialogLength3:
length = 3
else:
length = 4
self.playDialogue(type, length, delay)
def playDialogue(self, type, length, delay = 0.0):
dialogueArray = self.getDialogueArray()
if dialogueArray == None:
return
sfxIndex = None
if type == 'statementA' or type == 'statementB':
if length == 1:
sfxIndex = 0
elif length == 2:
sfxIndex = 1
elif length >= 3:
sfxIndex = 2
elif type == 'question':
sfxIndex = 3
elif type == 'exclamation':
sfxIndex = 4
elif type == 'special':
sfxIndex = 5
else:
            self.notify.error('unrecognized dialogue type: %s' % type)
if sfxIndex != None and sfxIndex < len(dialogueArray) and dialogueArray[sfxIndex] != None:
soundSequence = Sequence(Wait(delay), SoundInterval(dialogueArray[sfxIndex], node=None, listenerNode=base.localAvatar, loop=0, volume=1.0))
self.soundSequenceList.append(soundSequence)
soundSequence.start()
self.cleanUpSoundList()
return
def cleanUpSoundList(self):
removeList = []
for soundSequence in self.soundSequenceList:
if soundSequence.isStopped():
removeList.append(soundSequence)
for soundSequence in removeList:
self.soundSequenceList.remove(soundSequence)
| apache-2.0 | -2,203,010,179,095,892,000 | 7,811,133,819,989,899,000 | 35.678063 | 165 | 0.55115 | false |
patrickcurl/ztruck | dj/lib/python2.7/site-packages/django/core/serializers/xml_serializer.py | 95 | 15035 | """
XML serializer.
"""
from __future__ import unicode_literals
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.encoding import smart_text
from django.utils.xmlutils import SimplerXMLGenerator
class Serializer(base.Serializer):
"""
Serializes a QuerySet to XML.
"""
def indent(self, level):
if self.options.get('indent', None) is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent', None) * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
attrs = {"model": smart_text(obj._meta)}
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj._get_pk_val()
if obj_pk is not None:
attrs['pk'] = smart_text(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Called to handle each field on an object (except for ForeignKeys and
ManyToManyFields)
"""
self.indent(2)
self.xml.startElement("field", {
"name": field.name,
"type": field.get_internal_type()
})
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
self.xml.characters(field.value_to_string(obj))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey (we need to treat them slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(smart_text(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField. Related objects are only
serialized as references to the object's PK (i.e. the related *data*
is not dumped, just the relation).
"""
if field.rel.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.rel.to, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(smart_text(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': smart_text(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""
Helper to output the <field> element for relational fields
"""
self.indent(2)
self.xml.startElement("field", {
"name": field.name,
"rel": field.rel.__class__.__name__,
"to": smart_text(field.rel.to._meta),
})
class Deserializer(base.Deserializer):
"""
Deserialize XML.
"""
def __init__(self, stream_or_string, **options):
super(Deserializer, self).__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = options.pop('using', DEFAULT_DB_ALIAS)
self.ignore = options.pop('ignorenonexistent', False)
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""
Convert an <object> node to a DeserializedObject.
"""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.rel and isinstance(field.rel, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.rel and isinstance(field.rel, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
obj = base.build_instance(Model, data, self.db)
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(obj, m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.rel.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.rel.to._meta.pk.rel:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return field.rel.to._meta.get_field(field.rel.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
if hasattr(field.rel.to._default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = field.rel.to._default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
m2m_convert = lambda n: field.rel.to._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Helper to look up a model from a <object model=...> or a <field
rel=... to=...> node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute"
% (node.nodeName, attr))
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'"
% (node.nodeName, model_identifier))
def getInnerText(node):
"""
Get all the inner text of a DOM node (recursively).
"""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
"""
An expat parser hardened against XML bomb attacks.
Forbids DTDs, external entity references
"""
def __init__(self, *args, **kwargs):
_ExpatParser.__init__(self, *args, **kwargs)
self.setFeature(handler.feature_external_ges, False)
self.setFeature(handler.feature_external_pes, False)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def reset(self):
_ExpatParser.reset(self)
parser = self._parser
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EntityDeclHandler = self.entity_decl
parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super(DTDForbidden, self).__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden."""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super(EntitiesForbidden, self).__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super(ExternalReferenceForbidden, self).__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
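# Minimal demonstration, not part of Django. It shows the hardened parser
# rejecting a document that declares a DTD; it assumes only the standard
# library beyond this module's own imports.
if __name__ == '__main__':
    import io
    from xml.sax.handler import ContentHandler
    parser = DefusedExpatParser()
    parser.setContentHandler(ContentHandler())
    try:
        parser.parse(io.BytesIO(b'<!DOCTYPE foo [<!ENTITY a "b">]><foo/>'))
    except DTDForbidden as exc:
        print('blocked: %s' % exc)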
| apache-2.0 | -6,438,153,656,640,151,000 | 4,239,525,627,431,640,000 | 37.75 | 114 | 0.586498 | false |
irvingprog/python-pilas-experimental | pilasengine/ejemplos/ejemplos_a_revisar/asteroides/escena_juego.py | 7 | 4423 | import pilas
import piedra_espacial
import random
class Estado:
"Representa una escena dentro del juego."
def actualizar(self):
raise Exception("Tienes que sobrescribir este metodo...")
class Jugando(Estado):
"Representa el estado de juego."
def __init__(self, juego, nivel):
self.nivel = nivel
self.juego = juego
self.juego.crear_naves(cantidad=nivel * 3)
        # Once per second, tell this state to tick.
pilas.mundo.agregar_tarea(1, self.actualizar)
def actualizar(self):
if self.juego.ha_eliminado_todas_las_piedras():
self.juego.cambiar_estado(Iniciando(self.juego, self.nivel + 1))
return False
return True
class Iniciando(Estado):
"Estado que indica que el juego ha comenzado."
def __init__(self, juego, nivel):
        self.texto = pilas.actores.Texto("Iniciando el nivel %d" % nivel)
self.nivel = nivel
self.texto.color = pilas.colores.blanco
self.contador_de_segundos = 0
self.juego = juego
        # Once per second, tell this state to tick.
pilas.mundo.agregar_tarea(1, self.actualizar)
def actualizar(self):
self.contador_de_segundos += 1
if self.contador_de_segundos > 2:
self.juego.cambiar_estado(Jugando(self.juego, self.nivel))
self.texto.eliminar()
return False
        return True  # tell the task scheduler to keep this task running.
class PierdeVida(Estado):
def __init__(self, juego):
self.contador_de_segundos = 0
self.juego = juego
if self.juego.contador_de_vidas.le_quedan_vidas():
self.juego.contador_de_vidas.quitar_una_vida()
pilas.mundo.agregar_tarea(1, self.actualizar)
else:
juego.cambiar_estado(PierdeTodoElJuego(juego))
def actualizar(self):
self.contador_de_segundos += 1
if self.contador_de_segundos > 2:
self.juego.crear_nave()
return False
return True
class PierdeTodoElJuego(Estado):
def __init__(self, juego):
        # Show the "you have lost" message.
mensaje = pilas.actores.Texto("Lo siento, has perdido!")
        mensaje.color = pilas.colores.blanco
        mensaje.abajo = 240    # start with the text just above the visible area
        mensaje.abajo = [-20]  # assigning a list animates the text sliding down into view
def actualizar(self):
pass
class Juego(pilas.escena.Base):
"Es la escena que permite controlar la nave y jugar"
def __init__(self):
pilas.escena.Base.__init__(self)
def iniciar(self):
pilas.fondos.Espacio()
self.pulsa_tecla_escape.conectar(self.cuando_pulsa_tecla_escape)
self.piedras = []
self.crear_nave()
self.crear_contador_de_vidas()
self.cambiar_estado(Iniciando(self, 1))
self.puntaje = pilas.actores.Puntaje(x=280, y=220, color=pilas.colores.blanco)
def cambiar_estado(self, estado):
self.estado = estado
def crear_nave(self):
nave = pilas.actores.Nave()
nave.aprender(pilas.habilidades.SeMantieneEnPantalla)
nave.definir_enemigos(self.piedras, self.cuando_explota_asterioide)
self.colisiones.agregar(nave, self.piedras, self.explotar_y_terminar)
def cuando_explota_asterioide(self):
self.puntaje.aumentar(1)
def crear_contador_de_vidas(self):
import contador_de_vidas
self.contador_de_vidas = contador_de_vidas.ContadorDeVidas(3)
def cuando_pulsa_tecla_escape(self, *k, **kv):
"Regresa al menu principal."
import escena_menu
pilas.cambiar_escena(escena_menu.EscenaMenu())
def explotar_y_terminar(self, nave, piedra):
"Responde a la colision entre la nave y una piedra."
nave.eliminar()
self.cambiar_estado(PierdeVida(self))
def crear_naves(self, cantidad):
"Genera una cantidad especifica de naves en el escenario."
fuera_de_la_pantalla = [-600, -650, -700, -750, -800]
tamanos = ['grande', 'media', 'chica']
for x in range(cantidad):
x = random.choice(fuera_de_la_pantalla)
y = random.choice(fuera_de_la_pantalla)
t = random.choice(tamanos)
piedra_nueva = piedra_espacial.PiedraEspacial(self.piedras, x=x, y=y, tamano=t)
self.piedras.append(piedra_nueva)
def ha_eliminado_todas_las_piedras(self):
return len(self.piedras) == 0
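# Illustrative startup sketch; the scene change mirrors
# cuando_pulsa_tecla_escape() above, and pilas.iniciar() / pilas.ejecutar()
# are assumed to be the engine's usual entry points.
#
#   import pilas
#   pilas.iniciar()
#   import escena_juego
#   pilas.cambiar_escena(escena_juego.Juego())
#   pilas.ejecutar()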
| lgpl-3.0 | -3,002,619,493,418,516,500 | -6,520,881,009,431,489,000 | 30.147887 | 91 | 0.628307 | false |
hbrunn/OCB | addons/web_analytics/__openerp__.py | 305 | 1432 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Google Analytics',
'version': '1.0',
'category': 'Tools',
'complexity': "easy",
'description': """
Google Analytics.
=================
Collects web application usage with Google Analytics.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/website-builder',
'depends': ['web'],
'data': [
'views/web_analytics.xml',
],
'installable': True,
'active': False,
}
| agpl-3.0 | 4,614,989,328,097,445,000 | 5,791,783,318,096,118,000 | 33.926829 | 78 | 0.581006 | false |
ewindisch/nova | nova/tests/scheduler/test_caching_scheduler.py | 12 | 6554 | # Copyright (c) 2014 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova.openstack.common import timeutils
from nova.scheduler import caching_scheduler
from nova.scheduler import host_manager
from nova.tests.scheduler import test_scheduler
ENABLE_PROFILER = False
class CachingSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Caching Scheduler."""
driver_cls = caching_scheduler.CachingScheduler
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_run_periodic_tasks_loads_hosts(self, mock_up_hosts):
mock_up_hosts.return_value = []
context = mock.Mock()
self.driver.run_periodic_tasks(context)
self.assertTrue(mock_up_hosts.called)
self.assertEqual([], self.driver.all_host_states)
context.elevated.assert_called_with()
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_get_all_host_states_returns_cached_value(self, mock_up_hosts):
self.driver.all_host_states = []
result = self.driver._get_all_host_states(self.context)
self.assertFalse(mock_up_hosts.called)
self.assertEqual([], self.driver.all_host_states)
@mock.patch.object(caching_scheduler.CachingScheduler,
"_get_up_hosts")
def test_get_all_host_states_loads_hosts(self, mock_up_hosts):
mock_up_hosts.return_value = ["asdf"]
result = self.driver._get_all_host_states(self.context)
self.assertTrue(mock_up_hosts.called)
self.assertEqual(["asdf"], self.driver.all_host_states)
self.assertEqual(["asdf"], result)
def test_get_up_hosts(self):
with mock.patch.object(self.driver.host_manager,
"get_all_host_states") as mock_get_hosts:
mock_get_hosts.return_value = ["asdf"]
result = self.driver._get_up_hosts(self.context)
self.assertTrue(mock_get_hosts.called)
self.assertEqual(mock_get_hosts.return_value, result)
def test_select_destination_raises_with_no_hosts(self):
fake_request_spec = self._get_fake_request_spec()
self.driver.all_host_states = []
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations,
self.context, fake_request_spec, {})
def test_select_destination_works(self):
fake_request_spec = self._get_fake_request_spec()
fake_host = self._get_fake_host_state()
self.driver.all_host_states = [fake_host]
result = self._test_select_destinations(fake_request_spec)
self.assertEqual(1, len(result))
self.assertEqual(result[0]["host"], fake_host.host)
def _test_select_destinations(self, request_spec):
return self.driver.select_destinations(
self.context, request_spec, {})
def _get_fake_request_spec(self):
flavor = {
"flavorid": "small",
"memory_mb": 512,
"root_gb": 1,
"ephemeral_gb": 1,
"vcpus": 1,
}
instance_properties = {
"os_type": "linux",
"project_id": "1234",
"memory_mb": 512,
"root_gb": 1,
"ephemeral_gb": 1,
"vcpus": 1,
}
request_spec = {
"instance_type": flavor,
"instance_properties": instance_properties,
"num_instances": 1,
}
return request_spec
def _get_fake_host_state(self, index=0):
host_state = host_manager.HostState(
'host_%s' % index,
'node_%s' % index)
host_state.free_ram_mb = 50000
host_state.service = {
"disabled": False,
"updated_at": timeutils.utcnow(),
"created_at": timeutils.utcnow(),
}
return host_state
def test_performance_check_select_destination(self):
hosts = 2
requests = 1
self.flags(service_down_time=240)
request_spec = self._get_fake_request_spec()
host_states = []
for x in xrange(hosts):
host_state = self._get_fake_host_state(x)
host_states.append(host_state)
self.driver.all_host_states = host_states
def run_test():
a = timeutils.utcnow()
for x in xrange(requests):
self.driver.select_destinations(
self.context, request_spec, {})
b = timeutils.utcnow()
c = b - a
seconds = (c.days * 24 * 60 * 60 + c.seconds)
microseconds = seconds * 1000 + c.microseconds / 1000.0
per_request_ms = microseconds / requests
return per_request_ms
per_request_ms = None
if ENABLE_PROFILER:
import pycallgraph
from pycallgraph import output
config = pycallgraph.Config(max_depth=10)
config.trace_filter = pycallgraph.GlobbingFilter(exclude=[
'pycallgraph.*',
'unittest.*',
'nova.tests.*',
])
graphviz = output.GraphvizOutput(output_file='scheduler.png')
with pycallgraph.PyCallGraph(output=graphviz):
per_request_ms = run_test()
else:
per_request_ms = run_test()
        # This has proved to be around 1 ms on a random dev box,
        # but it is here so you can easily do simple performance testing.
self.assertTrue(per_request_ms < 1000)
if __name__ == '__main__':
# A handy tool to help profile the schedulers performance
ENABLE_PROFILER = True
import unittest
suite = unittest.TestSuite()
test = "test_performance_check_select_destination"
test_case = CachingSchedulerTestCase(test)
suite.addTest(test_case)
runner = unittest.TextTestRunner()
runner.run(suite)
| apache-2.0 | 1,472,988,294,414,459,400 | 5,657,719,759,465,534,000 | 33.135417 | 78 | 0.603448 | false |
chenjiancan/rt-thread | bsp/tm4c129x/rtconfig.py | 21 | 2501 | # BSP Note: For TI EK-TM4C1294XL Tiva C Series Connected LaunchPad (REV D)
import os
# toolchains options
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='keil'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
#device options
PART_TYPE = 'PART_TM4C129XNCZAD'
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'D:\ArdaArmTools\Sourcery_Lite\bin'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:\Keil_v5'
elif CROSS_TOOL == 'iar':
print '================ERROR============================'
    print 'IAR is not supported yet!'
print '================================================='
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
#BUILD = 'debug'
BUILD = 'release'
if PLATFORM == 'gcc':
# tool-chains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'axf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=softfp -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rtthread-tm4c129x.map,-cref,-u,Reset_Handler -T tm4c_rom.ld'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M4.fp '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter tm4c_rom.sct --info sizes --info totals --info unused --info veneers --list rtthread-tm4c129x.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/INC'
LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/ARMCC/LIB'
EXEC_PATH += '/arm/armcc/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
| gpl-2.0 | 1,026,978,151,881,761,300 | -6,272,076,868,056,954,000 | 27.747126 | 141 | 0.570972 | false |
samanehsan/osf.io | scripts/tests/test_migrate_dates.py | 54 | 1512 | # -*- coding: utf-8 -*-
import datetime
from nose.tools import * # noqa
from scripts.osfstorage.utils import ensure_osf_files
from website import settings
ensure_osf_files(settings)
# Hack: Must configure add-ons before importing `OsfTestCase`
from website.addons.osfstorage.tests.factories import FileVersionFactory
from website.addons.osfstorage.model import OsfStorageFileRecord
from website.addons.osffiles.model import NodeFile
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.osfstorage.migrate_dates import main
class TestMigrateDates(OsfTestCase):
def setUp(self):
super(TestMigrateDates, self).setUp()
self.path = 'old-pizza'
self.project = ProjectFactory()
self.node_settings = self.project.get_addon('osfstorage')
self.node_file = NodeFile(path=self.path)
self.node_file.save()
self.node_file.reload()
self.date = self.node_file.date_modified
self.project.files_versions['old_pizza'] = [self.node_file._id]
self.project.save()
self.version = FileVersionFactory(date_modified=datetime.datetime.now())
self.record, _ = OsfStorageFileRecord.get_or_create(self.node_file.path, self.node_settings)
self.record.versions = [self.version]
self.record.save()
def test_migrate_dates(self):
assert_not_equal(self.version.date_modified, self.date)
main(dry_run=False)
assert_equal(self.version.date_created, self.date)
| apache-2.0 | -460,473,096,245,983,200 | 8,237,143,109,935,946,000 | 36.8 | 100 | 0.718254 | false |
jhamman/mtclim5 | mtclim/share.py | 1 | 3374 | '''
share.py
'''
# Mountain Climate Simulator, meteorological forcing disaggregator
# Copyright (C) 2015 Joe Hamman
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
constants = \
{'SECPERRAD': 13750.9871, # seconds per radian of hour angle
'RADPERDAY': 0.017214, # radians of Earth orbit per julian day
'RADPERDEG': 0.01745329, # radians per degree
'MINDECL': -0.4092797, # minimum declination (radians)
'DAYSOFF': 11.25, # julian day offset of winter solstice
'SRADDT': 30.0, # timestep for radiation routine (seconds)
# Note: Make sure that 3600 % SRADDT == 0
'MA': 28.9644e-3, # (kg mol-1) molecular weight of air
'R': 8.3143, # (m3 Pa mol-1 K-1) gas law constant
'G_STD': 9.80665, # (m s-2) standard gravitational accel.
'P_STD': 101325.0, # (Pa) standard pressure at 0. m elevation
'T_STD': 288.15, # (K) standard temp at 0. m elevation
'CP': 1010.0, # (J kg-1 K-1) specific heat of air
'LR_STD': 0.0065, # (-K m-1) standard temperature lapse rate
# optical airmass by degrees
'OPTAM': np.array([2.90, 3.05, 3.21, 3.39, 3.69, 3.82, 4.07, 4.37,
4.72, 5.12, 5.60, 6.18, 6.88, 7.77, 8.90, 10.39,
12.44, 15.36, 19.79, 26.96, 30.00]),
'KELVIN': 273.15,
'EPS': 0.62196351,
}
default_parameters = \
{'TDAYCOEF': 0.45, # (dim) daylight air temperature coefficient
# parameters for the snowpack algorithm
'SNOW_TCRIT': -6.0, # (deg C) critical temperature for snowmelt
'SNOW_TRATE': 0.042, # (cm/degC/day) snowmelt rate
# parameters for the radiation algorithm
# (dim) stands for dimensionless values
'TBASE': 0.870, # (dim) max inst. trans., 0m, nadir, dry atm
'ABASE': -6.1e-5, # (1/Pa) vapor pressure effect on
# transmittance
'C': 1.5, # (dim) radiation parameter
'B0': 0.031, # (dim) radiation parameter
'B1': 0.201, # (dim) radiation parameter
'B2': 0.185, # (dim) radiation parameter
'RAIN_SCALAR': 0.75, # (dim) correction to trans. for rain day
'DIF_ALB': 0.6, # (dim) diffuse albedo for horizon correction
'SC_INT': 1.32, # (MJ/m2/day) snow correction intercept
'SC_SLOPE': 0.096, # (MJ/m2/day/cm) snow correction slope
'site_elev': 0.,
'base_elev': 0.,
'tmax_lr': 0.0065,
'tmin_lr': 0.0065,
'site_isoh': None,
'base_isoh': None,
'site_lat': 0.,
'site_slope': 0.,
'site_aspect': 0.,
'site_east_horiz': 0.,
'site_west_horiz': 0.
}
default_options = \
{'SW_PREC_THRESH': 0.,
'VP_ITER': 'VP_ITER_ALWAYS',
'MTCLIM_SWE_CORR': False,
'LW_CLOUD': 'LW_CLOUD_DEARDORFF',
'LW_TYPE': 'LW_PRATA'}
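# A minimal sketch (not part of the original module) of how these constants
# combine in MTCLIM: site air pressure from the standard lapse-rate
# (hypsometric) relation, using P_STD, T_STD, LR_STD, G_STD, R and MA above.
def _air_pressure_sketch(elev_m):
    """Approximate air pressure (Pa) at elevation elev_m (meters)."""
    t1 = 1.0 - (constants['LR_STD'] * elev_m) / constants['T_STD']
    t2 = constants['G_STD'] / (constants['LR_STD'] * (constants['R'] / constants['MA']))
    return constants['P_STD'] * (t1 ** t2)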
| gpl-3.0 | -3,885,334,768,030,586,000 | 8,538,360,878,070,602,000 | 38.694118 | 71 | 0.613219 | false |
faun/django_test | tests/modeltests/aggregation/tests.py | 46 | 20519 | import datetime
from decimal import Decimal
from django.db.models import Avg, Sum, Count, Max, Min
from django.test import TestCase, Approximate
from models import Author, Publisher, Book, Store
class BaseAggregateTestCase(TestCase):
fixtures = ["initial_data.json"]
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEquals(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=1)
self.assertEqual(
b.name,
u'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 51.5),
(u'Practical Django Projects', 29.0),
(u'Python Web Development with Django', Approximate(30.3, places=1)),
(u'Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 2),
(u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
(u'Practical Django Projects', 1),
(u'Python Web Development with Django', 3),
(u'Sams Teach Yourself Django in 24 Hours', 1),
(u'The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 4.5),
(u'Brad Dayley', 3.0),
(u'Jacob Kaplan-Moss', 4.5),
(u'James Bennett', 4.0),
(u'Paul Bissex', 4.0),
(u'Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 1),
(u'Brad Dayley', 1),
(u'Jacob Kaplan-Moss', 1),
(u'James Bennett', 1),
(u'Jeffrey Forcier', 1),
(u'Paul Bissex', 1),
(u'Peter Norvig', 2),
(u'Stuart Russell', 1),
(u'Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
(u'Artificial Intelligence: A Modern Approach', 7),
(u'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
(u'Practical Django Projects', 3),
(u'Python Web Development with Django', 7),
(u'Sams Teach Yourself Django in 24 Hours', 1),
(u'The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
(u'Apress', Decimal("59.69")),
(u"Jonno's House of Books", None),
(u'Morgan Kaufmann', Decimal("75.00")),
(u'Prentice Hall', Decimal("112.49")),
(u'Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg('authors__age')).values('pk', 'isbn', 'mean_age')
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=1).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = Book.objects.values("rating").annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age")).order_by("rating")
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
(u'Adrian Holovaty', 32.0),
(u'Brad Dayley', None),
(u'Jacob Kaplan-Moss', 29.5),
(u'James Bennett', 34.0),
(u'Jeffrey Forcier', 27.0),
(u'Paul Bissex', 31.0),
(u'Peter Norvig', 46.0),
(u'Stuart Russell', 57.0),
(u'Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008,12,3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1, book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=1).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__ge=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = Author.objects.annotate(num_friends=Count("friends__id", distinct=True)).filter(num_friends=0).order_by("pk")
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).annotate(num_books=Count("book__id")).filter(num_books__gt=1)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = Book.objects.annotate(num_authors=Count("authors__id")).filter(authors__name__contains="Norvig", num_authors__gt=1).aggregate(Avg("rating"))
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(earliest_book=Min("book__pubdate")).exclude(earliest_book=None).order_by("earliest_book").values()
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': u'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': u'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': u'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': u'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("pk", "isbn", "mean_age")
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = Book.objects.filter(pk=1).annotate(mean_age=Avg("authors__age")).values_list("mean_age", flat=True)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
| bsd-3-clause | 262,900,161,690,314,460 | -1,583,607,673,331,599,600 | 35.316814 | 155 | 0.488377 | false |
puttarajubr/commcare-hq | custom/_legacy/mvp/management/commands/mvp_test_indicator.py | 4 | 1081 | import dateutil
from django.core.management.base import BaseCommand
from corehq.apps.indicators.models import DynamicIndicatorDefinition
from corehq.apps.users.models import CommCareUser
from dimagi.utils.dates import DateSpan
from mvp.models import MVP
class Command(BaseCommand):
help = "Returns the value of the indicator slug in the domain, given the parameters"
args = '<domain> <indicator> <startdate> <enddate>'
def handle(self, *args, **options):
startdate = dateutil.parser.parse(args[2])
enddate = dateutil.parser.parse(args[3])
self.datespan = DateSpan(startdate, enddate)
self.domain = args[0]
self.user_ids = [user.user_id for user in CommCareUser.by_domain(self.domain)]
self.get_indicator_response(args[1])
def get_indicator_response(self, indicator_slug):
indicator = DynamicIndicatorDefinition.get_current(MVP.NAMESPACE, self.domain, indicator_slug,
wrap_correctly=True)
result = indicator.get_value(self.user_ids, datespan=self.datespan)
print result
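# Hedged usage sketch -- the domain and indicator slug below are illustrative
# placeholders, not values from this codebase:
#   python manage.py mvp_test_indicator some-mvp-domain some_indicator_slug \
#       2013-01-01 2013-02-01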
| bsd-3-clause | -3,473,477,899,405,745,000 | -6,977,854,546,690,844,000 | 42.24 | 102 | 0.716004 | false |
40223112/0623test1 | static/Brython3.1.1-20150328-091302/Lib/logging/config.py | 739 | 35619 | # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Configuration functions for the logging package for Python. The core package
is based on PEP 282 and comments thereto in comp.lang.python, and influenced
by Apache's log4j system.
Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, logging, logging.handlers, socket, struct, traceback, re
import io
try:
import _thread as thread
import threading
except ImportError: #pragma: no cover
thread = None
from socketserver import ThreadingTCPServer, StreamRequestHandler
DEFAULT_LOGGING_CONFIG_PORT = 9030
if sys.platform == "win32":
RESET_ERROR = 10054 #WSAECONNRESET
else:
RESET_ERROR = 104 #ECONNRESET
#
# The following code implements a socket listener for on-the-fly
# reconfiguration of logging.
#
# _listener holds the server object doing the listening
_listener = None
def fileConfig(fname, defaults=None, disable_existing_loggers=True):
"""
Read the logging configuration from a ConfigParser-format file.
This can be called several times from an application, allowing an end user
the ability to select from various pre-canned configurations (if the
developer provides a mechanism to present the choices and load the chosen
configuration).
"""
import configparser
cp = configparser.ConfigParser(defaults)
if hasattr(fname, 'readline'):
cp.read_file(fname)
else:
cp.read(fname)
formatters = _create_formatters(cp)
# critical section
logging._acquireLock()
try:
logging._handlers.clear()
del logging._handlerList[:]
# Handlers add themselves to logging._handlers
handlers = _install_handlers(cp, formatters)
_install_loggers(cp, handlers, disable_existing_loggers)
finally:
logging._releaseLock()
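# A minimal sketch of the ConfigParser format fileConfig() consumes; the
# section and option names follow the parsing code below:
#
#   [loggers]
#   keys = root
#
#   [handlers]
#   keys = console
#
#   [formatters]
#   keys = simple
#
#   [logger_root]
#   level = DEBUG
#   handlers = console
#
#   [handler_console]
#   class = StreamHandler
#   level = DEBUG
#   formatter = simple
#   args = (sys.stdout,)
#
#   [formatter_simple]
#   format = %(asctime)s %(levelname)s %(message)s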
def _resolve(name):
"""Resolve a dotted name to a global object."""
name = name.split('.')
used = name.pop(0)
found = __import__(used)
for n in name:
used = used + '.' + n
try:
found = getattr(found, n)
except AttributeError:
__import__(used)
found = getattr(found, n)
return found
def _strip_spaces(alist):
return map(lambda x: x.strip(), alist)
def _create_formatters(cp):
"""Create and return formatters"""
flist = cp["formatters"]["keys"]
if not len(flist):
return {}
flist = flist.split(",")
flist = _strip_spaces(flist)
formatters = {}
for form in flist:
sectname = "formatter_%s" % form
fs = cp.get(sectname, "format", raw=True, fallback=None)
dfs = cp.get(sectname, "datefmt", raw=True, fallback=None)
c = logging.Formatter
class_name = cp[sectname].get("class")
if class_name:
c = _resolve(class_name)
f = c(fs, dfs)
formatters[form] = f
return formatters
def _install_handlers(cp, formatters):
"""Install and return handlers"""
hlist = cp["handlers"]["keys"]
if not len(hlist):
return {}
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
handlers = {}
fixups = [] #for inter-handler references
for hand in hlist:
section = cp["handler_%s" % hand]
klass = section["class"]
fmt = section.get("formatter", "")
try:
klass = eval(klass, vars(logging))
except (AttributeError, NameError):
klass = _resolve(klass)
args = section["args"]
args = eval(args, vars(logging))
h = klass(*args)
if "level" in section:
level = section["level"]
h.setLevel(logging._levelNames[level])
if len(fmt):
h.setFormatter(formatters[fmt])
if issubclass(klass, logging.handlers.MemoryHandler):
target = section.get("target", "")
if len(target): #the target handler may not be loaded yet, so keep for later...
fixups.append((h, target))
handlers[hand] = h
#now all handlers are loaded, fixup inter-handler references...
for h, t in fixups:
h.setTarget(handlers[t])
return handlers
def _handle_existing_loggers(existing, child_loggers, disable_existing):
"""
When (re)configuring logging, handle loggers which were in the previous
configuration but are not in the new configuration. There's no point
deleting them as other threads may continue to hold references to them;
and by disabling them, you stop them doing any logging.
However, don't disable children of named loggers, as that's probably not
what was intended by the user. Also, allow existing loggers to NOT be
disabled if disable_existing is false.
"""
root = logging.root
for log in existing:
logger = root.manager.loggerDict[log]
if log in child_loggers:
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
else:
logger.disabled = disable_existing
def _install_loggers(cp, handlers, disable_existing):
"""Create and install loggers"""
# configure the root first
llist = cp["loggers"]["keys"]
llist = llist.split(",")
llist = list(map(lambda x: x.strip(), llist))
llist.remove("root")
section = cp["logger_root"]
root = logging.root
log = root
if "level" in section:
level = section["level"]
log.setLevel(logging._levelNames[level])
for h in root.handlers[:]:
root.removeHandler(h)
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
log.addHandler(handlers[hand])
#and now the others...
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
for log in llist:
section = cp["logger_%s" % log]
qn = section["qualname"]
propagate = section.getint("propagate", fallback=1)
logger = logging.getLogger(qn)
if qn in existing:
i = existing.index(qn) + 1 # start with the entry after qn
prefixed = qn + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(qn)
if "level" in section:
level = section["level"]
logger.setLevel(logging._levelNames[level])
for h in logger.handlers[:]:
logger.removeHandler(h)
logger.propagate = propagate
logger.disabled = 0
hlist = section["handlers"]
if len(hlist):
hlist = hlist.split(",")
hlist = _strip_spaces(hlist)
for hand in hlist:
logger.addHandler(handlers[hand])
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = 1
# elif disable_existing_loggers:
# logger.disabled = 1
_handle_existing_loggers(existing, child_loggers, disable_existing)
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
def valid_ident(s):
m = IDENTIFIER.match(s)
if not m:
raise ValueError('Not a valid Python identifier: %r' % s)
return True
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
"""A converting dictionary wrapper."""
def __getitem__(self, key):
value = dict.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def get(self, key, default=None):
value = dict.get(self, key, default)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, key, default=None):
value = dict.pop(self, key, default)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class ConvertingList(list):
"""A converting list wrapper."""
def __getitem__(self, key):
value = list.__getitem__(self, key)
result = self.configurator.convert(value)
#If the converted value is different, save for next time
if value is not result:
self[key] = result
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
def pop(self, idx=-1):
value = list.pop(self, idx)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
return result
class ConvertingTuple(tuple):
"""A converting tuple wrapper."""
def __getitem__(self, key):
value = tuple.__getitem__(self, key)
result = self.configurator.convert(value)
if value is not result:
if type(result) in (ConvertingDict, ConvertingList,
ConvertingTuple):
result.parent = self
result.key = key
return result
class BaseConfigurator(object):
"""
The configurator base class which defines some useful defaults.
"""
CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
DIGIT_PATTERN = re.compile(r'^\d+$')
value_converters = {
'ext' : 'ext_convert',
'cfg' : 'cfg_convert',
}
# We might want to use a different one, e.g. importlib
importer = staticmethod(__import__)
def __init__(self, config):
self.config = ConvertingDict(config)
self.config.configurator = self
def resolve(self, s):
"""
Resolve strings to objects using standard import and attribute
syntax.
"""
name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += '.' + frag
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
e, tb = sys.exc_info()[1:]
v = ValueError('Cannot resolve %r: %s' % (s, e))
v.__cause__, v.__traceback__ = e, tb
raise v
def ext_convert(self, value):
"""Default converter for the ext:// protocol."""
return self.resolve(value)
def cfg_convert(self, value):
"""Default converter for the cfg:// protocol."""
rest = value
m = self.WORD_PATTERN.match(rest)
if m is None:
raise ValueError("Unable to convert %r" % value)
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
#print d, rest
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if not self.DIGIT_PATTERN.match(idx):
d = d[idx]
else:
try:
n = int(idx) # try as number first (most likely)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError('Unable to convert '
'%r at %r' % (value, rest))
#rest should be empty
return d
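    # Sketch of what cfg_convert() resolves, assuming a config dict such as
    # {'handlers': {'console': {'level': 'DEBUG'}}, 'alist': ['x', 'y']}:
    #   cfg://handlers.console.level  -> 'DEBUG'  (DOT_PATTERN steps)
    #   cfg://alist[1]                -> 'y'      (INDEX_PATTERN, numeric index)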
def convert(self, value):
"""
Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.
"""
if not isinstance(value, ConvertingDict) and isinstance(value, dict):
value = ConvertingDict(value)
value.configurator = self
elif not isinstance(value, ConvertingList) and isinstance(value, list):
value = ConvertingList(value)
value.configurator = self
elif not isinstance(value, ConvertingTuple) and\
isinstance(value, tuple):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, str): # str for py3k
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
def configure_custom(self, config):
"""Configure an object with a user-supplied factory."""
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for name, value in props.items():
setattr(result, name, value)
return result
def as_tuple(self, value):
"""Utility function which converts lists to tuples."""
if isinstance(value, list):
value = tuple(value)
return value
class DictConfigurator(BaseConfigurator):
"""
Configure logging using a dictionary-like object to describe the
configuration.
"""
def configure(self):
"""Do the configuration."""
config = self.config
if 'version' not in config:
raise ValueError("dictionary doesn't specify a version")
if config['version'] != 1:
raise ValueError("Unsupported version: %s" % config['version'])
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
for name in handlers:
if name not in logging._handlers:
raise ValueError('No handler found with '
'name %r' % name)
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(logging._checkLevel(level))
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
# Do formatters first - they don't refer to anything else
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(
formatters[name])
except Exception as e:
raise ValueError('Unable to configure '
'formatter %r: %s' % (name, e))
# Next, do filters - they don't refer to anything else, either
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except Exception as e:
raise ValueError('Unable to configure '
'filter %r: %s' % (name, e))
# Next, do handlers - they refer to formatters and filters
# As handlers can refer to other handlers, sort the keys
# to allow a deterministic order of configuration
handlers = config.get('handlers', EMPTY_DICT)
deferred = []
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
if 'target not configured yet' in str(e):
deferred.append(name)
else:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Now do any that were deferred
for name in deferred:
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except Exception as e:
raise ValueError('Unable to configure handler '
'%r: %s' % (name, e))
# Next, do loggers - they refer to handlers and filters
#we don't want to lose the existing loggers,
#since other threads may have pointers to them.
#existing is set to contain all existing loggers,
#and as we go through the new configuration we
#remove any which are configured. At the end,
#what's left in existing is the set of loggers
#which were in the previous configuration but
#which are not in the new configuration.
root = logging.root
existing = list(root.manager.loggerDict.keys())
#The list needs to be sorted so that we can
#avoid disabling child loggers of explicitly
#named loggers. With a sorted list it is easier
#to find the child loggers.
existing.sort()
#We'll keep the list of existing loggers
#which are children of named loggers here...
child_loggers = []
#now set up the new ones...
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if name in existing:
i = existing.index(name) + 1 # look after name
prefixed = name + "."
pflen = len(prefixed)
num_existing = len(existing)
while i < num_existing:
if existing[i][:pflen] == prefixed:
child_loggers.append(existing[i])
i += 1
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except Exception as e:
raise ValueError('Unable to configure logger '
'%r: %s' % (name, e))
#Disable any old loggers. There's no point deleting
#them as other threads may continue to hold references
#and by disabling them, you stop them doing any logging.
#However, don't disable children of named loggers, as that's
#probably not what was intended by the user.
#for log in existing:
# logger = root.manager.loggerDict[log]
# if log in child_loggers:
# logger.level = logging.NOTSET
# logger.handlers = []
# logger.propagate = True
# elif disable_existing:
# logger.disabled = True
_handle_existing_loggers(existing, child_loggers,
disable_existing)
# And finally, do the root logger
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except Exception as e:
raise ValueError('Unable to configure root '
'logger: %s' % e)
finally:
logging._releaseLock()
def configure_formatter(self, config):
"""Configure a formatter from a dictionary."""
if '()' in config:
factory = config['()'] # for use in exception handler
try:
result = self.configure_custom(config)
except TypeError as te:
if "'format'" not in str(te):
raise
#Name of parameter changed from fmt to format.
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
style = config.get('style', '%')
result = logging.Formatter(fmt, dfmt, style)
return result
def configure_filter(self, config):
"""Configure a filter from a dictionary."""
if '()' in config:
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
def add_filters(self, filterer, filters):
"""Add filters to a filterer from a list of names."""
for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except Exception as e:
raise ValueError('Unable to add filter %r: %s' % (f, e))
def configure_handler(self, config):
"""Configure a handler from a dictionary."""
config_copy = dict(config) # for restoring in case of error
formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except Exception as e:
raise ValueError('Unable to set formatter '
'%r: %s' % (formatter, e))
level = config.pop('level', None)
filters = config.pop('filters', None)
if '()' in config:
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
factory = c
else:
cname = config.pop('class')
klass = self.resolve(cname)
#Special case for handler which refers to another handler
if issubclass(klass, logging.handlers.MemoryHandler) and\
'target' in config:
try:
th = self.config['handlers'][config['target']]
if not isinstance(th, logging.Handler):
config.update(config_copy) # restore for deferred cfg
raise TypeError('target not configured yet')
config['target'] = th
except Exception as e:
raise ValueError('Unable to set target handler '
'%r: %s' % (config['target'], e))
elif issubclass(klass, logging.handlers.SMTPHandler) and\
'mailhost' in config:
config['mailhost'] = self.as_tuple(config['mailhost'])
elif issubclass(klass, logging.handlers.SysLogHandler) and\
'address' in config:
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if "'stream'" not in str(te):
raise
#The argument name changed from strm to stream
#Retry with old name.
#This is so that code can be used with older Python versions
#(e.g. by Django)
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if level is not None:
result.setLevel(logging._checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
def add_handlers(self, logger, handlers):
"""Add handlers to a logger from a list of names."""
for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except Exception as e:
raise ValueError('Unable to add handler %r: %s' % (h, e))
def common_logger_config(self, logger, config, incremental=False):
"""
Perform configuration which is common to root and non-root loggers.
"""
level = config.get('level', None)
if level is not None:
logger.setLevel(logging._checkLevel(level))
if not incremental:
#Remove any existing handlers
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
"""Configure a non-root logger from a dictionary."""
logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if propagate is not None:
logger.propagate = propagate
def configure_root(self, config, incremental=False):
"""Configure a root logger from a dictionary."""
root = logging.getLogger()
self.common_logger_config(root, config, incremental)
dictConfigClass = DictConfigurator
def dictConfig(config):
"""Configure logging using a dictionary."""
dictConfigClass(config).configure()
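# A minimal dictConfig() sketch; the mandatory 'version' key is enforced in
# DictConfigurator.configure() above:
#
#   dictConfig({
#       'version': 1,
#       'formatters': {'simple': {'format': '%(levelname)s %(message)s'}},
#       'handlers': {'console': {'class': 'logging.StreamHandler',
#                                'formatter': 'simple'}},
#       'root': {'level': 'DEBUG', 'handlers': ['console']},
#   })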
def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
"""
Start up a socket server on the specified port, and listen for new
configurations.
These will be sent as a file suitable for processing by fileConfig().
Returns a Thread object on which you can call start() to start the server,
and which you can join() when appropriate. To stop the server, call
stopListening().
"""
if not thread: #pragma: no cover
raise NotImplementedError("listen() needs threading to work")
class ConfigStreamHandler(StreamRequestHandler):
"""
Handler for a logging configuration request.
It expects a completely new logging configuration and uses fileConfig
to install it.
"""
def handle(self):
"""
Handle a request.
Each request is expected to be a 4-byte length, packed using
struct.pack(">L", n), followed by the config file.
Uses fileConfig() to do the grunt work.
"""
try:
conn = self.connection
chunk = conn.recv(4)
if len(chunk) == 4:
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + conn.recv(slen - len(chunk))
chunk = chunk.decode("utf-8")
try:
import json
                        d = json.loads(chunk)
assert isinstance(d, dict)
dictConfig(d)
except:
#Apply new configuration.
file = io.StringIO(chunk)
try:
fileConfig(file)
except (KeyboardInterrupt, SystemExit): #pragma: no cover
raise
except:
traceback.print_exc()
if self.server.ready:
self.server.ready.set()
except socket.error as e:
if not isinstance(e.args, tuple):
raise
else:
errcode = e.args[0]
if errcode != RESET_ERROR:
raise
class ConfigSocketReceiver(ThreadingTCPServer):
"""
A simple TCP socket-based logging config receiver.
"""
allow_reuse_address = 1
def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
handler=None, ready=None):
ThreadingTCPServer.__init__(self, (host, port), handler)
logging._acquireLock()
self.abort = 0
logging._releaseLock()
self.timeout = 1
self.ready = ready
def serve_until_stopped(self):
import select
abort = 0
while not abort:
rd, wr, ex = select.select([self.socket.fileno()],
[], [],
self.timeout)
if rd:
self.handle_request()
logging._acquireLock()
abort = self.abort
logging._releaseLock()
self.socket.close()
class Server(threading.Thread):
def __init__(self, rcvr, hdlr, port):
super(Server, self).__init__()
self.rcvr = rcvr
self.hdlr = hdlr
self.port = port
self.ready = threading.Event()
def run(self):
server = self.rcvr(port=self.port, handler=self.hdlr,
ready=self.ready)
if self.port == 0:
self.port = server.server_address[1]
self.ready.set()
global _listener
logging._acquireLock()
_listener = server
logging._releaseLock()
server.serve_until_stopped()
return Server(ConfigSocketReceiver, ConfigStreamHandler, port)
def stopListening():
"""
Stop the listening server which was created with a call to listen().
"""
global _listener
logging._acquireLock()
try:
if _listener:
_listener.abort = 1
_listener = None
finally:
logging._releaseLock()
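# Sketch of a client for the listener above. The wire format (see
# ConfigStreamHandler.handle) is a 4-byte big-endian length prefix followed
# by the configuration payload; 'localhost' is an assumption here.
#
#   import socket, struct
#   payload = open('logging.ini', 'rb').read()
#   sock = socket.create_connection(('localhost', DEFAULT_LOGGING_CONFIG_PORT))
#   sock.sendall(struct.pack('>L', len(payload)) + payload)
#   sock.close()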
| gpl-3.0 | 3,095,500,722,424,637,000 | 336,144,802,424,022,400 | 37.548701 | 91 | 0.538533 | false |
CiscoSystems/avos | openstack_dashboard/dashboards/admin/flavors/panel.py | 46 | 1059 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.admin import dashboard
class Flavors(horizon.Panel):
name = _("Flavors")
slug = 'flavors'
permissions = ('openstack.services.compute',)
dashboard.Admin.register(Flavors)
| apache-2.0 | 876,417,983,764,858,100 | -6,944,001,215,828,772,000 | 32.09375 | 78 | 0.744098 | false |
pierreg/tensorflow | tensorflow/tools/test/system_info.py | 31 | 1089 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for getting system information during TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.tools.test import system_info_lib
def main(unused_args):
config = system_info_lib.gather_machine_configuration()
print(config)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 | -7,330,884,684,648,143,000 | 7,011,301,009,950,257,000 | 32 | 80 | 0.695133 | false |
impromptuartist/impromptuartist.github.io | node_modules/pygmentize-bundled/vendor/pygments/pygments/formatters/bbcode.py | 362 | 3314 | # -*- coding: utf-8 -*-
"""
pygments.formatters.bbcode
~~~~~~~~~~~~~~~~~~~~~~~~~~
BBcode formatter.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.formatter import Formatter
from pygments.util import get_bool_opt
__all__ = ['BBCodeFormatter']
class BBCodeFormatter(Formatter):
"""
Format tokens with BBcodes. These formatting codes are used by many
bulletin boards, so you can highlight your sourcecode with pygments before
posting it there.
This formatter has no support for background colors and borders, as there
are no common BBcode tags for that.
Some board systems (e.g. phpBB) don't support colors in their [code] tag,
so you can't use the highlighting together with that tag.
    Text in a [code] tag usually is shown with a monospace font (which this
    formatter can do with the ``monofont`` option), and spaces (which you
    need for indentation) are not removed.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`codetag`
If set to true, put the output into ``[code]`` tags (default:
``false``)
`monofont`
If set to true, add a tag to show the code with a monospace font
(default: ``false``).
"""
name = 'BBCode'
aliases = ['bbcode', 'bb']
filenames = []
def __init__(self, **options):
Formatter.__init__(self, **options)
self._code = get_bool_opt(options, 'codetag', False)
self._mono = get_bool_opt(options, 'monofont', False)
self.styles = {}
self._make_styles()
def _make_styles(self):
for ttype, ndef in self.style:
start = end = ''
if ndef['color']:
start += '[color=#%s]' % ndef['color']
end = '[/color]' + end
if ndef['bold']:
start += '[b]'
end = '[/b]' + end
if ndef['italic']:
start += '[i]'
end = '[/i]' + end
if ndef['underline']:
start += '[u]'
end = '[/u]' + end
# there are no common BBcodes for background-color and border
self.styles[ttype] = start, end
def format_unencoded(self, tokensource, outfile):
if self._code:
outfile.write('[code]')
if self._mono:
outfile.write('[font=monospace]')
lastval = ''
lasttype = None
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
if ttype == lasttype:
lastval += value
else:
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
lastval = value
lasttype = ttype
if lastval:
start, end = self.styles[lasttype]
outfile.write(''.join((start, lastval, end)))
if self._mono:
outfile.write('[/font]')
if self._code:
outfile.write('[/code]')
if self._code or self._mono:
outfile.write('\n')
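# Illustrative usage (not part of the original module), assuming the standard
# pygments entry points; the input snippet is arbitrary:
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    # Wraps the highlighted output in [code]...[/code] tags.
    print(highlight('print "hi"', PythonLexer(), BBCodeFormatter(codetag=True)))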
| mit | 3,350,044,071,107,625,500 | -943,289,500,574,051,200 | 29.40367 | 78 | 0.539529 | false |
midma101/AndIWasJustGoingToBed | .venv/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/lint.py | 500 | 4208 | from __future__ import absolute_import, division, unicode_literals
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError("StartTag not in PCDATA content model flag: %(tag)s" % {"tag": name})
if not isinstance(name, str):
raise LintError("Tag name is not a string: %(tag)r" % {"tag": name})
if not name:
raise LintError("Empty tag name")
if type == "StartTag" and name in voidElements:
raise LintError("Void element reported as StartTag token: %(tag)s" % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError("Non-void element reported as EmptyTag token: %(tag)s" % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
                for attr_name, attr_value in token["data"]:
                    if not isinstance(attr_name, str):
                        raise LintError("Attribute name is not a string: %(name)r" % {"name": attr_name})
                    if not attr_name:
                        raise LintError("Empty attribute name")
                    if not isinstance(attr_value, str):
                        raise LintError("Attribute value is not a string: %(value)r" % {"value": attr_value})
                # `name` still holds the tag name here, which the
                # content-model checks below rely on.
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError("Tag name is not a string: %(tag)r" % {"tag": name})
if not name:
raise LintError("Empty tag name")
if name in voidElements:
raise LintError("Void element reported as EndTag token: %(tag)s" % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError("EndTag (%(end)s) does not match StartTag (%(start)s)" % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError("Comment not in PCDATA content model flag")
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError("Attribute name is not a string: %(name)r" % {"name": data})
if not data:
raise LintError("%(type)s token with empty data" % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError("Non-space character(s) found in SpaceCharacters token: %(token)r" % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError("Doctype not in PCDATA content model flag: %(name)s" % {"name": name})
if not isinstance(name, str):
raise LintError("Tag name is not a string: %(tag)r" % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError("Unknown token type: %(type)s" % {"type": type})
yield token
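# Illustrative usage (not part of the original module; assumes Python 3
# semantics for `str`). Feeding a minimal, well-formed token stream through
# the filter; iterating raises LintError on the first malformed token.
if __name__ == "__main__":
    tokens = [
        {"type": "StartTag", "name": "p", "data": [("class", "x")]},
        {"type": "Characters", "data": "hello"},
        {"type": "EndTag", "name": "p"},
    ]
    for _token in Filter(tokens):
        pass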
| mit | -1,576,753,389,763,667,200 | -7,644,251,769,488,170,000 | 45.755556 | 128 | 0.513308 | false |
ryanjmccall/nupic | examples/opf/experiments/multistep/base/description.py | 3 | 15899 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalMultiStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'field1': { 'fieldname': u'field1',
'n': 100,
'name': u'field1',
'type': 'SDRCategoryEncoder',
'w': 21},
'field2': { 'clipInput': True,
'fieldname': u'field2',
'maxval': 50,
'minval': 0,
'n': 500,
'name': u'field2',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        #      Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'spatialImp' : 'oldpy',
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActivePerInhArea': 40,
'seed': 1956,
# coincInputPoolPct
            # What percent of the column's receptive field is available
# for potential synapses. At initialization time, we will
# choose coincInputPoolPct * (2*coincInputRadius+1)^2
'coincInputPoolPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 16,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
'predictionSteps': [1],
'predictedField': 'field1',
'dataSource': 'fillInBySubExperiment',
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
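# Illustrative arithmetic (not part of the original template): with a
# predictAheadTime of one hour and a 15-minute aggregation period,
# aggregationDivide yields 60 / 15 = 4, so the classifier would be
# configured with 'steps': '4'.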
# Fill in classifier steps
config['modelParams']['clParams']['steps'] = '%s' % \
(','.join([str(x) for x in config['predictionSteps']]))
# If the predicted field is field1 (category), use avg_err else if field 2
# (scalar) use aae as the metric
if config['predictedField'] == 'field1':
metricName = 'avg_err'
loggedMetrics = ['.*avg_err.*']
else:
metricName = 'aae'
loggedMetrics = ['.*aae.*']
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : {
'info': 'multistep',
'streams': [ {
'columns': ['*'],
'info': 'multi-step',
'source': config['dataSource'],
}],
'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":{'predictedField': config['predictedField'],
'predictionSteps': config['predictionSteps']},
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=config['predictedField'], metric=metricName,
inferenceElement='prediction', params={'window': 200}),
MetricSpec(field=config['predictedField'], metric='trivial',
inferenceElement='prediction', params={'errorMetric': metricName,
'window': 200}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': loggedMetrics,
}
# Add multi-step prediction metrics
for steps in config['predictionSteps']:
control['metrics'].append(
MetricSpec(field=config['predictedField'], metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': metricName, 'window': 200,
'steps': steps}))
################################################################################
################################################################################
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 | -3,795,069,816,328,408,600 | -5,294,451,628,375,149,000 | 37.403382 | 91 | 0.601925 | false |
ak2703/edx-platform | openedx/core/djangoapps/credit/tests/test_views.py | 27 | 13733 | """
Tests for credit app views.
"""
import unittest
import json
import datetime
import pytz
import ddt
from mock import patch
from django.test import TestCase
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from student.tests.factories import UserFactory
from util.testing import UrlResetMixin
from util.date_utils import to_timestamp
from opaque_keys.edx.keys import CourseKey
from openedx.core.djangoapps.credit import api
from openedx.core.djangoapps.credit.signature import signature
from openedx.core.djangoapps.credit.models import (
CreditCourse,
CreditProvider,
CreditRequirement,
CreditRequirementStatus,
CreditEligibility,
CreditRequest,
)
TEST_CREDIT_PROVIDER_SECRET_KEY = "931433d583c84ca7ba41784bad3232e6"
@ddt.ddt
@override_settings(CREDIT_PROVIDER_SECRET_KEYS={
"hogwarts": TEST_CREDIT_PROVIDER_SECRET_KEY
})
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CreditProviderViewTests(UrlResetMixin, TestCase):
"""
Tests for HTTP end-points used to issue requests to credit providers
and receive responses approving or denying requests.
"""
USERNAME = "ron"
USER_FULL_NAME = "Ron Weasley"
PASSWORD = "password"
PROVIDER_ID = "hogwarts"
PROVIDER_URL = "https://credit.example.com/request"
COURSE_KEY = CourseKey.from_string("edX/DemoX/Demo_Course")
FINAL_GRADE = 0.95
@patch.dict(settings.FEATURES, {"ENABLE_CREDIT_API": True})
def setUp(self):
"""
Configure a credit course.
"""
super(CreditProviderViewTests, self).setUp()
# Create the test user and log in
self.user = UserFactory(username=self.USERNAME, password=self.PASSWORD)
self.user.profile.name = self.USER_FULL_NAME
self.user.profile.save()
success = self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.assertTrue(success, msg="Could not log in")
# Enable the course for credit
credit_course = CreditCourse.objects.create(
course_key=self.COURSE_KEY,
enabled=True,
)
# Configure a credit provider for the course
CreditProvider.objects.create(
provider_id=self.PROVIDER_ID,
enable_integration=True,
provider_url=self.PROVIDER_URL,
)
# Add a single credit requirement (final grade)
requirement = CreditRequirement.objects.create(
course=credit_course,
namespace="grade",
name="grade",
)
# Mark the user as having satisfied the requirement
# and eligible for credit.
CreditRequirementStatus.objects.create(
username=self.USERNAME,
requirement=requirement,
status="satisfied",
reason={"final_grade": self.FINAL_GRADE}
)
CreditEligibility.objects.create(
username=self.USERNAME,
course=credit_course,
)
def test_credit_request_and_response(self):
# Initiate a request
response = self._create_credit_request(self.USERNAME, self.COURSE_KEY)
self.assertEqual(response.status_code, 200)
# Check that the user's request status is pending
requests = api.get_credit_requests_for_user(self.USERNAME)
self.assertEqual(len(requests), 1)
self.assertEqual(requests[0]["status"], "pending")
# Check request parameters
content = json.loads(response.content)
self.assertEqual(content["url"], self.PROVIDER_URL)
self.assertEqual(content["method"], "POST")
self.assertEqual(len(content["parameters"]["request_uuid"]), 32)
self.assertEqual(content["parameters"]["course_org"], "edX")
self.assertEqual(content["parameters"]["course_num"], "DemoX")
self.assertEqual(content["parameters"]["course_run"], "Demo_Course")
self.assertEqual(content["parameters"]["final_grade"], self.FINAL_GRADE)
self.assertEqual(content["parameters"]["user_username"], self.USERNAME)
self.assertEqual(content["parameters"]["user_full_name"], self.USER_FULL_NAME)
self.assertEqual(content["parameters"]["user_mailing_address"], "")
self.assertEqual(content["parameters"]["user_country"], "")
# The signature is going to change each test run because the request
# is assigned a different UUID each time.
# For this reason, we use the signature function directly
# (the "signature" parameter will be ignored when calculating the signature).
# Other unit tests verify that the signature function is working correctly.
self.assertEqual(
content["parameters"]["signature"],
signature(content["parameters"], TEST_CREDIT_PROVIDER_SECRET_KEY)
)
# Simulate a response from the credit provider
response = self._credit_provider_callback(
content["parameters"]["request_uuid"],
"approved"
)
self.assertEqual(response.status_code, 200)
# Check that the user's status is approved
requests = api.get_credit_requests_for_user(self.USERNAME)
self.assertEqual(len(requests), 1)
self.assertEqual(requests[0]["status"], "approved")
def test_request_credit_anonymous_user(self):
self.client.logout()
response = self._create_credit_request(self.USERNAME, self.COURSE_KEY)
self.assertEqual(response.status_code, 403)
def test_request_credit_for_another_user(self):
response = self._create_credit_request("another_user", self.COURSE_KEY)
self.assertEqual(response.status_code, 403)
@ddt.data(
# Invalid JSON
"{",
# Missing required parameters
json.dumps({"username": USERNAME}),
json.dumps({"course_key": unicode(COURSE_KEY)}),
# Invalid course key format
json.dumps({"username": USERNAME, "course_key": "invalid"}),
)
def test_create_credit_request_invalid_parameters(self, request_data):
url = reverse("credit:create_request", args=[self.PROVIDER_ID])
response = self.client.post(url, data=request_data, content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_credit_provider_callback_validates_signature(self):
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Simulate a callback from the credit provider with an invalid signature
# Since the signature is invalid, we respond with a 403 Not Authorized.
response = self._credit_provider_callback(request_uuid, "approved", sig="invalid")
self.assertEqual(response.status_code, 403)
def test_credit_provider_callback_validates_timestamp(self):
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Simulate a callback from the credit provider with a timestamp too far in the past
# (slightly more than 15 minutes)
# Since the message isn't timely, respond with a 403.
timestamp = to_timestamp(datetime.datetime.now(pytz.UTC) - datetime.timedelta(0, 60 * 15 + 1))
response = self._credit_provider_callback(request_uuid, "approved", timestamp=timestamp)
self.assertEqual(response.status_code, 403)
def test_credit_provider_callback_handles_string_timestamp(self):
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Simulate a callback from the credit provider with a timestamp
# encoded as a string instead of an integer.
timestamp = str(to_timestamp(datetime.datetime.now(pytz.UTC)))
response = self._credit_provider_callback(request_uuid, "approved", timestamp=timestamp)
self.assertEqual(response.status_code, 200)
def test_credit_provider_callback_is_idempotent(self):
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Initially, the status should be "pending"
self._assert_request_status(request_uuid, "pending")
# First call sets the status to approved
self._credit_provider_callback(request_uuid, "approved")
self._assert_request_status(request_uuid, "approved")
# Second call succeeds as well; status is still approved
self._credit_provider_callback(request_uuid, "approved")
self._assert_request_status(request_uuid, "approved")
@ddt.data(
# Invalid JSON
"{",
# Not a dictionary
"4",
# Invalid timestamp format
json.dumps({
"request_uuid": "557168d0f7664fe59097106c67c3f847",
"status": "approved",
"timestamp": "invalid",
"signature": "7685ae1c8f763597ee7ce526685c5ac24353317dbfe087f0ed32a699daf7dc63",
}),
)
def test_credit_provider_callback_invalid_parameters(self, request_data):
url = reverse("credit:provider_callback", args=[self.PROVIDER_ID])
response = self.client.post(url, data=request_data, content_type="application/json")
self.assertEqual(response.status_code, 400)
def test_credit_provider_invalid_status(self):
response = self._credit_provider_callback("557168d0f7664fe59097106c67c3f847", "invalid")
self.assertEqual(response.status_code, 400)
def test_credit_provider_key_not_configured(self):
# Cannot initiate a request because we can't sign it
with override_settings(CREDIT_PROVIDER_SECRET_KEYS={}):
response = self._create_credit_request(self.USERNAME, self.COURSE_KEY)
self.assertEqual(response.status_code, 400)
# Create the request with the secret key configured
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Callback from the provider is not authorized, because
# the shared secret isn't configured.
with override_settings(CREDIT_PROVIDER_SECRET_KEYS={}):
response = self._credit_provider_callback(request_uuid, "approved")
self.assertEqual(response.status_code, 403)
def test_request_associated_with_another_provider(self):
other_provider_id = "other_provider"
other_provider_secret_key = "1d01f067a5a54b0b8059f7095a7c636d"
# Create an additional credit provider
CreditProvider.objects.create(provider_id=other_provider_id, enable_integration=True)
# Initiate a credit request with the first provider
request_uuid = self._create_credit_request_and_get_uuid(self.USERNAME, self.COURSE_KEY)
# Attempt to update the request status for a different provider
with override_settings(CREDIT_PROVIDER_SECRET_KEYS={other_provider_id: other_provider_secret_key}):
response = self._credit_provider_callback(
request_uuid,
"approved",
provider_id=other_provider_id,
secret_key=other_provider_secret_key,
)
# Response should be a 404 to avoid leaking request UUID values to other providers.
self.assertEqual(response.status_code, 404)
# Request status should still be "pending"
self._assert_request_status(request_uuid, "pending")
def _create_credit_request(self, username, course_key):
"""
Initiate a request for credit.
"""
url = reverse("credit:create_request", args=[self.PROVIDER_ID])
return self.client.post(
url,
data=json.dumps({
"username": username,
"course_key": unicode(course_key),
}),
content_type="application/json",
)
def _create_credit_request_and_get_uuid(self, username, course_key):
"""
Initiate a request for credit and return the request UUID.
"""
response = self._create_credit_request(username, course_key)
self.assertEqual(response.status_code, 200)
return json.loads(response.content)["parameters"]["request_uuid"]
def _credit_provider_callback(self, request_uuid, status, **kwargs):
"""
Simulate a response from the credit provider approving
or rejecting the credit request.
Arguments:
request_uuid (str): The UUID of the credit request.
status (str): The status of the credit request.
Keyword Arguments:
provider_id (str): Identifier for the credit provider.
secret_key (str): Shared secret key for signing messages.
timestamp (datetime): Timestamp of the message.
sig (str): Digital signature to use on messages.
"""
provider_id = kwargs.get("provider_id", self.PROVIDER_ID)
secret_key = kwargs.get("secret_key", TEST_CREDIT_PROVIDER_SECRET_KEY)
timestamp = kwargs.get("timestamp", to_timestamp(datetime.datetime.now(pytz.UTC)))
url = reverse("credit:provider_callback", args=[provider_id])
parameters = {
"request_uuid": request_uuid,
"status": status,
"timestamp": timestamp,
}
parameters["signature"] = kwargs.get("sig", signature(parameters, secret_key))
return self.client.post(url, data=json.dumps(parameters), content_type="application/json")
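    # Illustrative payload shape (not part of the original test): the POST
    # body simulated above looks like
    #     {"request_uuid": "<uuid>", "status": "approved",
    #      "timestamp": 1434023402, "signature": "<hex digest>"}
    # where "signature" is computed from the other three fields with the
    # provider's shared secret key.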
def _assert_request_status(self, uuid, expected_status):
"""
Check the status of a credit request.
"""
request = CreditRequest.objects.get(uuid=uuid)
self.assertEqual(request.status, expected_status)
| agpl-3.0 | 6,948,631,475,244,101,000 | -5,369,491,030,538,949,000 | 39.872024 | 107 | 0.66089 | false |
davidak/satzgenerator | python/satzgenerator.py | 1 | 1797 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Satzgenerator (sentence generator)
Reference implementation
"""
import sys
import random as r
# Read the text files; one list entry per line
vornamen_m = open('../data/vornamen_m.txt', 'r').read().splitlines()
vornamen_w = open('../data/vornamen_w.txt', 'r').read().splitlines()
vornamen = vornamen_m + vornamen_w
verben = open('../data/verben.txt', 'r').read().splitlines()
adjektive = open('../data/adjektive.txt', 'r').read().splitlines()
orte = open('../data/orte.txt', 'r').read().splitlines()
# Word lists
beziehung_m = ['Vater', 'Bruder', 'Mann', 'Sohn', 'Onkel', 'Opa', 'Cousin', 'Freund', 'Kollege', 'Mitbewohner']
beziehung_w = ['Mutter', 'Schwester', 'Frau', 'Tochter', 'Tante', 'Oma', 'Cousine', 'Freundin', 'Kollegin', 'Mitbewohnerin']
beziehung = beziehung_m + beziehung_w
possessivpronomen_m = ['Mein', 'Dein', 'Sein', 'Ihr']
spezial = ['Er', 'Sie', 'Jemand', 'Niemand', 'Ein Lehrer', 'Ein Polizist', 'Ein Beamter', 'Ein Arzt', 'Der Alkoholiker', 'Ein normaler Mensch']
# Generate a person
def person():
z = r.randint(1, 6)
if z == 1:
_s = r.choice(vornamen) + 's ' + r.choice(beziehung)
elif z == 2:
_s = r.choice(possessivpronomen_m) + ' ' + r.choice(beziehung_m)
elif z == 3:
_s = r.choice(possessivpronomen_m) + 'e ' + r.choice(beziehung_w)
elif z == 4:
_s = r.choice(spezial)
else:
_s = r.choice(vornamen)
return _s
# Generate a sentence
def satz():
return person() + ' ' + r.choice(verben) + ' ' + r.choice(adjektive) + ' ' + r.choice(orte) + '.'
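# Illustrative output shape (not part of the original script): every sentence
# is "<person> <verb> <adjective> <place>." where person() yields forms such
# as "<first name>", "<first name>s Vater" or "Meine Schwester"; the concrete
# words come from the data files and are assumptions here.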
# Print sentences
def main():
    # Number of sentences may be passed as a command-line argument
    try:
        anzahl = int(sys.argv[1])
    except (IndexError, ValueError):
        anzahl = 1
for i in range(anzahl):
print(satz())
if __name__ == "__main__":
main()
| gpl-3.0 | 8,746,057,945,387,807,000 | -6,293,583,420,773,542,000 | 27.492063 | 143 | 0.594986 | false |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/test/test_pwd.py | 88 | 4229 | import sys
import unittest
from test import support
pwd = support.import_module('pwd')
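# For orientation (not part of the original test module): a pwd entry is a
# 7-field struct_passwd, e.g. pwd.getpwnam('root') typically returns
# something like ('root', 'x', 0, 0, 'root', '/root', '/bin/bash').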
class PwdTest(unittest.TestCase):
def test_values(self):
entries = pwd.getpwall()
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assertIsInstance(e.pw_name, str)
self.assertEqual(e[1], e.pw_passwd)
self.assertIsInstance(e.pw_passwd, str)
self.assertEqual(e[2], e.pw_uid)
self.assertIsInstance(e.pw_uid, int)
self.assertEqual(e[3], e.pw_gid)
self.assertIsInstance(e.pw_gid, int)
self.assertEqual(e[4], e.pw_gecos)
self.assertIsInstance(e.pw_gecos, str)
self.assertEqual(e[5], e.pw_dir)
self.assertIsInstance(e.pw_dir, str)
self.assertEqual(e[6], e.pw_shell)
self.assertIsInstance(e.pw_shell, str)
# The following won't work, because of duplicate entries
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
# and check afterwards (done in test_values_extended)
def test_values_extended(self):
entries = pwd.getpwall()
entriesbyname = {}
entriesbyuid = {}
if len(entries) > 1000: # Huge passwd file (NIS?) -- skip this test
self.skipTest('passwd file is huge; extended test skipped')
for e in entries:
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
if not e[0] or e[0] == '+':
continue # skip NIS entries etc.
self.assertIn(pwd.getpwnam(e.pw_name), entriesbyname[e.pw_name])
self.assertIn(pwd.getpwuid(e.pw_uid), entriesbyuid[e.pw_uid])
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
self.assertRaises(TypeError, pwd.getpwuid, 3.14)
self.assertRaises(TypeError, pwd.getpwnam)
self.assertRaises(TypeError, pwd.getpwnam, 42)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
bynames = {}
byuids = {}
for (n, p, u, g, gecos, d, s) in pwd.getpwall():
bynames[n] = u
byuids[u] = n
allnames = list(bynames.keys())
namei = 0
fakename = allnames[namei]
while fakename in bynames:
chars = list(fakename)
for i in range(len(chars)):
if chars[i] == 'z':
chars[i] = 'A'
break
elif chars[i] == 'Z':
continue
else:
chars[i] = chr(ord(chars[i]) + 1)
break
else:
namei = namei + 1
try:
fakename = allnames[namei]
except IndexError:
# should never happen... if so, just forget it
break
fakename = ''.join(chars)
self.assertRaises(KeyError, pwd.getpwnam, fakename)
# In some cases, byuids isn't a complete list of all users in the
# system, so if we try to pick a value not in byuids (via a perturbing
# loop, say), pwd.getpwuid() might still be able to find data for that
        # uid. Using sys.maxsize may provoke the same problems, but hopefully
# it will be a more repeatable failure.
fakeuid = sys.maxsize
self.assertNotIn(fakeuid, byuids)
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
# -1 shouldn't be a valid uid because it has a special meaning in many
# uid-related functions
self.assertRaises(KeyError, pwd.getpwuid, -1)
# should be out of uid_t range
self.assertRaises(KeyError, pwd.getpwuid, 2**128)
self.assertRaises(KeyError, pwd.getpwuid, -2**128)
def test_main():
support.run_unittest(PwdTest)
if __name__ == "__main__":
test_main()
| lgpl-3.0 | -7,642,617,959,715,247,000 | 3,907,934,875,736,944,600 | 36.096491 | 78 | 0.562544 | false |
openmeteo/enhydris | enhydris/api/tests/test_views/test_search_by_ts_has_years.py | 2 | 1962 | from io import StringIO
from rest_framework.test import APITestCase
from model_mommy import mommy
from enhydris import models
from .test_search import SearchTestCaseBase
class SearchWithYearExistingInOneStationTest(SearchTestCaseBase, APITestCase):
search_term = "ts_has_years:2005,2012,2016"
search_result = "Tharbad"
def _create_models(self):
komboti = mommy.make(models.Station, name="Komboti")
tharbad = mommy.make(models.Station, name="Tharbad")
self.komboti_temperature = self._make_timeseries(
komboti, "Temperature", "2005-03-23 18:20,5,\r\n2012-03-24 18:25,6,\r\n"
)
self.komboti_rain = self._make_timeseries(
komboti, "Rain", "2005-03-23 18:20,5,\r\n2011-03-24 18:25,6,\r\n"
)
self.tharbad_temperature = self._make_timeseries(
tharbad, "Temperature", "2005-03-23 18:20,5,\r\n2012-03-24 18:25,6,\r\n"
)
self.tharbad_rain = self._make_timeseries(
tharbad, "Rain", "2005-03-23 18:20,5,\r\n2016-03-24 18:25,6,\r\n"
)
def _make_timeseries(self, station, variable_descr, datastr):
result = mommy.make(
models.Timeseries,
timeseries_group__gentity=station,
timeseries_group__variable__descr=variable_descr,
timeseries_group__time_zone__utc_offset=120,
)
result.set_data(StringIO(datastr))
return result
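    # Note (illustrative): each datastr above follows the
    # "timestamp,value,flags" CSV layout consumed by set_data(), with the
    # flags column left empty.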
class SearchWithYearsExistingInAllStationsTest(SearchWithYearExistingInOneStationTest):
search_term = "ts_has_years:2005,2012"
search_result = {"Komboti", "Tharbad"}
number_of_results = 2
class SearchWithYearsExistingNowhereTest(SearchWithYearExistingInOneStationTest):
search_term = "ts_has_years:2005,2012,2018"
search_result = set()
number_of_results = 0
class SearchWithGarbageTest(SearchWithYearExistingInOneStationTest):
search_term = "ts_has_years:hello,world"
status_code = 404
| agpl-3.0 | -5,281,426,500,032,059,000 | 475,392,757,884,778,750 | 33.421053 | 87 | 0.670234 | false |
minhphung171093/OpenERP_V8 | openerp/loglevels.py | 380 | 3930 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
LOG_NOTSET = 'notset'
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
# TODO get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are here until we refactor tools so that this module doesn't depends on tools.
def get_encodings(hint_encoding='utf-8'):
fallbacks = {
'latin1': 'latin9',
'iso-8859-1': 'iso8859-15',
'cp1252': '1252',
}
if hint_encoding:
yield hint_encoding
if hint_encoding.lower() in fallbacks:
yield fallbacks[hint_encoding.lower()]
# some defaults (also taking care of pure ASCII)
for charset in ['utf8','latin1']:
if not hint_encoding or (charset.lower() != hint_encoding.lower()):
yield charset
from locale import getpreferredencoding
prefenc = getpreferredencoding()
if prefenc and prefenc.lower() != 'utf-8':
yield prefenc
prefenc = fallbacks.get(prefenc.lower())
if prefenc:
yield prefenc
def ustr(value, hint_encoding='utf-8', errors='strict'):
"""This method is similar to the builtin `unicode`, except
that it may try multiple encodings to find one that works
for decoding `value`, and defaults to 'utf-8' first.
    :param value: the value to convert
    :param hint_encoding: an optional encoding that was detected
        upstream and should be tried first to decode ``value``.
:param str errors: optional `errors` flag to pass to the unicode
built-in to indicate how illegal character values should be
treated when converting a string: 'strict', 'ignore' or 'replace'
(see ``unicode()`` constructor).
Passing anything other than 'strict' means that the first
encoding tried will be used, even if it's not the correct
one to use, so be careful! Ignored if value is not a string/unicode.
:raise: UnicodeError if value cannot be coerced to unicode
:return: unicode string representing the given value
"""
if isinstance(value, Exception):
return exception_to_unicode(value)
if isinstance(value, unicode):
return value
if not isinstance(value, basestring):
try:
return unicode(value)
except Exception:
raise UnicodeError('unable to convert %r' % (value,))
for ln in get_encodings(hint_encoding):
try:
return unicode(value, ln, errors=errors)
except Exception:
pass
raise UnicodeError('unable to convert %r' % (value,))
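# Illustrative usage (not part of the original module; Python 2 semantics):
#
#     >>> ustr('caf\xe9', hint_encoding='latin-1')
#     u'caf\xe9'   # i.e. u'café'
#
# When the hint fails, the fallback chain produced by get_encodings() is
# tried before UnicodeError is raised.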
def exception_to_unicode(e):
if (sys.version_info[:2] < (2,6)) and hasattr(e, 'message'):
return ustr(e.message)
if hasattr(e, 'args'):
return "\n".join((ustr(a) for a in e.args))
try:
return unicode(e)
except Exception:
return u"Unknown message"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -543,792,643,965,199,040 | -4,532,543,588,661,878,000 | 36.075472 | 86 | 0.63486 | false |
benoitsteiner/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/affine_impl.py | 6 | 22469 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Affine bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.distributions.python.ops.shape import _DistributionShape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
__all__ = [
"Affine",
]
def _as_tensor(x, name):
"""Convenience to convert to `Tensor` or leave as `None`."""
return None if x is None else ops.convert_to_tensor(x, name=name)
# TODO(srvasude): Deprecate this class with a dedicated Linear Operator
# corresponding to TriL + V D V.T.
class _TriLPlusVDVTLightweightOperatorPD(object):
"""Helper/hidden class fake an OperatorPD for TriL+VDV.T."""
def __init__(self, tril, v, diag=None, validate_args=False):
"""Creates an instance of _TriLPlusVDVTLightweightOperatorPD.
WARNING: This object is not to be used outside of `Affine` where it is
currently being temporarily used for refactoring purposes.
Args:
tril: `Tensor` of shape `[B1,..,Bb, d, d]`.
v: `Tensor` of shape `[B1,...,Bb, d, k]`.
diag: `Tensor` of shape `[B1,...,Bb, k, k]` or None
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
"""
self._m = tril
self._v = v
self._validate_args = validate_args
self._inputs = [tril, v]
if diag is not None:
self._inputs += [diag]
self._d = operator_pd_diag.OperatorPDDiag(diag, verify_pd=validate_args)
self._d_inv = operator_pd_diag.OperatorPDDiag(1. / diag,
verify_pd=validate_args)
return
if v.get_shape().is_fully_defined():
v_shape = v.get_shape().as_list()
id_shape = v_shape[:-2] + [v_shape[-1], v_shape[-1]]
else:
v_shape = array_ops.shape(v)
id_shape = array_ops.concat([v_shape[:-2], [v_shape[-1], v_shape[-1]]], 0)
self._d = operator_pd_identity.OperatorPDIdentity(
id_shape, v.dtype, verify_pd=self.validate_args)
self._d_inv = self._d
@property
def inputs(self):
return self._inputs
@property
def dtype(self):
return self._m.dtype.base_dtype
@property
def validate_args(self):
return self._validate_args
def rank(self):
"""Returns `rank(self)`."""
return array_ops.rank(self._m)
def sqrt_matmul(self, x):
"""Computes `matmul(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
Args:
x: `Tensor`
Returns:
self_times_x: `Tensor`
"""
m_x = math_ops.matmul(self._m, x)
vt_x = math_ops.matmul(self._v, x, adjoint_a=True)
d_vt_x = self._d.matmul(vt_x)
v_d_vt_x = math_ops.matmul(self._v, d_vt_x)
return m_x + v_d_vt_x
def sqrt_solve(self, x):
"""Computes `solve(self, x)`.
Doesn't actually do the sqrt! Named as such to agree with API.
    To apply inv(M + V D V.T), we use the Woodbury matrix identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Args:
x: `Tensor`
Returns:
inv_of_self_times_x: `Tensor`
"""
minv_x = linalg_ops.matrix_triangular_solve(self._m, x)
vt_minv_x = math_ops.matmul(self._v, minv_x, transpose_a=True)
cinv_vt_minv_x = linalg_ops.matrix_solve(
self._woodbury_sandwiched_term(), vt_minv_x)
v_cinv_vt_minv_x = math_ops.matmul(self._v, cinv_vt_minv_x)
minv_v_cinv_vt_minv_x = linalg_ops.matrix_triangular_solve(
self._m, v_cinv_vt_minv_x)
return minv_x - minv_v_cinv_vt_minv_x
def sqrt_log_abs_det(self):
"""Computes (log o abs o det)(X) for matrix X.
Doesn't actually do the sqrt! Named as such to agree with API.
To compute det(M + V D V.T), we use the matrix determinant lemma:
      det(M + V D V.T) = det(C) det(D) det(M)
    where C is defined as in `sqrt_solve`, i.e.,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Matrix_determinant_lemma
Returns:
log_abs_det: `Tensor`.
"""
log_det_c = math_ops.log(math_ops.abs(
linalg_ops.matrix_determinant(self._woodbury_sandwiched_term())))
# Reduction is ok because we always prepad inputs to this class.
log_det_m = math_ops.reduce_sum(math_ops.log(math_ops.abs(
array_ops.matrix_diag_part(self._m))), axis=[-1])
return log_det_c + 2. * self._d.sqrt_log_abs_det() + log_det_m
def _woodbury_sandwiched_term(self):
"""Computes the sandwiched term in the Woodbury identity.
Computes the "`C`" in the identity:
inv(M + V D V.T) = inv(M) - inv(M) V inv(C) V.T inv(M)
where,
C = inv(D) + V.T inv(M) V.
See: https://en.wikipedia.org/wiki/Woodbury_matrix_identity
Returns:
woodbury_sandwich_term: A `Tensor` to be used like `C`, above.
"""
minv_v = linalg_ops.matrix_triangular_solve(self._m, self._v)
vt_minv_v = math_ops.matmul(self._v, minv_v, adjoint_a=True)
return self._d_inv.add_to_tensor(vt_minv_v)
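# Illustrative numeric check (not part of the original module): verifies,
# with small dense NumPy arrays, the two identities the lightweight operator
# above relies on -- the Woodbury matrix identity used by `sqrt_solve` and
# the matrix determinant lemma used by `sqrt_log_abs_det`. NumPy is assumed
# to be available; the concrete values are arbitrary.
def _woodbury_identity_demo():  # pragma: no cover
  import numpy as np
  m = np.array([[2., 0.], [1., 3.]])   # lower-triangular "M"
  v = np.array([[1.], [2.]])           # perturbation factor "V"
  d = np.array([[0.5]])                # perturbation diagonal "D"
  a = m + v.dot(d).dot(v.T)
  m_inv = np.linalg.inv(m)
  c = np.linalg.inv(d) + v.T.dot(m_inv).dot(v)
  # Woodbury: inv(A) == inv(M) - inv(M) V inv(C) V.T inv(M)
  woodbury = m_inv - m_inv.dot(v).dot(np.linalg.inv(c)).dot(v.T).dot(m_inv)
  assert np.allclose(np.linalg.inv(a), woodbury)
  # Determinant lemma: det(A) == det(C) * det(D) * det(M)
  assert np.allclose(np.linalg.det(a),
                     np.linalg.det(c) * np.linalg.det(d) * np.linalg.det(m))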
class Affine(bijector.Bijector):
"""Compute `Y = g(X; shift, scale) = scale @ X + shift`.
Here `scale = c * I + diag(D1) + tril(L) + V @ diag(D2) @ V.T`.
In TF parlance, the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
The `scale` term is applied without necessarily materializing constituent
matrices, i.e., the matmul is [matrix-free](
https://en.wikipedia.org/wiki/Matrix-free_methods) when possible.
Examples:
```python
# Y = X
b = Affine()
# Y = X + shift
b = Affine(shift=[1., 2, 3])
# Y = 2 * I @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_identity_multiplier=2.)
# Y = tf.diag(d1) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[-1., 2, 1]) # Implicitly 3x3.
# Y = (I + v * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
# Y = (diag(d1) + v * diag(d2) * v.T) @ X.T + shift
b = Affine(shift=[1., 2, 3],
scale_diag=[1., 3, 3], # Implicitly 3x3.
scale_perturb_diag=[2., 1], # Implicitly 2x2.
scale_perturb_factor=[[1., 0],
[0, 1],
[1, 1]])
```
"""
def __init__(self,
shift=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
event_ndims=1,
validate_args=False,
name="affine"):
"""Instantiates the `Affine` bijector.
This `Bijector` is initialized with `shift` `Tensor` and `scale` arguments,
giving the forward operation:
```none
Y = g(X) = scale @ X + shift
```
where the `scale` term is logically equivalent to:
```python
scale = (
scale_identity_multiplier * tf.diag(tf.ones(d)) +
tf.diag(scale_diag) +
scale_tril +
scale_perturb_factor @ diag(scale_perturb_diag) @
tf.transpose([scale_perturb_factor])
)
```
If none of `scale_identity_multiplier`, `scale_diag`, or `scale_tril` are
specified then `scale += IdentityMatrix`. Otherwise specifying a
`scale` argument has the semantics of `scale += Expand(arg)`, i.e.,
`scale_diag != None` means `scale += tf.diag(scale_diag)`.
Args:
shift: Floating-point `Tensor`. If this is set to `None`, no shift is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix.
When `scale_identity_multiplier = scale_diag = scale_tril = None` then
`scale += IdentityMatrix`. Otherwise no scaled-identity-matrix is added
to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
When `None` no diagonal term is added to `scale`.
      scale_tril: Floating-point `Tensor` representing the lower triangular
        matrix. `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k
lower triangular matrix.
When `None` no `scale_tril` term is added to `scale`.
The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ... r], which
represents an `r x r` diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1.
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
name: Python `str` name given to ops managed by this object.
Raises:
ValueError: if `perturb_diag` is specified but not `perturb_factor`.
TypeError: if `shift` has different `dtype` from `scale` arguments.
"""
self._graph_parents = []
self._name = name
self._validate_args = validate_args
# Ambiguous definition of low rank update.
if scale_perturb_diag is not None and scale_perturb_factor is None:
raise ValueError("When scale_perturb_diag is specified, "
"scale_perturb_factor must be specified.")
# Special case, only handling a scaled identity matrix. We don't know its
# dimensions, so this is special cased.
# We don't check identity_multiplier, since below we set it to 1. if all
# other scale args are None.
self._is_only_identity_multiplier = (scale_tril is None and
scale_diag is None and
scale_perturb_factor is None)
# When no args are specified, pretend the scale matrix is the identity
# matrix.
if self._is_only_identity_multiplier and scale_identity_multiplier is None:
scale_identity_multiplier = 1.
with self._name_scope("init", values=[
shift, scale_identity_multiplier, scale_diag, scale_tril,
scale_perturb_diag, scale_perturb_factor, event_ndims]):
event_ndims = ops.convert_to_tensor(event_ndims, name="event_ndims")
if validate_args:
is_less_than_two = check_ops.assert_less(
event_ndims, 2,
message="event_ndims must be 0 or 1")
event_ndims = control_flow_ops.with_dependencies(
[is_less_than_two], event_ndims)
self._shift = _as_tensor(shift, "shift")
# self._create_scale_operator returns an OperatorPD in all cases except if
# self._is_only_identity_multiplier; in which case it returns a scalar
# Tensor.
self._scale = self._create_scale_operator(
identity_multiplier=scale_identity_multiplier,
diag=scale_diag,
tril=scale_tril,
perturb_diag=scale_perturb_diag,
perturb_factor=scale_perturb_factor,
event_ndims=event_ndims,
validate_args=validate_args)
if (self._shift is not None and
self._shift.dtype.base_dtype != self._scale.dtype.base_dtype):
raise TypeError("shift.dtype({}) does not match scale.dtype({})".format(
self._shift.dtype, self._scale.dtype))
self._shaper = _DistributionShape(
batch_ndims=self._infer_batch_ndims(),
event_ndims=event_ndims,
validate_args=validate_args)
super(Affine, self).__init__(
event_ndims=event_ndims,
graph_parents=(
[event_ndims] +
[self._scale] if tensor_util.is_tensor(self._scale)
else self._scale.inputs +
[self._shift] if self._shift is not None else []),
is_constant_jacobian=True,
dtype=self._scale.dtype,
validate_args=validate_args,
name=name)
def _create_scale_operator(self, identity_multiplier, diag, tril,
perturb_diag, perturb_factor, event_ndims,
validate_args):
"""Construct `scale` from various components.
Args:
identity_multiplier: floating point rank 0 `Tensor` representing a scaling
done to the identity matrix.
diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k
diagonal matrix.
      tril: Floating-point `Tensor` representing the lower triangular matrix.
        `scale_tril` has shape [N1, N2, ... k, k], which represents a k x k lower
triangular matrix.
perturb_diag: Floating-point `Tensor` representing the diagonal matrix of
the low rank update.
perturb_factor: Floating-point `Tensor` representing factor matrix.
event_ndims: Scalar `int32` `Tensor` indicating the number of dimensions
associated with a particular draw from the distribution. Must be 0 or 1
validate_args: Python `bool` indicating whether arguments should be
checked for correctness.
Returns:
scale. In the case of scaling by a constant, scale is a
floating point `Tensor`. Otherwise, scale is an `OperatorPD`.
Raises:
ValueError: if all of `tril`, `diag` and `identity_multiplier` are `None`.
"""
identity_multiplier = _as_tensor(identity_multiplier, "identity_multiplier")
diag = _as_tensor(diag, "diag")
tril = _as_tensor(tril, "tril")
perturb_diag = _as_tensor(perturb_diag, "perturb_diag")
perturb_factor = _as_tensor(perturb_factor, "perturb_factor")
identity_multiplier = self._maybe_validate_identity_multiplier(
identity_multiplier, validate_args)
if perturb_factor is not None:
perturb_factor = self._process_matrix(
perturb_factor, min_rank=2, event_ndims=event_ndims)
if perturb_diag is not None:
perturb_diag = self._process_matrix(
perturb_diag, min_rank=1, event_ndims=event_ndims)
    # The following if-statements are ordered by increasingly stronger
# assumptions in the base matrix, i.e., we process in the order:
# TriL, Diag, Identity.
if tril is not None:
tril = self._preprocess_tril(
identity_multiplier, diag, tril, event_ndims)
if perturb_factor is None:
return operator_pd_cholesky.OperatorPDCholesky(
tril, verify_pd=validate_args)
return _TriLPlusVDVTLightweightOperatorPD(
tril=tril, v=perturb_factor, diag=perturb_diag,
validate_args=validate_args)
if diag is not None:
diag = self._preprocess_diag(identity_multiplier, diag, event_ndims)
if perturb_factor is None:
return operator_pd_diag.OperatorPDSqrtDiag(
diag, verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=operator_pd_diag.OperatorPDDiag(
diag, verify_pd=validate_args),
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
if identity_multiplier is not None:
if perturb_factor is None:
return identity_multiplier
# Infer the shape from the V and D.
v_shape = array_ops.shape(perturb_factor)
identity_shape = array_ops.concat([v_shape[:-1], [v_shape[-2]]], 0)
scaled_identity = operator_pd_identity.OperatorPDIdentity(
identity_shape,
perturb_factor.dtype.base_dtype,
scale=identity_multiplier,
verify_pd=validate_args)
return operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
operator=scaled_identity,
v=perturb_factor,
diag=perturb_diag,
verify_pd=validate_args)
raise ValueError("One of tril, diag and/or identity_multiplier must be "
"specified.")
def _maybe_validate_identity_multiplier(self, identity_multiplier,
validate_args):
"""Check that the init arg `identity_multiplier` is valid."""
if identity_multiplier is None or not validate_args:
return identity_multiplier
if validate_args:
identity_multiplier = control_flow_ops.with_dependencies(
[check_ops.assert_positive(identity_multiplier)],
identity_multiplier)
return identity_multiplier
def _preprocess_tril(self, identity_multiplier, diag, tril, event_ndims):
"""Helper to preprocess a lower triangular matrix."""
tril = array_ops.matrix_band_part(tril, -1, 0) # Zero out TriU.
if identity_multiplier is None and diag is None:
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
new_diag = array_ops.matrix_diag_part(tril)
if identity_multiplier is not None:
new_diag += identity_multiplier
if diag is not None:
new_diag += diag
tril = array_ops.matrix_set_diag(tril, new_diag)
return self._process_matrix(tril, min_rank=2, event_ndims=event_ndims)
def _preprocess_diag(self, identity_multiplier, diag, event_ndims):
"""Helper to preprocess a diagonal matrix."""
if identity_multiplier is not None:
diag += identity_multiplier
return self._process_matrix(diag, min_rank=1, event_ndims=event_ndims)
def _process_matrix(self, matrix, min_rank, event_ndims):
"""Helper to __init__ which gets matrix in batch-ready form."""
    # Pad the matrix so that matmul works in the case of a matrix and vector
    # input. Keep track of whether the matrix was padded, to distinguish
    # between a rank-3 tensor and a padded rank-2 tensor.
    # TODO(srvasude): Remove side-effects from functions. It's currently
    # unbroken but error-prone since the function call order may change in
    # the future.
self._rank_two_event_ndims_one = math_ops.logical_and(
math_ops.equal(array_ops.rank(matrix), min_rank),
math_ops.equal(event_ndims, 1))
left = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
pad = array_ops.concat(
[array_ops.ones(
[left], dtype=dtypes.int32), array_ops.shape(matrix)],
0)
return array_ops.reshape(matrix, pad)
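  # Illustrative behavior of _process_matrix (the shapes are assumptions for
  # this sketch): with event_ndims == 1, a diag input of shape [k] (rank equal
  # to min_rank == 1) is reshaped to [1, k] so downstream matmul/solve sees a
  # batch of one event; higher-rank inputs pass through unchanged.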
def _infer_batch_ndims(self):
"""Return batch_ndims."""
if self._is_only_identity_multiplier:
return 0
    # The real number of batch dims is one less when we pad in the case of
    # event_ndims = 1 and the underlying scale has rank 2. This keeps the
    # sample dims non-negative.
return (self._scale.rank() - 2 -
array_ops.where(self._rank_two_event_ndims_one, 1, 0))
@property
def shift(self):
"""The `shift` `Tensor` in `Y = scale @ X + shift`."""
return self._shift
@property
def scale(self):
"""The `scale` `LinearOperator` in `Y = scale @ X + shift`."""
# TODO(srvasude): Remove this exception once TriLPlusVDVT is properly
# implemented.
if isinstance(self._scale, _TriLPlusVDVTLightweightOperatorPD):
raise NotImplementedError("Cannot access scale when Tril+VDV.T.")
return self._scale
def _forward(self, x):
y = x
if self._is_only_identity_multiplier:
y *= self._scale
if self.shift is not None:
return y + self.shift
return y
y, sample_shape = self._shaper.make_batch_of_event_sample_matrices(y)
y = self._scale.sqrt_matmul(y)
y = self._shaper.undo_make_batch_of_event_sample_matrices(y, sample_shape)
if self.shift is not None:
return y + self.shift
return y
def _inverse(self, y):
x = y
if self.shift is not None:
x -= self.shift
if self._is_only_identity_multiplier:
return x / self._scale
x, sample_shape = self._shaper.make_batch_of_event_sample_matrices(x)
x = self._scale.sqrt_solve(x)
x = self._shaper.undo_make_batch_of_event_sample_matrices(x, sample_shape)
return x
def _inverse_log_det_jacobian(self, y):
return -self._forward_log_det_jacobian(y)
def _forward_log_det_jacobian(self, x):
if self._is_only_identity_multiplier:
# TODO(jvdillon): We don't pad in this case and instead let the fldj be
# applied via broadcast.
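      # For a scalar scale c applied to d event dims, log|det J| = d * log|c|
      # (just log|c| when event_ndims == 0), which is what the `where` below
      # selects between.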
d = math_ops.cast(array_ops.shape(x)[-1], dtype=self._scale.dtype)
return math_ops.log(math_ops.abs(self._scale)) * array_ops.where(
math_ops.equal(self._shaper.event_ndims, 0), 1., d)
fldj = self._scale.sqrt_log_abs_det()
# We need to squeeze off the padded dimension.
start = array_ops.where(self._rank_two_event_ndims_one, 1, 0)
return array_ops.reshape(fldj, array_ops.shape(fldj)[start:])
| apache-2.0 | -1,784,600,516,572,866,300 | 6,317,422,228,396,916,000 | 38.350263 | 80 | 0.635854 | false |
andreamerello/linux-analogdevices | scripts/gdb/linux/utils.py | 367 | 4267 | #
# gdb helper commands and functions for Linux kernel debugging
#
# common utilities
#
# Copyright (c) Siemens AG, 2011-2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
class CachedType:
def __init__(self, name):
self._type = None
self._name = name
def _new_objfile_handler(self, event):
self._type = None
gdb.events.new_objfile.disconnect(self._new_objfile_handler)
def get_type(self):
if self._type is None:
self._type = gdb.lookup_type(self._name)
if self._type is None:
raise gdb.GdbError(
"cannot resolve type '{0}'".format(self._name))
if hasattr(gdb, 'events') and hasattr(gdb.events, 'new_objfile'):
gdb.events.new_objfile.connect(self._new_objfile_handler)
return self._type
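# Illustrative usage (the type name here is just an example):
#   task_type = CachedType("struct task_struct")
#   t = task_type.get_type()  # resolved lazily; re-resolved after new objfiles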
long_type = CachedType("long")
def get_long_type():
global long_type
return long_type.get_type()
def offset_of(typeobj, field):
element = gdb.Value(0).cast(typeobj)
return int(str(element[field].address).split()[0], 16)
def container_of(ptr, typeobj, member):
return (ptr.cast(get_long_type()) -
offset_of(typeobj, member)).cast(typeobj)
class ContainerOf(gdb.Function):
"""Return pointer to containing data structure.
$container_of(PTR, "TYPE", "ELEMENT"): Given PTR, return a pointer to the
data structure of the type TYPE in which PTR is the address of ELEMENT.
Note that TYPE and ELEMENT have to be quoted as strings."""
def __init__(self):
super(ContainerOf, self).__init__("container_of")
def invoke(self, ptr, typename, elementname):
return container_of(ptr, gdb.lookup_type(typename.string()).pointer(),
elementname.string())
ContainerOf()
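# Illustrative use from a gdb prompt (init_task is a kernel symbol; treat the
# exact expression as an assumption):
#   (gdb) p $container_of(&init_task.tasks, "struct task_struct", "tasks")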
BIG_ENDIAN = 0
LITTLE_ENDIAN = 1
target_endianness = None
def get_target_endianness():
global target_endianness
if target_endianness is None:
endian = gdb.execute("show endian", to_string=True)
if "little endian" in endian:
target_endianness = LITTLE_ENDIAN
elif "big endian" in endian:
target_endianness = BIG_ENDIAN
else:
            raise gdb.GdbError("unknown endianness '{0}'".format(str(endian)))
return target_endianness
def read_u16(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return ord(buffer[0]) + (ord(buffer[1]) << 8)
else:
return ord(buffer[1]) + (ord(buffer[0]) << 8)
def read_u32(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
else:
return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
def read_u64(buffer):
if get_target_endianness() == LITTLE_ENDIAN:
return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
else:
return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
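# Worked example (assumed byte strings, little-endian target):
#   read_u16("\x34\x12") == 0x1234
#   read_u32("\x78\x56\x34\x12") == 0x12345678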
target_arch = None
def is_target_arch(arch):
if hasattr(gdb.Frame, 'architecture'):
return arch in gdb.newest_frame().architecture().name()
else:
global target_arch
if target_arch is None:
target_arch = gdb.execute("show architecture", to_string=True)
return arch in target_arch
GDBSERVER_QEMU = 0
GDBSERVER_KGDB = 1
gdbserver_type = None
def get_gdbserver_type():
def exit_handler(event):
global gdbserver_type
gdbserver_type = None
gdb.events.exited.disconnect(exit_handler)
def probe_qemu():
try:
return gdb.execute("monitor info version", to_string=True) != ""
except:
return False
def probe_kgdb():
try:
thread_info = gdb.execute("info thread 2", to_string=True)
return "shadowCPU0" in thread_info
except:
return False
global gdbserver_type
if gdbserver_type is None:
if probe_qemu():
gdbserver_type = GDBSERVER_QEMU
elif probe_kgdb():
gdbserver_type = GDBSERVER_KGDB
if not gdbserver_type is None and hasattr(gdb, 'events'):
gdb.events.exited.connect(exit_handler)
return gdbserver_type
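# Illustrative check (a hypothetical call site):
#   if get_gdbserver_type() == GDBSERVER_QEMU:
#       ...  # QEMU-only monitor commands are available here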
| gpl-2.0 | 7,027,450,973,185,176,000 | 3,804,973,972,211,929,600 | 26.352564 | 78 | 0.617764 | false |
phreakocious/graphite-web | webapp/graphite/storage.py | 29 | 5786 | import time
try:
from importlib import import_module
except ImportError: # python < 2.7 compatibility
from django.utils.importlib import import_module
from django.conf import settings
from graphite.util import is_local_interface, is_pattern
from graphite.remote_storage import RemoteStore
from graphite.node import LeafNode
from graphite.intervals import Interval, IntervalSet
from graphite.readers import MultiReader
def get_finder(finder_path):
module_name, class_name = finder_path.rsplit('.', 1)
module = import_module(module_name)
return getattr(module, class_name)()
class Store:
def __init__(self, finders=None, hosts=None):
if finders is None:
finders = [get_finder(finder_path)
for finder_path in settings.STORAGE_FINDERS]
self.finders = finders
if hosts is None:
hosts = settings.CLUSTER_SERVERS
remote_hosts = [host for host in hosts if not settings.REMOTE_EXCLUDE_LOCAL or not is_local_interface(host)]
self.remote_stores = [ RemoteStore(host) for host in remote_hosts ]
def find(self, pattern, startTime=None, endTime=None, local=False):
query = FindQuery(pattern, startTime, endTime)
# Start remote searches
if not local:
remote_requests = [ r.find(query) for r in self.remote_stores if r.available ]
matching_nodes = set()
# Search locally
for finder in self.finders:
for node in finder.find_nodes(query):
#log.info("find() :: local :: %s" % node)
matching_nodes.add(node)
# Gather remote search results
if not local:
for request in remote_requests:
for node in request.get_results():
#log.info("find() :: remote :: %s from %s" % (node,request.store.host))
matching_nodes.add(node)
# Group matching nodes by their path
nodes_by_path = {}
for node in matching_nodes:
if node.path not in nodes_by_path:
nodes_by_path[node.path] = []
nodes_by_path[node.path].append(node)
# Reduce matching nodes for each path to a minimal set
found_branch_nodes = set()
for path, nodes in nodes_by_path.iteritems():
leaf_nodes = []
# First we dispense with the BranchNodes
for node in nodes:
if node.is_leaf:
leaf_nodes.append(node)
elif node.path not in found_branch_nodes: #TODO need to filter branch nodes based on requested interval... how?!?!?
yield node
found_branch_nodes.add(node.path)
if not leaf_nodes:
continue
# Calculate best minimal node set
minimal_node_set = set()
covered_intervals = IntervalSet([])
      # If the query doesn't fall entirely within the FIND_TOLERANCE window
      # we disregard the window. This prevents unnecessary remote fetches
      # caused when carbon's cache skews node.intervals, giving the appearance
      # that remote systems have data we lack locally, when we probably do.
now = int( time.time() )
tolerance_window = now - settings.FIND_TOLERANCE
disregard_tolerance_window = query.interval.start < tolerance_window
prior_to_window = Interval( float('-inf'), tolerance_window )
def measure_of_added_coverage(node, drop_window=disregard_tolerance_window):
relevant_intervals = node.intervals.intersect_interval(query.interval)
if drop_window:
relevant_intervals = relevant_intervals.intersect_interval(prior_to_window)
return covered_intervals.union(relevant_intervals).size - covered_intervals.size
nodes_remaining = list(leaf_nodes)
# Prefer local nodes first (and do *not* drop the tolerance window)
for node in leaf_nodes:
if node.local and measure_of_added_coverage(node, False) > 0:
nodes_remaining.remove(node)
minimal_node_set.add(node)
covered_intervals = covered_intervals.union(node.intervals)
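      # Greedy set-cover over the remaining (typically remote) nodes: keep
      # taking the node that adds the most uncovered query-interval coverage
      # until no candidate adds any.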
while nodes_remaining:
node_coverages = [ (measure_of_added_coverage(n), n) for n in nodes_remaining ]
best_coverage, best_node = max(node_coverages)
if best_coverage == 0:
break
nodes_remaining.remove(best_node)
minimal_node_set.add(best_node)
covered_intervals = covered_intervals.union(best_node.intervals)
# Sometimes the requested interval falls within the caching window.
# We include the most likely node if the gap is within tolerance.
if not minimal_node_set:
def distance_to_requested_interval(node):
latest = sorted(node.intervals, key=lambda i: i.end)[-1]
distance = query.interval.start - latest.end
return distance if distance >= 0 else float('inf')
best_candidate = min(leaf_nodes, key=distance_to_requested_interval)
if distance_to_requested_interval(best_candidate) <= settings.FIND_TOLERANCE:
minimal_node_set.add(best_candidate)
if len(minimal_node_set) == 1:
yield minimal_node_set.pop()
elif len(minimal_node_set) > 1:
reader = MultiReader(minimal_node_set)
yield LeafNode(path, reader)
class FindQuery:
def __init__(self, pattern, startTime, endTime):
self.pattern = pattern
self.startTime = startTime
self.endTime = endTime
self.isExact = is_pattern(pattern)
self.interval = Interval(float('-inf') if startTime is None else startTime,
float('inf') if endTime is None else endTime)
def __repr__(self):
if self.startTime is None:
startString = '*'
else:
startString = time.ctime(self.startTime)
if self.endTime is None:
endString = '*'
else:
endString = time.ctime(self.endTime)
return '<FindQuery: %s from %s until %s>' % (self.pattern, startString, endString)
STORE = Store()
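# Illustrative usage (the metric pattern is hypothetical):
#   for node in STORE.find('carbon.agents.*.cpuUsage'):
#       if node.is_leaf:
#           ...  # fetch datapoints via the node's reader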
| apache-2.0 | -6,862,662,395,228,295,000 | 7,430,949,864,545,718,000 | 34.066667 | 123 | 0.666609 | false |
zacharyvoase/urlobject | urlobject/path.py | 4 | 4746 | # -*- coding: utf-8 -*-
import posixpath
import urllib
from .compat import urlparse
from .six import text_type, u
class Root(object):
"""A descriptor which always returns the root path."""
def __get__(self, instance, cls):
return cls('/')
class URLPath(text_type):
root = Root()
def __repr__(self):
return u('URLPath(%r)') % (text_type(self),)
@classmethod
def join_segments(cls, segments, absolute=True):
"""Create a :class:`URLPath` from an iterable of segments."""
if absolute:
path = cls('/')
else:
path = cls('')
for segment in segments:
path = path.add_segment(segment)
return path
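    # Illustrative behavior (derived from the implementation above):
    #   URLPath.join_segments(['a', 'b', 'c'])            -> URLPath('/a/b/c')
    #   URLPath.join_segments(['a', 'b'], absolute=False) -> URLPath('a/b')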
@property
def segments(self):
"""
Split this path into (decoded) segments.
>>> URLPath('/a/b/c').segments
('a', 'b', 'c')
Non-leaf nodes will have a trailing empty string, and percent encodes
will be decoded:
>>> URLPath('/a%20b/c%20d/').segments
('a b', 'c d', '')
"""
segments = tuple(map(path_decode, self.split('/')))
if segments[0] == '':
return segments[1:]
return segments
@property
def parent(self):
"""
The parent of this node.
>>> URLPath('/a/b/c').parent
URLPath('/a/b/')
>>> URLPath('/foo/bar/').parent
URLPath('/foo/')
"""
if self.is_leaf:
return self.relative('.')
return self.relative('..')
@property
def is_leaf(self):
"""
Is this path a leaf node?
>>> URLPath('/a/b/c').is_leaf
True
>>> URLPath('/a/b/').is_leaf
False
"""
        return bool(self) and self.segments[-1] != ''
@property
def is_relative(self):
"""
Is this path relative?
>>> URLPath('a/b/c').is_relative
True
>>> URLPath('/a/b/c').is_relative
False
"""
return self[0] != '/'
@property
def is_absolute(self):
"""
Is this path absolute?
>>> URLPath('a/b/c').is_absolute
False
>>> URLPath('/a/b/c').is_absolute
True
"""
return self[0] == '/'
def relative(self, rel_path):
"""
Resolve a relative path against this one.
>>> URLPath('/a/b/c').relative('.')
URLPath('/a/b/')
>>> URLPath('/a/b/c').relative('d')
URLPath('/a/b/d')
>>> URLPath('/a/b/c').relative('../d')
URLPath('/a/d')
"""
return type(self)(urlparse.urljoin(self, rel_path))
def add_segment(self, segment):
"""
Add a segment to this path.
>>> URLPath('/a/b/').add_segment('c')
URLPath('/a/b/c')
Non-ASCII and reserved characters (including slashes) will be encoded:
>>> URLPath('/a/b/').add_segment('dé/f')
URLPath('/a/b/d%C3%A9%2Ff')
"""
return type(self)(posixpath.join(self, path_encode(segment)))
def add(self, path):
"""
Add a partial path to this one.
The only difference between this and :meth:`add_segment` is that slash
characters will not be encoded, making it suitable for adding more than
one path segment at a time:
>>> URLPath('/a/b/').add('dé/f/g')
URLPath('/a/b/d%C3%A9/f/g')
"""
return type(self)(posixpath.join(self, path_encode(path, safe='/')))
def _path_encode_py2(s, safe=''):
"""Quote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
if isinstance(safe, unicode):
safe = safe.encode('utf-8')
return urllib.quote(s, safe=safe).decode('utf-8')
def _path_encode_py3(s, safe=''):
"""Quote str or bytes using path rules."""
# s can be bytes or unicode, urllib.parse.quote() assumes
# utf-8 if encoding is necessary.
return urlparse.quote(s, safe=safe)
def _path_decode_py2(s):
"""Unquote unicode or str using path rules."""
if isinstance(s, unicode):
s = s.encode('utf-8')
return urllib.unquote(s).decode('utf-8')
def _path_decode_py3(s):
"""Unquote str or bytes using path rules."""
if isinstance(s, bytes):
s = s.decode('utf-8')
return urlparse.unquote(s)
if hasattr(urllib, 'quote'):
path_encode = _path_encode_py2
path_decode = _path_decode_py2
del _path_encode_py3
del _path_decode_py3
else:
path_encode = _path_encode_py3
path_decode = _path_decode_py3
del _path_encode_py2
del _path_decode_py2
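# Illustrative round-trip (inputs are assumptions for this sketch):
#   path_encode(u'dé/f')        -> u'd%C3%A9%2Ff'
#   path_decode(u'd%C3%A9%2Ff') -> u'dé/f'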
| unlicense | -4,280,237,424,016,477,700 | -4,644,964,054,278,973,000 | 24.643243 | 79 | 0.521079 | false |
IssamLaradji/scikit-learn | sklearn/linear_model/ransac.py | 16 | 13870 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
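# Worked example (values are illustrative): with 80 inliers out of 100
# samples, min_samples=2 and probability=0.99,
#   N >= log(0.01) / log(1 - 0.8**2) ~= 4.51, so 5 trials suffice.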
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
        as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
        `min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
        Stop iteration if score is greater than or equal to this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
data is sampled in RANSAC. This requires to generate at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
def fit(self, X, y):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
if y.ndim == 1:
y = y.reshape(-1, 1)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is None:
residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
else:
residual_metric = self.residual_metric
random_state = check_random_state(self.random_state)
        try:  # Not all estimators accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
base_estimator.fit(X_subset, y_subset)
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
if y_pred.ndim == 1:
y_pred = y_pred[:, None]
residuals_subset = residual_metric(y_pred - y)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
            # fewer inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find a valid consensus set, because"
                " either `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly chosen sub-samples. Consider relaxing"
                " the constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
return self.estimator_.score(X, y)
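# Illustrative usage (synthetic data; every value below is an assumption made
# for this sketch, not part of the library):
#   import numpy as np
#   rng = np.random.RandomState(0)
#   X = rng.randn(200, 1)
#   y = 3. * X.ravel() + rng.randn(200) * 0.1
#   y[::10] += 20.                     # inject gross outliers
#   ransac = RANSACRegressor(min_samples=2, residual_threshold=1.0)
#   ransac.fit(X, y)
#   inlier_mask = ransac.inlier_mask_  # boolean mask of detected inliers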
| bsd-3-clause | 8,410,626,056,720,261,000 | -1,977,729,503,757,674,800 | 36.082888 | 79 | 0.598313 | false |