# -*- coding: utf-8 -*-
# salaria/odoo: addons/calendar/calendar.py
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
import collections
import babel.dates
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import api
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
"""
    Convert a "virtual/recurring event id" (type string) into a real event id (type int).
    E.g. the virtual/recurring event id "4-20091201100000" will return 4.
    @param calendar_id: (possibly virtual) id of the calendar event
    @param with_date: if set, also compute the occurrence's dates; the value is used as a duration in hours added to the start date
    @return: the real event id, or a tuple (real id, start date, stop date) when with_date is set
"""
    if calendar_id and isinstance(calendar_id, basestring):
res = calendar_id.split('-')
if len(res) >= 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], "%Y%m%d%H%M%S"))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
if isinstance(ids, (basestring, int, long)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(id) for id in ids]
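# Illustrative sketch of how virtual ids are resolved (the id values below are
# hypothetical and not part of the original module):
#
#   calendar_id2real_id('4-20091201100000')
#   # -> 4
#   calendar_id2real_id('4-20091201100000', with_date=2)
#   # -> (4, '2009-12-01 10:00:00', '2009-12-01 12:00:00')  # with_date = duration in hours
#   get_real_ids(['4-20091201100000', 7])
#   # -> [4, 7]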
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
"""
Compute data on function fields for attendee values.
        @param ids: list of calendar attendees' IDs
        @param name: name of the field
        @return: dictionary of the form {id: {'field_name': value}}
"""
name = name[0]
result = {}
for attdata in self.browse(cr, uid, ids, context=context):
id = attdata.id
result[id] = {}
if name == 'cn':
if attdata.partner_id:
result[id][name] = attdata.partner_id.name or False
else:
result[id][name] = attdata.email or ''
return result
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
_columns = {
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
'email': fields.char('Email', help="Email of Invited Person"),
        'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly=True),
'access_token': fields.char('Invitation Token'),
'event_id': fields.many2one('calendar.event', 'Meeting linked', ondelete='cascade'),
}
_defaults = {
'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
        Fill in the email field when the partner_id field changes.
        @param partner_id: new value of the partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, allday=False):
if idate:
if allday:
return openerp.fields.Date.from_string(idate)
else:
return openerp.fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.start or not event_obj.stop:
raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'Odoo'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
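    # A minimal sketch (hypothetical values, not from the original source) of
    # the shape of the serialized invitation for a one-hour meeting, assuming
    # vobject is installed (the real output also carries fields such as
    # CREATED and any RRULE/VALARM components):
    #
    #   BEGIN:VCALENDAR
    #   VERSION:2.0
    #   BEGIN:VEVENT
    #   DTSTART:20130823T043000Z
    #   DTEND:20130823T053000Z
    #   SUMMARY:Project kickoff
    #   ATTENDEE:MAILTO:jane@example.com
    #   END:VEVENT
    #   END:VCALENDAR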
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
template_xmlid='calendar_template_meeting_invitation', force=False, context=None):
"""
        Send invitation emails to the event attendees.
        @param email_from: email address of the user sending the mail
        @param force: if True, also send the email to the user himself; useful e.g. for alerts
"""
        res = False
        if context is None:
            context = {}
        if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
return res
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['email.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context.update({
'color': color,
'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
'dbname': cr.dbname,
'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
})
for attendee in self.browse(cr, uid, ids, context=context):
if attendee.email and email_from and (attendee.email != email_from or force):
ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'datas_fname': 'invitation.ics',
'datas': str(ics_file).encode('base64')})]
                    vals['model'] = None  # We don't want to have the mail in the chatter while in queue!
the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
mail_ids.append(mail_id)
if mail_ids:
res = mail_pool.send(cr, uid, mail_ids, context=context)
return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
        Fill in the email and availability fields when the user_id field changes.
        @param ids: list of attendees' IDs
        @param user_id: new value of the user id
        @return: dictionary of values for the email and availability fields
"""
if not user_id:
return {'value': {'email': ''}}
user = self.pool['res.users'].browse(cr, uid, user_id, *args)
return {'value': {'email': user.email, 'availability': user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
"""
        Marks the event invitation as Tentative.
@param ids: list of attendee's IDs
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Accepted.
@param ids: list of attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has accepted invitation") % (attendee.cn),
                                     subtype="calendar.subtype_invitation", context=context)
return res
def do_decline(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Declined.
@param ids: list of calendar attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'declined'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
            meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has declined invitation") % (attendee.cn), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
            email = [x for x in cnval if '@' in x]
vals['email'] = email and email[0] or ''
vals['cn'] = vals.get("cn")
res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
}
def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
"""
        Return a list of tuples (id, name, status)
        Used by web_calendar.js: Many2ManyAttendee
"""
datas = []
meeting = None
if meeting_id:
meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
for partner in self.browse(cr, uid, ids, context=context):
data = self.name_get(cr, uid, [partner.id], context)[0]
if meeting:
for attendee in meeting.attendee_ids:
if attendee.partner_id.id == partner.id:
data = (data[0], data[1], attendee.state)
datas.append(data)
return datas
def _set_calendar_last_notif_ack(self, cr, uid, context=None):
partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
self.write(cr, uid, partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
res = {}
base_request = """
SELECT
cal.id,
cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
CASE
WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
END as last_alarm,
cal.start as first_event_date,
CASE
WHEN cal.recurrency THEN cal.final_date
ELSE cal.stop
END as last_event_date,
calcul_delta.min_delta,
calcul_delta.max_delta,
cal.rrule AS rule
FROM
calendar_event AS cal
RIGHT JOIN
(
SELECT
rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
FROM
calendar_alarm_calendar_event_rel AS rel
LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
WHERE alarm.type in %s
GROUP BY rel.calendar_event_id
) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
"""
filter_user = """
RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
AND part_rel.res_partner_id = %s
"""
        # Add a filter on the alarm type
type_to_read = ()
if notif:
type_to_read += ('notification',)
if mail:
type_to_read += ('email',)
tuple_params = (type_to_read,)
        # Add a filter on partner_id
if partner_id:
base_request += filter_user
tuple_params += (partner_id, )
        # Add the look-ahead window (in seconds)
tuple_params += (seconds,)
cr.execute("""SELECT *
FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
AND ALL_EVENTS.last_event_date > (now() at time zone 'utc')
""" % base_request, tuple_params)
for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
res[event_id] = {
'event_id': event_id,
'first_alarm': first_alarm,
'last_alarm': last_alarm,
'first_meeting': first_meeting,
'last_meeting': last_meeting,
'min_duration': min_duration,
'max_duration': max_duration,
'rrule': rule
}
return res
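    # Illustrative sketch (hypothetical values, not from the original source):
    # for an event 42 starting in 20 minutes with a single 15-minute email
    # alarm, get_next_potential_limit_alarm(cr, uid, 3600, notif=False) would
    # return something like:
    #
    #   {42: {'event_id': 42,
    #         'first_alarm': <start - 15 min>, 'last_alarm': <stop - 15 min>,
    #         'first_meeting': <start>, 'last_meeting': <stop>,
    #         'min_duration': 15, 'max_duration': 15, 'rrule': False}}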
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, missing=False, context=None):
        # one_date: date of the occurrence to check (not necessarily the one on the event browse record if recurrent)
        # event: event browse record
        # event_maxdelta: biggest alarm duration (in minutes) for this event
        # in_the_next_X_seconds: how far to look into the future (in seconds)
        # after: if not False, only return alerts after this date (date as string - todo: change in master)
        # missing: if not False, also return alerts for which we are already too late
        # notif: look for alarms of type notification
        # mail: look for alarms of type email
res = []
# TODO: replace notif and email in master by alarm_type + remove event_maxdelta and if using it
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=(missing and 0 or event_maxdelta)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=(missing and 0 or alarm.duration_minutes)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > openerp.fields.Datetime.from_string(after)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
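    # Worked example (hypothetical values): for an occurrence at 10:00 with a
    # 30-minute alarm, checked at 09:45 with in_the_next_X_seconds=1800, the
    # condition "10:00 - 30 min < 09:45 + 30 min" holds, so an alert is
    # emitted with notify_at = 09:30 (one_date minus alarm.duration_minutes).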
def get_next_mail(self, cr, uid, context=None):
now = openerp.fields.Datetime.to_string(datetime.now())
icp = self.pool['ir.config_parameter']
last_notif_mail = icp.get_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', default=False) or now
try:
cron = self.pool['ir.model.data'].get_object(cr, uid, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
_logger.error("Cron for " + self._name + " can not be identified !")
return False
interval_to_second = {
"weeks": 7 * 24 * 60 * 60,
"days": 24 * 60 * 60,
"hours": 60 * 60,
"minutes": 60,
"seconds": 1
}
if cron.interval_type not in interval_to_second.keys():
_logger.error("Cron delay can not be computed !")
return False
cron_interval = cron.interval_number * interval_to_second[cron.interval_type]
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for curEvent in self.pool.get('calendar.event').browse(cr, uid, all_events.keys(), context=context):
max_delta = all_events[curEvent.id]['max_duration']
if curEvent.recurrency:
at_least_one = False
last_found = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
                        at_least_one = True  # an alarm was found for this occurrence
                    if at_least_one and not last_found:  # if the previous occurrence had an alarm but not this one, we can stop searching for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
icp.set_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', now)
def get_next_notif(self, cr, uid, context=None):
ajax_check_every_seconds = 300
partner = self.pool['res.users'].read(cr, SUPERUSER_ID, uid, ['partner_id', 'calendar_last_notif_ack'], context=context)
all_notif = []
if not partner:
return []
all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner['partner_id'][0], mail=False, context=context)
        for event in all_events:
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
if not bFound: # if it's the first alarm for this recurrent event
bFound = True
                    if bFound and not LastFound:  # if the previous occurrence had an alarm but not this one, we can stop searching for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(
cr,
uid,
[att.id for att in event.attendee_ids],
email_from=event.user_id.partner_id.email,
template_xmlid='calendar_template_meeting_reminder',
force=True,
context=context
)
return res
def do_notif_reminder(self, cr, uid, alert, context=None):
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
if alarm.type == 'notification':
message = event.display_time
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': event.id,
'title': event.name,
'message': message,
'timer': delta,
'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
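    # Worked example (hypothetical values): if notify_at is 1 day and 2 hours
    # from now, delta.days == 1 and delta.seconds == 7200, so the returned
    # 'timer' is 7200 + 1 * 3600 * 24 = 93600 seconds.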
class calendar_alarm(osv.Model):
_name = 'calendar.alarm'
_description = 'Event alarm'
def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for alarm in self.browse(cr, uid, ids, context=context):
if alarm.interval == "minutes":
res[alarm.id] = alarm.duration
elif alarm.interval == "hours":
res[alarm.id] = alarm.duration * 60
elif alarm.interval == "days":
res[alarm.id] = alarm.duration * 60 * 24
else:
res[alarm.id] = 0
return res
_columns = {
'name': fields.char('Name', required=True),
'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
'duration': fields.integer('Amount', required=True),
'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
'duration_minutes': fields.function(_get_duration, type='integer', string='Duration in minutes', store=True),
}
_defaults = {
'type': 'notification',
'duration': 1,
'interval': 'hours',
}
def _update_cron(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, SUPERUSER_ID, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('type', '=', 'email')])
def create(self, cr, uid, values, context=None):
res = super(calendar_alarm, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(calendar_alarm, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(calendar_alarm, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class ir_values(osv.Model):
_inherit = 'ir.values'
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).set(cr, uid, key, key2, name, new_model,
value, replace, isobject, meta, preserve_user, company)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
if context is None:
context = {}
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).get(cr, uid, key, key2, new_model,
meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
_inherit = 'ir.model'
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
new_ids = isinstance(ids, (basestring, int, long)) and [ids] or ids
if context is None:
context = {}
data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
if data:
for val in data:
val['id'] = calendar_id2real_id(val['id'])
return isinstance(ids, (basestring, int, long)) and data[0] or data
original_exp_report = openerp.service.report.exp_report
def exp_report(db, uid, object, ids, datas=None, context=None):
"""
Export Report
"""
if object == 'printscreen.list':
original_exp_report(db, uid, object, ids, datas, context)
new_ids = []
for id in ids:
new_ids.append(calendar_id2real_id(id))
if datas.get('id', False):
datas['id'] = calendar_id2real_id(datas['id'])
return original_exp_report(db, uid, object, new_ids, datas, context)
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
_name = 'calendar.event.type'
_description = 'Meeting Type'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class calendar_event(osv.Model):
""" Model for Calendar Event """
_name = 'calendar.event'
_description = "Event"
_order = "id desc"
_inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
def get_recurrent_date_by_event(self, cr, uid, event, context=None):
"""Get recurrent dates based on Rule string and all event where recurrent_id is child
"""
def todate(date):
            val = parser.parse(''.join((re.compile(r'\d')).findall(date)))
## Dates are localized to saved timezone if any, else current timezone.
if not val.tzinfo:
val = pytz.UTC.localize(val)
return val.astimezone(timezone)
if context is None:
context = {}
timezone = pytz.timezone(context.get('tz') or 'UTC')
startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT)) # Add "+hh:mm" timezone
if not startdate:
startdate = datetime.now()
## Convert the start date to saved timezone (or context tz) as it'll
## define the correct hour/day asked by the user to repeat for recurrence.
startdate = startdate.astimezone(timezone) # transform "+hh:mm" timezone
rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
all_events = self.browse(cr, uid, ids_depending, context=context)
for ev in all_events:
rset1._exdate.append(todate(ev.recurrent_id_date))
return [d.astimezone(pytz.UTC) for d in rset1]
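    # Illustrative sketch of the underlying dateutil expansion (hypothetical
    # values, not from the original source):
    #
    #   from dateutil import rrule
    #   from datetime import datetime
    #   list(rrule.rrulestr('FREQ=WEEKLY;COUNT=3',
    #                       dtstart=datetime(2013, 8, 23, 10, 0)))
    #   # -> [datetime(2013, 8, 23, 10, 0), datetime(2013, 8, 30, 10, 0),
    #   #     datetime(2013, 9, 6, 10, 0)]
    #
    # The method above does the same with timezone-aware dates and removes
    # the dates of detached occurrences via rset1._exdate.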
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return final_date
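    # Worked example (hypothetical values): with end_type='count', count=2,
    # rrule_type='weekly' and stop='2013-08-23 10:00:00', the mapping gives
    # delay='days', mult=7, so the method returns the deadline plus
    # (2 + 1) * 7 = 21 days, i.e. 2013-09-13 10:00:00.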
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
        Return the first attendee record matching the connected user among the given meeting_ids
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
            ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
                lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
# formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str
format_date = lang_params.get("date_format", '%B-%d-%Y').encode('utf-8')
format_time = lang_params.get("time_format", '%I-%M %p').encode('utf-8')
return (format_date, format_time)
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
"""
        Return the event's date and time range as a string, based on the duration, with the timezone, e.g.:
        1) if the user sets a duration of 2 hours, return: August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
        2) if the event lasts all day, return: AllDay, July-31-2013
"""
context = dict(context or {})
tz = context.get('tz', False)
        if not tz:  # tz can be False, so don't rely on the default value of get!
context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
tz = context['tz']
tz = tools.ustr(tz).encode('utf-8') # make safe for str{p,f}time()
format_date, format_time = self.get_date_formats(cr, uid, context=context)
date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
event_date = date.strftime(format_date)
display_time = date.strftime(format_time)
if zallday:
time = _("AllDay , %s") % (event_date)
elif zduration < 24:
duration = date + timedelta(hours=zduration)
time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
else:
time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
if not isinstance(fields, list):
fields = [fields]
for meeting in self.browse(cr, uid, ids, context=context):
meeting_data = {}
res[meeting.id] = meeting_data
attendee = self._find_my_attendee(cr, uid, [meeting.id], context)
for field in fields:
if field == 'is_attendee':
meeting_data[field] = bool(attendee)
elif field == 'attendee_status':
meeting_data[field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
meeting_data[field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
meeting_data[field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
"""
        Get the recurrence rule string (iCalendar RECUR value type) from the given values.
        @return: dictionary of rrule values
"""
result = {}
if not isinstance(ids, list):
ids = [ids]
#read these fields as SUPERUSER because if the record is private a normal search could raise an error
events = self.read(cr, SUPERUSER_ID, ids,
['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by',
'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
'su', 'day', 'week_list'], context=context)
for event in events:
if event['recurrency']:
result[event['id']] = self.compute_rule_string(event)
else:
result[event['id']] = ''
return result
    # retro-compatibility function
    def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
        return self._set_rulestring(cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
data = self._get_empty_rrule_data()
if field_value:
data['recurrency'] = True
for event in self.browse(cr, uid, ids, context=context):
rdate = event.start
update_data = self._parse_rrule(field_value, dict(data), rdate)
data.update(update_data)
self.write(cr, uid, ids, data, context=context)
return True
def _set_date(self, cr, uid, values, id=False, context=None):
if context is None:
context = {}
if values.get('start_datetime') or values.get('start_date') or values.get('start') \
or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
allday = values.get("allday", None)
event = self.browse(cr, uid, id, context=context)
if allday is None:
if id:
allday = event.allday
else:
allday = False
_logger.warning("Calendar - All day is not specified, arbitrarily set to False")
#raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))
key = "date" if allday else "datetime"
notkey = "datetime" if allday else "date"
for fld in ('start', 'stop'):
if values.get('%s_%s' % (fld, key)) or values.get(fld):
values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
values['%s_%s' % (fld, notkey)] = None
if fld not in values.keys():
values[fld] = values['%s_%s' % (fld, key)]
diff = False
if allday and (values.get('stop_date') or values.get('start_date')):
stop_date = values.get('stop_date') or event.stop_date
start_date = values.get('start_date') or event.start_date
if stop_date and start_date:
diff = openerp.fields.Date.from_string(stop_date) - openerp.fields.Date.from_string(start_date)
elif values.get('stop_datetime') or values.get('start_datetime'):
stop_datetime = values.get('stop_datetime') or event.stop_datetime
start_datetime = values.get('start_datetime') or event.start_datetime
if stop_datetime and start_datetime:
diff = openerp.fields.Datetime.from_string(stop_datetime) - openerp.fields.Datetime.from_string(start_datetime)
if diff:
duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
values['duration'] = round(duration, 2)
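    # Worked example (hypothetical values): for start_datetime
    # '2013-08-23 09:00:00' and stop_datetime '2013-08-23 11:30:00', diff is
    # 2 h 30 min, so duration = 0 * 24 + 9000 / 3600 = 2.5 hours.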
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='attendee', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='attendee', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='attendee', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
    def _get_default_partners(self, cr, uid, ctx=None):
        if ctx is None:
            ctx = {}
        ret = [self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id]
        active_id = ctx.get('active_id')
if ctx.get('active_model') == 'res.partner' and active_id:
if active_id not in ret:
ret.append(active_id)
return ret
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': _get_default_partners,
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.start_datetime and event.stop_datetime < event.start_datetime:
return False
if event.start_date and event.stop_date < event.start_date:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start_datetime', 'stop_datetime', 'start_date', 'stop_date'])
]
def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
value = {}
        if not ((starttime and endtime) or (start and end)):  # on first initialization we have no datetime yet
return value
if checkallday: # from datetime to date
startdatetime = startdatetime or start
if startdatetime:
start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
enddatetime = enddatetime or end
if enddatetime:
end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
else: # from date to datetime
user = self.pool['res.users'].browse(cr, uid, uid, context)
tz = pytz.timezone(user.tz) if user.tz else pytz.utc
if starttime:
start = openerp.fields.Datetime.from_string(starttime)
startdate = tz.localize(start) # Add "+hh:mm" timezone
startdate = startdate.replace(hour=8) # Set 8 AM in localtime
startdate = startdate.astimezone(pytz.utc) # Convert to UTC
value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
elif start:
value['start_datetime'] = start
if endtime:
end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
elif end:
value['stop_datetime'] = end
return {'value': value}
def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
"""Returns duration and end date based on values passed
@param ids: List of calendar event's IDs.
"""
value = {}
if checkallday != allday:
return value
        value['allday'] = checkallday  # force it to be rewritten
if allday:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
else:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context=None):
if context is None:
context = {}
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if not context.get('no_email'):
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
            partner_ids_to_remove = list(set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
attendee_ids_to_remove = []
if partner_ids_to_remove:
attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
if attendee_ids_to_remove:
self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
res[event.id] = {
'new_attendee_ids': new_attendees,
'old_attendee_ids': all_attendee_ids,
'removed_attendee_ids': attendee_ids_to_remove
}
return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
if r_date:
sort_fields['sort_start'] = r_date.strftime("%Y%m%d%H%M%S")
else:
sort_fields['sort_start'] = browse_event['display_start'].replace(' ', '').replace('-', '')
return sort_fields
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
"""Gives virtual event ids for recurring events
        This method gives the ids of the occurrences that fall between the start and end dates of the calendar view
@param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
"""
if not context:
context = {}
if isinstance(event_id, (basestring, int, long)):
ids_to_browse = [event_id] # keep select for return
else:
ids_to_browse = event_id
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
if 'id' not in order_fields:
order_fields.append('id')
result_data = []
result = []
for ev in self.browse(cr, uid, ids_to_browse, context=context):
if not ev.recurrency or not ev.rrule:
result.append(ev.id)
result_data.append(self.get_search_fields(ev, order_fields))
continue
rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
for r_date in rdates:
                # fix domain evaluation
                # step 1: check the date conditions and replace each by True or False; replace all other leaf expressions by True
                # step 2: evaluate the '&' and '|' operators
                # then check whether any result is False
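                # Illustrative sketch (hypothetical domain, not from the
                # original source): for
                # ['&', ('start', '>=', '2013-08-01'), ('user_id', '=', 5)]
                # the pile becomes ['&', <date check result>, True]; after the
                # reversed evaluation below, new_pile holds a single boolean.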
pile = []
ok = True
for arg in domain:
if str(arg[0]) in ('start', 'stop', 'final_date'):
if (arg[1] == '='):
ok = r_date.strftime('%Y-%m-%d') == arg[2]
if (arg[1] == '>'):
ok = r_date.strftime('%Y-%m-%d') > arg[2]
if (arg[1] == '<'):
ok = r_date.strftime('%Y-%m-%d') < arg[2]
if (arg[1] == '>='):
ok = r_date.strftime('%Y-%m-%d') >= arg[2]
if (arg[1] == '<='):
ok = r_date.strftime('%Y-%m-%d') <= arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, basestring):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif str(item) == str('|'):
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if [True for item in new_pile if not item]:
continue
result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
if order_fields:
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
return 0
sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
sort_params = uniq([comp if comp not in ['start', 'start_date', 'start_datetime'] else 'sort_start' for comp in sort_params])
sort_params = uniq([comp if comp not in ['-start', '-start_date', '-start_datetime'] else '-sort_start' for comp in sort_params])
comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
if isinstance(event_id, (basestring, int, long)):
return ids and ids[0] or False
else:
return ids
def compute_rule_string(self, data):
"""
Compute rule string according to value type RECUR of iCalendar from the values given.
@param self: the object pointer
@param data: dictionary of freq and interval value
@return: string containing recurring rule (empty if no rule)
"""
        if data['interval'] and data['interval'] < 0:
            raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
        if data['count'] and data['count'] <= 0:
            raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
def get_week_string(freq, data):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = map(lambda x: x.upper(), filter(lambda x: data.get(x) and x in weekdays, data))
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq, data):
if freq == 'monthly':
if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
                    raise osv.except_osv(_('Error!'), _("Please select a proper day of the month."))
if data.get('month_by') == 'day': # Eg : Second Monday of the month
return ';BYDAY=' + data.get('byday') + data.get('week_list')
elif data.get('month_by') == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(data.get('day'))
return ''
def get_end_date(data):
if data.get('final_date'):
                data['end_date_new'] = ''.join((re.compile(r'\d')).findall(data.get('final_date'))) + 'T235959Z'
return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
freq = data.get('rrule_type', False) # day/week/month/year
res = ''
if freq:
            interval_string = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
            res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)
return res
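    # Illustrative sketch (hypothetical values): for a weekly recurrence on
    # Monday and Wednesday, every 2 weeks, ending after 10 repetitions,
    # compute_rule_string returns something like
    # 'FREQ=WEEKLY;BYDAY=MO,WE;INTERVAL=2;COUNT=10' (the order of the BYDAY
    # entries follows the dict iteration order of the data keys).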
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
if r._freq > 0 and r._freq < 4:
data['rrule_type'] = rrule_type[r._freq]
data['count'] = r._count
data['interval'] = r._interval
data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#repeat weekly
if r._byweekday:
for i in xrange(0, 7):
if i in r._byweekday:
data[day_list[i]] = True
data['rrule_type'] = 'weekly'
#repeat monthly by nweekday ((weekday, weeknumber), )
if r._bynweekday:
data['week_list'] = day_list[r._bynweekday[0][0]].upper()
data['byday'] = str(r._bynweekday[0][1])
data['month_by'] = 'day'
data['rrule_type'] = 'monthly'
if r._bymonthday:
data['day'] = r._bymonthday[0]
data['month_by'] = 'date'
data['rrule_type'] = 'monthly'
        # yearly repetition is handled as monthly in openerp: same information as monthly, but the interval is multiplied by 12
if r._bymonth:
data['interval'] = data['interval'] * 12
        # FIXME: handle the forever case
        # end of recurrence
        # repeating forever is not supported right now, so default to a count
if not (data.get('count') or data.get('final_date')):
data['count'] = 100
if data.get('count'):
data['end_type'] = 'count'
else:
data['end_type'] = 'end_date'
return data
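    # Illustrative sketch (hypothetical values): parsing
    # 'FREQ=MONTHLY;BYDAY=2MO;COUNT=5' fills data with rrule_type='monthly',
    # month_by='day', week_list='MO', byday='2', count=5 and end_type='count'.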
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
res = {}
for virtual_id in ids:
real_id = calendar_id2real_id(virtual_id)
            result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=user_pid, context=context)
res[virtual_id] = result[real_id]
return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
""" The basic purpose of this method is to check that destination partners
effectively have email addresses. Otherwise a warning is thrown.
:param value: value format: [[6, 0, [3, 4]]]
"""
res = {'value': {}}
if not value or not value[0] or not value[0][0] == 6:
            return res
res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
""" Verify that selected partner_ids have an email_address defined.
Otherwise throw a warning. """
partner_wo_email_lst = []
for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context):
if not partner.email:
partner_wo_email_lst.append(partner)
if not partner_wo_email_lst:
return {}
        warning_msg = _('The following contacts have no email address:')
for partner in partner_wo_email_lst:
warning_msg += '\n- %s' % (partner.name)
return {'warning': {
'title': _('Email addresses not found'),
'message': warning_msg,
}}
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
return [
('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')),
('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')),
('user_id', '=', uid),
]
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
if isinstance(thread_id, basestring):
thread_id = get_real_ids(thread_id)
        context = dict(context or {})
        if context.get('default_date'):
            del context['default_date']
return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
return super(calendar_event, self).message_subscribe(cr, uid, get_real_ids(ids), partner_ids, subtype_ids=subtype_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
return super(calendar_event, self).message_unsubscribe(cr, uid, get_real_ids(ids), partner_ids, context=context)
def do_sendmail(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if current_user.email:
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
return
def get_attendee(self, cr, uid, meeting_id, context=None):
# Used for view in controller
invitation = {'meeting': {}, 'attendee': []}
meeting = self.browse(cr, uid, int(meeting_id), context=context)
invitation['meeting'] = {
'event': meeting.name,
'where': meeting.location,
'when': meeting.display_time
}
for attendee in meeting.attendee_ids:
invitation['attendee'].append({'name': attendee.cn, 'status': attendee.state})
return invitation
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
''' Format and localize some dates to be used in email templates
:param string date: date/time to be formatted
:param string interval: Among 'day', 'month', 'dayname' and 'time' indicating the desired formatting
:param string tz: Timezone indicator (optional)
:return unicode: Formatted date or time (as unicode string, to prevent jinja2 crash)
(Function used only in calendar_event_data.xml) '''
date = openerp.fields.Datetime.from_string(date)
if tz:
timezone = pytz.timezone(tz or 'UTC')
date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
if interval == 'day':
# Day number (1-31)
res = unicode(date.day)
elif interval == 'month':
# Localized month name and year
res = babel.dates.format_date(date=date, format='MMMM y', locale=context.get('lang', 'en_US'))
elif interval == 'dayname':
# Localized day name
res = babel.dates.format_date(date=date, format='EEEE', locale=context.get('lang', 'en_US'))
elif interval == 'time':
# Localized time
dummy, format_time = self.get_date_formats(cr, uid, context=context)
res = tools.ustr(date.strftime(format_time + " %Z"))
return res
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('mymeetings', False):
partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
args += [('partner_ids', 'in', [partner_id])]
new_args = []
for arg in args:
new_arg = arg
if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
if context.get('virtual_id', True):
new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
elif arg[0] == "id":
new_id = get_real_ids(arg[2])
new_arg = (arg[0], arg[1], new_id)
new_args.append(new_arg)
if not context.get('virtual_id', True):
return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
# offset, limit, order and count must be treated separately as we may need to deal with virtual ids
res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
if count:
return len(res)
elif limit:
return res[offset: offset + limit]
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
self._set_date(cr, uid, default, id=default.get('id'), context=context)
return super(calendar_event, self).copy(cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=dict(), context=None):
real_event_id = calendar_id2real_id(id)
data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'])
data['start_date' if data['allday'] else 'start_datetime'] = data['start']
data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
if data.get('rrule'):
data.update(
values,
recurrent_id=real_event_id,
recurrent_id_date=data.get('start'),
rrule_type=False,
rrule='',
recurrency=False,
final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
)
# do not copy the id
if data.get('id'):
del(data['id'])
new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
if context is None:
context = {}
new_id = self._detach_one_event(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'calendar.event',
'view_mode': 'form',
'res_id': new_id,
'target': 'current',
'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}}
}
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
for arg in args:
if arg[0] == 'id':
for n, calendar_id in enumerate(arg[2]):
if isinstance(calendar_id, basestring):
arg[2][n] = calendar_id.split('-')[0]
return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
def write(self, cr, uid, ids, values, context=None):
def _only_changes_to_apply_on_real_ids(field_names):
''' return True if changes are only to be made on the real ids'''
for field in field_names:
if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
return True
return False
if not isinstance(ids, (tuple, list)):
ids = [ids]
context = context or {}
self._set_date(cr, uid, values, id=ids[0], context=context)
for one_ids in ids:
if isinstance(one_ids, (basestring, int, long)):
if len(str(one_ids).split('-')) == 1:
ids = [int(one_ids)]
else:
ids = [one_ids]
res = False
new_id = False
# Special write of complex IDS
for event_id in list(ids):
if len(str(event_id).split('-')) == 1:
continue
ids.remove(event_id)
real_event_id = calendar_id2real_id(event_id)
# if we are setting the recurrency flag to False or if we are only changing fields that
# should be only updated on the real ID and not on the virtual (like message_follower_ids):
# then set real ids to be updated.
if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
ids.append(real_event_id)
continue
else:
data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
if data.get('rrule'):
new_id = self._detach_one_event(cr, uid, event_id, values, context=context)
res = super(calendar_event, self).write(cr, uid, [int(event_id) for event_id in ids], values, context=context)
# set final_date for calendar searching
if values.get('recurrency', True) and values.get('end_type', 'count') in ('count', unicode('count')) and \
(values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
for id in ids:
final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
attendees_create = False
if values.get('partner_ids', False):
attendees_create = self.create_attendees(cr, uid, ids, context)
if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
the_id = new_id or (ids and int(ids[0]))
if the_id:
if attendees_create:
attendees_create = attendees_create[the_id]
mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
else:
mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
if mail_to_ids:
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
self.message_post(cr, uid, the_id, body=_("An email has been sent to notify that the date has been changed!"), subtype="calendar.subtype_invitation", context=context)
return res or False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
self._set_date(cr, uid, vals, id=False, context=context)
if 'user_id' not in vals: # otherwise quick_create breaks when filtering on another user
vals['user_id'] = uid
res = super(calendar_event, self).create(cr, uid, vals, context=context)
final_date = self._get_recurrency_end_date(cr, uid, res, context=context)
self.write(cr, uid, [res], {'final_date': final_date}, context=context)
self.create_attendees(cr, uid, [res], context=context)
return res
def export_data(self, cr, uid, ids, *args, **kwargs):
""" Override to convert virtual ids to ids """
real_ids = []
for real_id in get_real_ids(ids):
if real_id not in real_ids:
real_ids.append(real_id)
return super(calendar_event, self).export_data(cr, uid, real_ids, *args, **kwargs)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
context = dict(context or {})
if 'date' in groupby:
raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
virtual_id = context.get('virtual_id', True)
context.update({'virtual_id': False})
res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if context is None:
context = {}
fields2 = fields and fields[:] or None
EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
for f in EXTRAFIELDS:
if fields and (f not in fields):
fields2.append(f)
if isinstance(ids, (basestring, int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: (x, calendar_id2real_id(x)), select)
result = []
real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
real_data = dict(zip([x['id'] for x in real_data], real_data))
for calendar_id, real_id in select:
res = real_data[real_id].copy()
ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
if not isinstance(ls, (basestring, int, long)) and len(ls) >= 2:
res['start'] = ls[1]
res['stop'] = ls[2]
if res['allday']:
res['start_date'] = ls[1]
res['stop_date'] = ls[2]
else:
res['start_datetime'] = ls[1]
res['stop_datetime'] = ls[2]
if 'display_time' in fields:
res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
res['id'] = calendar_id
result.append(res)
for r in result:
if r['user_id']:
user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
if user_id == uid:
continue
if r['class'] == 'private':
for f in r.keys():
if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
if isinstance(r[f], list):
r[f] = []
else:
r[f] = False
if f == 'name':
r[f] = _('Busy')
for r in result:
for k in EXTRAFIELDS:
if (k in r) and (fields and (k not in fields)):
del r[k]
if isinstance(ids, (basestring, int, long)):
return result and result[0] or False
return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
if not isinstance(ids, list):
ids = [ids]
res = False
ids_to_exclure = []
ids_to_unlink = []
for event_id in ids:
if can_be_deleted and len(str(event_id).split('-')) == 1: # real (non-virtual) id
if self.browse(cr, uid, int(event_id), context).recurrent_id:
ids_to_exclure.append(event_id)
else:
ids_to_unlink.append(int(event_id))
else:
ids_to_exclure.append(event_id)
if ids_to_unlink:
res = super(calendar_event, self).unlink(cr, uid, ids_to_unlink, context=context)
if ids_to_exclure:
for id_to_exclure in ids_to_exclure:
res = self.write(cr, uid, id_to_exclure, {'active': False}, context=context)
return res
class mail_message(osv.Model):
_inherit = "mail.message"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
convert a search on virtual ids into a search on the corresponding real ids, then call super()
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
if context is None:
context = {}
if doc_model == 'calendar.event':
order = context.get('order', self._order)
for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
_inherit = "ir.attachment"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
convert a search on virtual ids into a search on the corresponding real ids, then call super()
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def write(self, cr, uid, ids, vals, context=None):
'''
when writing an attachment (new or not), convert virtual ids into real ids.
'''
if isinstance(vals.get('res_id'), basestring):
vals['res_id'] = get_real_ids(vals.get('res_id'))
return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
_inherit = 'ir.http'
def _auth_method_calendar(self):
token = request.params['token']
db = request.params['db']
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
error_message = False
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
if not attendee_id:
error_message = """Invalid Invitation Token."""
elif request.session.uid and request.session.login != 'anonymous':
# valid session, but the logged-in user does not match the attendee
attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
if attendee.partner_id.id != user.partner_id.id:
error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
if error_message:
raise BadRequest(error_message)
return True
class invite_wizard(osv.osv_memory):
_inherit = 'mail.wizard.invite'
def default_get(self, cr, uid, fields, context=None):
'''
in case someone clicked the 'invite others' wizard in the followers widget, transform virtual ids into real ids
'''
if 'default_res_id' in context:
context = dict(context, default_res_id=get_real_ids(context['default_res_id']))
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
if 'res_id' in result:
result['res_id'] = get_real_ids(result['res_id'])
return result
| agpl-3.0 |
mancoast/CPythonPyc_test | fail/320_test_profile.py | 91 | 7006 | """Test suite for the profile module."""
import sys
import pstats
import unittest
from difflib import unified_diff
from io import StringIO
from test.support import run_unittest
import profile
from test.profilee import testfunc, timer
class ProfileTest(unittest.TestCase):
profilerclass = profile.Profile
methodnames = ['print_stats', 'print_callers', 'print_callees']
expected_max_output = ':0(max)'
def get_expected_output(self):
return _ProfileOutput
@classmethod
def do_profiling(cls):
results = []
prof = cls.profilerclass(timer, 0.001)
start_timer = timer()
prof.runctx("testfunc()", globals(), locals())
results.append(timer() - start_timer)
for methodname in cls.methodnames:
s = StringIO()
stats = pstats.Stats(prof, stream=s)
stats.strip_dirs().sort_stats("stdname")
getattr(stats, methodname)()
output = s.getvalue().splitlines()
mod_name = testfunc.__module__.rsplit('.', 1)[1]
# Only compare against stats originating from the test file.
# Prevents outside code (e.g., the io module) from causing
# unexpected output.
output = [line.rstrip() for line in output if mod_name in line]
results.append('\n'.join(output))
return results
def test_cprofile(self):
results = self.do_profiling()
expected = self.get_expected_output()
self.assertEqual(results[0], 1000)
for i, method in enumerate(self.methodnames):
if results[i+1] != expected[method]:
print("Stats.%s output for %s doesn't fit expectation!" %
(method, self.profilerclass.__name__))
print('\n'.join(unified_diff(
results[i+1].split('\n'),
expected[method].split('\n'))))
def test_calling_conventions(self):
# Issue #5330: profile and cProfile wouldn't report C functions called
# with keyword arguments. We test all calling conventions.
stmts = [
"max([0])",
"max([0], key=int)",
"max([0], **dict(key=int))",
"max(*([0],))",
"max(*([0],), key=int)",
"max(*([0],), **dict(key=int))",
]
for stmt in stmts:
s = StringIO()
prof = self.profilerclass(timer, 0.001)
prof.runctx(stmt, globals(), locals())
stats = pstats.Stats(prof, stream=s)
stats.print_stats()
res = s.getvalue()
self.assertIn(self.expected_max_output, res,
"Profiling {0!r} didn't report max:\n{1}".format(stmt, res))
def regenerate_expected_output(filename, cls):
filename = filename.rstrip('co')
print('Regenerating %s...' % filename)
results = cls.do_profiling()
newfile = []
with open(filename, 'r') as f:
for line in f:
newfile.append(line)
if line.startswith('#--cut'):
break
with open(filename, 'w') as f:
f.writelines(newfile)
f.write("_ProfileOutput = {}\n")
for i, method in enumerate(cls.methodnames):
f.write('_ProfileOutput[%r] = """\\\n%s"""\n' % (
method, results[i+1]))
f.write('\nif __name__ == "__main__":\n main()\n')
def test_main():
run_unittest(ProfileTest)
def main():
if '-r' not in sys.argv:
test_main()
else:
regenerate_expected_output(__file__, ProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
_ProfileOutput = {}
_ProfileOutput['print_stats'] = """\
28 27.972 0.999 27.972 0.999 profilee.py:110(__getattr__)
1 269.996 269.996 999.769 999.769 profilee.py:25(testfunc)
23/3 149.937 6.519 169.917 56.639 profilee.py:35(factorial)
20 19.980 0.999 19.980 0.999 profilee.py:48(mul)
2 39.986 19.993 599.830 299.915 profilee.py:55(helper)
4 115.984 28.996 119.964 29.991 profilee.py:73(helper1)
2 -0.006 -0.003 139.946 69.973 profilee.py:84(helper2_indirect)
8 311.976 38.997 399.912 49.989 profilee.py:88(helper2)
8 63.976 7.997 79.960 9.995 profilee.py:98(subhelper)"""
_ProfileOutput['print_callers'] = """\
:0(append) <- profilee.py:73(helper1)(4) 119.964
:0(exc_info) <- profilee.py:73(helper1)(4) 119.964
:0(hasattr) <- profilee.py:73(helper1)(4) 119.964
profilee.py:88(helper2)(8) 399.912
profilee.py:110(__getattr__) <- :0(hasattr)(12) 11.964
profilee.py:98(subhelper)(16) 79.960
profilee.py:25(testfunc) <- <string>:1(<module>)(1) 999.767
profilee.py:35(factorial) <- profilee.py:25(testfunc)(1) 999.769
profilee.py:35(factorial)(20) 169.917
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:48(mul) <- profilee.py:35(factorial)(20) 169.917
profilee.py:55(helper) <- profilee.py:25(testfunc)(2) 999.769
profilee.py:73(helper1) <- profilee.py:55(helper)(4) 599.830
profilee.py:84(helper2_indirect) <- profilee.py:55(helper)(2) 599.830
profilee.py:88(helper2) <- profilee.py:55(helper)(6) 599.830
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:98(subhelper) <- profilee.py:88(helper2)(8) 399.912"""
_ProfileOutput['print_callees'] = """\
:0(hasattr) -> profilee.py:110(__getattr__)(12) 27.972
<string>:1(<module>) -> profilee.py:25(testfunc)(1) 999.769
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> profilee.py:35(factorial)(1) 169.917
profilee.py:55(helper)(2) 599.830
profilee.py:35(factorial) -> profilee.py:35(factorial)(20) 169.917
profilee.py:48(mul)(20) 19.980
profilee.py:48(mul) ->
profilee.py:55(helper) -> profilee.py:73(helper1)(4) 119.964
profilee.py:84(helper2_indirect)(2) 139.946
profilee.py:88(helper2)(6) 399.912
profilee.py:73(helper1) -> :0(append)(4) -0.004
profilee.py:84(helper2_indirect) -> profilee.py:35(factorial)(2) 169.917
profilee.py:88(helper2)(2) 399.912
profilee.py:88(helper2) -> :0(hasattr)(8) 11.964
profilee.py:98(subhelper)(8) 79.960
profilee.py:98(subhelper) -> profilee.py:110(__getattr__)(16) 27.972"""
if __name__ == "__main__":
main()
| gpl-3.0 |
tosolveit/scikit-learn | sklearn/ensemble/tests/test_partial_dependence.py | 365 | 6996 | """
Testing for the partial dependence module.
"""
import numpy as np
from numpy.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import if_matplotlib
from sklearn.ensemble.partial_dependence import partial_dependence
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import datasets
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the boston dataset
boston = datasets.load_boston()
# also load the iris dataset
iris = datasets.load_iris()
def test_partial_dependence_classifier():
# Test partial dependence for classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5)
# only 4 grid points instead of 5 because only 4 unique X[:,0] vals
assert pdp.shape == (1, 4)
assert axes[0].shape[0] == 4
# now with our own grid
X_ = np.asarray(X)
grid = np.unique(X_[:, 0])
pdp_2, axes = partial_dependence(clf, [0], grid=grid)
assert axes is None
assert_array_equal(pdp, pdp_2)
def test_partial_dependence_multiclass():
# Test partial dependence for multi-class classifier
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
n_classes = clf.n_classes_
pdp, axes = partial_dependence(
clf, [0], X=iris.data, grid_resolution=grid_resolution)
assert pdp.shape == (n_classes, grid_resolution)
assert len(axes) == 1
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_regressor():
# Test partial dependence for regressor
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
pdp, axes = partial_dependence(
clf, [0], X=boston.data, grid_resolution=grid_resolution)
assert pdp.shape == (1, grid_resolution)
assert axes[0].shape[0] == grid_resolution
def test_partial_dependence_input():
# Test input validation of partial dependence.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=None, X=None)
assert_raises(ValueError, partial_dependence,
clf, [0], grid=[0, 1], X=X)
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, partial_dependence,
{}, [0], X=X)
# Gradient boosting estimator must be fit
assert_raises(ValueError, partial_dependence,
GradientBoostingClassifier(), [0], X=X)
assert_raises(ValueError, partial_dependence, clf, [-1], X=X)
assert_raises(ValueError, partial_dependence, clf, [100], X=X)
# wrong ndim for grid
grid = np.random.rand(10, 2, 1)
assert_raises(ValueError, partial_dependence, clf, [0], grid=grid)
@if_matplotlib
def test_plot_partial_dependence():
# Test partial dependence plot function.
clf = GradientBoostingRegressor(n_estimators=10, random_state=1)
clf.fit(boston.data, boston.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with str features and array feature names
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=boston.feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
# check with list feature_names
feature_names = boston.feature_names.tolist()
fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN',
('CRIM', 'ZN')],
grid_resolution=grid_resolution,
feature_names=feature_names)
assert len(axs) == 3
assert all(ax.has_data for ax in axs)
@if_matplotlib
def test_plot_partial_dependence_input():
# Test partial dependence plot function input checks.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
# not fitted yet
assert_raises(ValueError, plot_partial_dependence,
clf, X, [0])
clf.fit(X, y)
assert_raises(ValueError, plot_partial_dependence,
clf, np.array(X)[:, :0], [0])
# first argument must be an instance of BaseGradientBoosting
assert_raises(ValueError, plot_partial_dependence,
{}, X, [0])
# must be larger than -1
assert_raises(ValueError, plot_partial_dependence,
clf, X, [-1])
# too large feature value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [100])
# str feature but no feature_names
assert_raises(ValueError, plot_partial_dependence,
clf, X, ['foobar'])
# not valid features value
assert_raises(ValueError, plot_partial_dependence,
clf, X, [{'foo': 'bar'}])
@if_matplotlib
def test_plot_partial_dependence_multiclass():
# Test partial dependence plot function on multi-class input.
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label=0,
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# now with symbol labels
target = iris.target_names[iris.target]
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, target)
grid_resolution = 25
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1],
label='setosa',
grid_resolution=grid_resolution)
assert len(axs) == 2
assert all(ax.has_data for ax in axs)
# label not in gbrt.classes_
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1], label='foobar',
grid_resolution=grid_resolution)
# label not provided
assert_raises(ValueError, plot_partial_dependence,
clf, iris.data, [0, 1],
grid_resolution=grid_resolution)
| bsd-3-clause |
felixma/nova | nova/tests/functional/api_sample_tests/test_security_group_default_rules.py | 26 | 2374 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class SecurityGroupDefaultRulesSampleJsonTest(
api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = 'os-security-group-default-rules'
def _get_flags(self):
f = super(SecurityGroupDefaultRulesSampleJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append('nova.api.openstack.compute.'
'contrib.security_group_default_rules.'
'Security_group_default_rules')
return f
def test_security_group_default_rules_create(self):
response = self._do_post('os-security-group-default-rules',
'security-group-default-rules-create-req',
{})
self._verify_response('security-group-default-rules-create-resp',
{}, response, 200)
def test_security_group_default_rules_list(self):
self.test_security_group_default_rules_create()
response = self._do_get('os-security-group-default-rules')
self._verify_response('security-group-default-rules-list-resp',
{}, response, 200)
def test_security_group_default_rules_show(self):
self.test_security_group_default_rules_create()
rule_id = '1'
response = self._do_get('os-security-group-default-rules/%s' % rule_id)
self._verify_response('security-group-default-rules-show-resp',
{}, response, 200)
| apache-2.0 |
caseyclements/bokeh | bokeh/compat/mplexporter/exporter.py | 32 | 12403 | """
Matplotlib Exporter
===================
This submodule contains tools for crawling a matplotlib figure and exporting
relevant pieces to a renderer.
"""
import warnings
import io
from . import utils
import matplotlib
from matplotlib import transforms
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Exporter(object):
"""Matplotlib Exporter
Parameters
----------
renderer : Renderer object
The renderer object called by the exporter to create a figure
visualization. See mplexporter.Renderer for information on the
methods which should be defined within the renderer.
close_mpl : bool
If True (default), close the matplotlib figure as it is rendered. This
is useful for when the exporter is used within the notebook, or with
an interactive matplotlib backend.
"""
def __init__(self, renderer, close_mpl=True):
self.close_mpl = close_mpl
self.renderer = renderer
def run(self, fig):
"""
Run the exporter on the given figure
Parameters
----------
fig : matplotlib.Figure instance
The figure to export
"""
# Calling savefig executes the draw() command, putting elements
# in the correct place.
if fig.canvas is None:
fig.canvas = FigureCanvasAgg(fig)
fig.savefig(io.BytesIO(), format='png', dpi=fig.dpi)
if self.close_mpl:
import matplotlib.pyplot as plt
plt.close(fig)
self.crawl_fig(fig)
@staticmethod
def process_transform(transform, ax=None, data=None, return_trans=False,
force_trans=None):
"""Process the transform and convert data to figure or data coordinates
Parameters
----------
transform : matplotlib Transform object
The transform applied to the data
ax : matplotlib Axes object (optional)
The axes the data is associated with
data : ndarray (optional)
The array of data to be transformed.
return_trans : bool (optional)
If true, return the final transform of the data
force_trans : matplotlib.transform instance (optional)
If supplied, first force the data to this transform
Returns
-------
code : string
Code is either "data", "axes", "figure", or "display", indicating
the type of coordinates output.
transform : matplotlib transform
the transform used to map input data to output data.
Returned only if return_trans is True
new_data : ndarray
Data transformed to match the given coordinate code.
Returned only if data is specified
"""
if isinstance(transform, transforms.BlendedGenericTransform):
warnings.warn("Blended transforms not yet supported. "
"Zoom behavior may not work as expected.")
if force_trans is not None:
if data is not None:
data = (transform - force_trans).transform(data)
transform = force_trans
code = "display"
if ax is not None:
for (c, trans) in [("data", ax.transData),
("axes", ax.transAxes),
("figure", ax.figure.transFigure),
("display", transforms.IdentityTransform())]:
if transform.contains_branch(trans):
code, transform = (c, transform - trans)
break
if data is not None:
if return_trans:
return code, transform.transform(data), transform
else:
return code, transform.transform(data)
else:
if return_trans:
return code, transform
else:
return code
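# Minimal usage sketch (assumes a live matplotlib Axes `ax` and Line2D
# `line`; nothing here is specific to a renderer):
# code, xy = Exporter.process_transform(line.get_transform(), ax,
# line.get_xydata())
# `code` is one of "data", "axes", "figure" or "display", and `xy` holds
# the same points expressed in that coordinate system.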
def crawl_fig(self, fig):
"""Crawl the figure and process all axes"""
with self.renderer.draw_figure(fig=fig,
props=utils.get_figure_properties(fig)):
for ax in fig.axes:
self.crawl_ax(ax)
def crawl_ax(self, ax):
"""Crawl the axes and process all elements within"""
with self.renderer.draw_axes(ax=ax,
props=utils.get_axes_properties(ax)):
for line in ax.lines:
self.draw_line(ax, line)
for text in ax.texts:
self.draw_text(ax, text)
for (text, ttp) in zip([ax.xaxis.label, ax.yaxis.label, ax.title],
["xlabel", "ylabel", "title"]):
if(hasattr(text, 'get_text') and text.get_text()):
self.draw_text(ax, text, force_trans=ax.transAxes,
text_type=ttp)
for artist in ax.artists:
# TODO: process other artists
if isinstance(artist, matplotlib.text.Text):
self.draw_text(ax, artist)
for patch in ax.patches:
self.draw_patch(ax, patch)
for collection in ax.collections:
self.draw_collection(ax, collection)
for image in ax.images:
self.draw_image(ax, image)
legend = ax.get_legend()
if legend is not None:
props = utils.get_legend_properties(ax, legend)
with self.renderer.draw_legend(legend=legend, props=props):
if props['visible']:
self.crawl_legend(ax, legend)
def crawl_legend(self, ax, legend):
"""
Recursively look through objects in legend children
"""
legendElements = list(utils.iter_all_children(legend._legend_box,
skipContainers=True))
legendElements.append(legend.legendPatch)
for child in legendElements:
# force a large zorder so it appears on top
child.set_zorder(1E6 + child.get_zorder())
try:
# What kind of object...
if isinstance(child, matplotlib.patches.Patch):
self.draw_patch(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.text.Text):
if not (child is legend.get_children()[-1]
and child.get_text() == 'None'):
self.draw_text(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.lines.Line2D):
self.draw_line(ax, child, force_trans=ax.transAxes)
elif isinstance(child, matplotlib.collections.Collection):
self.draw_collection(ax, child,
force_pathtrans=ax.transAxes)
else:
warnings.warn("Legend element %s not impemented" % child)
except NotImplementedError:
warnings.warn("Legend element %s not impemented" % child)
def draw_line(self, ax, line, force_trans=None):
"""Process a matplotlib line and call renderer.draw_line"""
coordinates, data = self.process_transform(line.get_transform(),
ax, line.get_xydata(),
force_trans=force_trans)
linestyle = utils.get_line_style(line)
if linestyle['dasharray'] is None:
linestyle = None
markerstyle = utils.get_marker_style(line)
if (markerstyle['marker'] in ['None', 'none', None]
or markerstyle['markerpath'][0].size == 0):
markerstyle = None
label = line.get_label()
if markerstyle or linestyle:
self.renderer.draw_marked_line(data=data, coordinates=coordinates,
linestyle=linestyle,
markerstyle=markerstyle,
label=label,
mplobj=line)
def draw_text(self, ax, text, force_trans=None, text_type=None):
"""Process a matplotlib text object and call renderer.draw_text"""
content = text.get_text()
if content:
transform = text.get_transform()
position = text.get_position()
coords, position = self.process_transform(transform, ax,
position,
force_trans=force_trans)
style = utils.get_text_style(text)
self.renderer.draw_text(text=content, position=position,
coordinates=coords,
text_type=text_type,
style=style, mplobj=text)
def draw_patch(self, ax, patch, force_trans=None):
"""Process a matplotlib patch object and call renderer.draw_path"""
vertices, pathcodes = utils.SVG_path(patch.get_path())
transform = patch.get_transform()
coordinates, vertices = self.process_transform(transform,
ax, vertices,
force_trans=force_trans)
linestyle = utils.get_path_style(patch, fill=patch.get_fill())
self.renderer.draw_path(data=vertices,
coordinates=coordinates,
pathcodes=pathcodes,
style=linestyle,
mplobj=patch)
def draw_collection(self, ax, collection,
force_pathtrans=None,
force_offsettrans=None):
"""Process a matplotlib collection and call renderer.draw_collection"""
(transform, transOffset,
offsets, paths) = collection._prepare_points()
offset_coords, offsets = self.process_transform(
transOffset, ax, offsets, force_trans=force_offsettrans)
path_coords = self.process_transform(
transform, ax, force_trans=force_pathtrans)
processed_paths = [utils.SVG_path(path) for path in paths]
processed_paths = [(self.process_transform(
transform, ax, path[0], force_trans=force_pathtrans)[1], path[1])
for path in processed_paths]
path_transforms = collection.get_transforms()
try:
# matplotlib 1.3: path_transforms are transform objects.
# Convert them to numpy arrays.
path_transforms = [t.get_matrix() for t in path_transforms]
except AttributeError:
# matplotlib 1.4: path transforms are already numpy arrays.
pass
styles = {'linewidth': collection.get_linewidths(),
'facecolor': collection.get_facecolors(),
'edgecolor': collection.get_edgecolors(),
'alpha': collection._alpha,
'zorder': collection.get_zorder()}
offset_dict = {"data": "before",
"screen": "after"}
offset_order = offset_dict[collection.get_offset_position()]
self.renderer.draw_path_collection(paths=processed_paths,
path_coordinates=path_coords,
path_transforms=path_transforms,
offsets=offsets,
offset_coordinates=offset_coords,
offset_order=offset_order,
styles=styles,
mplobj=collection)
def draw_image(self, ax, image):
"""Process a matplotlib image object and call renderer.draw_image"""
self.renderer.draw_image(imdata=utils.image_to_base64(image),
extent=image.get_extent(),
coordinates="data",
style={"alpha": image.get_alpha(),
"zorder": image.get_zorder()},
mplobj=image)
| bsd-3-clause |
nmercier/linux-cross-gcc | win32/bin/Lib/robotparser.py | 2 | 7821 | """ robotparser.py
Copyright (C) 2000 Bastian Kleineidam
You can choose between two licenses when using this package:
1) GNU GPLv2
2) PSF license for Python 2.2
The robots.txt Exclusion Protocol is implemented as specified in
http://www.robotstxt.org/norobots-rfc.txt
"""
import urlparse
import urllib
__all__ = ["RobotFileParser"]
class RobotFileParser:
""" This class provides a set of methods to read, parse and answer
questions about a single robots.txt file.
"""
def __init__(self, url=''):
self.entries = []
self.default_entry = None
self.disallow_all = False
self.allow_all = False
self.set_url(url)
self.last_checked = 0
def mtime(self):
"""Returns the time the robots.txt file was last fetched.
This is useful for long-running web spiders that need to
check for new robots.txt files periodically.
"""
return self.last_checked
def modified(self):
"""Sets the time the robots.txt file was last fetched to the
current time.
"""
import time
self.last_checked = time.time()
def set_url(self, url):
"""Sets the URL referring to a robots.txt file."""
self.url = url
self.host, self.path = urlparse.urlparse(url)[1:3]
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
opener = URLopener()
f = opener.open(self.url)
lines = [line.strip() for line in f]
f.close()
self.errcode = opener.errcode
if self.errcode in (401, 403):
self.disallow_all = True
elif self.errcode >= 400 and self.errcode < 500:
self.allow_all = True
elif self.errcode == 200 and lines:
self.parse(lines)
def _add_entry(self, entry):
if "*" in entry.useragents:
# the default entry is considered last
if self.default_entry is None:
# the first default entry wins
self.default_entry = entry
else:
self.entries.append(entry)
def parse(self, lines):
"""parse the input lines from a robots.txt file.
We allow that a user-agent: line is not preceded by
one or more blank lines."""
# states:
# 0: start state
# 1: saw user-agent line
# 2: saw an allow or disallow line
state = 0
linenumber = 0
entry = Entry()
self.modified()
for line in lines:
linenumber += 1
if not line:
if state == 1:
entry = Entry()
state = 0
elif state == 2:
self._add_entry(entry)
entry = Entry()
state = 0
# remove optional comment and strip line
i = line.find('#')
if i >= 0:
line = line[:i]
line = line.strip()
if not line:
continue
line = line.split(':', 1)
if len(line) == 2:
line[0] = line[0].strip().lower()
line[1] = urllib.unquote(line[1].strip())
if line[0] == "user-agent":
if state == 2:
self._add_entry(entry)
entry = Entry()
entry.useragents.append(line[1])
state = 1
elif line[0] == "disallow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], False))
state = 2
elif line[0] == "allow":
if state != 0:
entry.rulelines.append(RuleLine(line[1], True))
state = 2
if state == 2:
self._add_entry(entry)
def can_fetch(self, useragent, url):
"""using the parsed robots.txt decide if useragent can fetch url"""
if self.disallow_all:
return False
if self.allow_all:
return True
# Until the robots.txt file has been read or found not
# to exist, we must assume that no url is allowable.
# This prevents false positives when a user erroneously
# calls can_fetch() before calling read().
if not self.last_checked:
return False
# search for given user agent matches
# the first match counts
parsed_url = urlparse.urlparse(urllib.unquote(url))
url = urlparse.urlunparse(('', '', parsed_url.path,
parsed_url.params, parsed_url.query, parsed_url.fragment))
url = urllib.quote(url)
if not url:
url = "/"
for entry in self.entries:
if entry.applies_to(useragent):
return entry.allowance(url)
# try the default entry last
if self.default_entry:
return self.default_entry.allowance(url)
# agent not found ==> access granted
return True
def __str__(self):
return ''.join([str(entry) + "\n" for entry in self.entries])
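# Typical usage of the parser above (URL and agent name are illustrative):
# rp = RobotFileParser('http://example.com/robots.txt')
# rp.read()
# if rp.can_fetch('MyCrawler', 'http://example.com/private/page.html'):
# ... # fetch the page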
class RuleLine:
"""A rule line is a single "Allow:" (allowance==True) or "Disallow:"
(allowance==False) followed by a path."""
def __init__(self, path, allowance):
if path == '' and not allowance:
# an empty value means allow all
allowance = True
path = urlparse.urlunparse(urlparse.urlparse(path))
self.path = urllib.quote(path)
self.allowance = allowance
def applies_to(self, filename):
return self.path == "*" or filename.startswith(self.path)
def __str__(self):
return (self.allowance and "Allow" or "Disallow") + ": " + self.path
class Entry:
"""An entry has one or more user-agents and zero or more rulelines"""
def __init__(self):
self.useragents = []
self.rulelines = []
def __str__(self):
ret = []
for agent in self.useragents:
ret.extend(["User-agent: ", agent, "\n"])
for line in self.rulelines:
ret.extend([str(line), "\n"])
return ''.join(ret)
def applies_to(self, useragent):
"""check if this entry applies to the specified agent"""
# split the name token and make it lower case
useragent = useragent.split("/")[0].lower()
for agent in self.useragents:
if agent == '*':
# we have the catch-all agent
return True
agent = agent.lower()
if agent in useragent:
return True
return False
def allowance(self, filename):
"""Preconditions:
- our agent applies to this entry
- filename is URL decoded"""
for line in self.rulelines:
if line.applies_to(filename):
return line.allowance
return True
class URLopener(urllib.FancyURLopener):
def __init__(self, *args):
urllib.FancyURLopener.__init__(self, *args)
self.errcode = 200
def prompt_user_passwd(self, host, realm):
## If robots.txt file is accessible only with a password,
## we act as if the file wasn't there.
return None, None
def http_error_default(self, url, fp, errcode, errmsg, headers):
self.errcode = errcode
return urllib.FancyURLopener.http_error_default(self, url, fp, errcode,
errmsg, headers)
| bsd-3-clause |
zaheerm/gst-python | testsuite/common.py | 1 | 5702 | # -*- Mode: Python -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# gst-python - Python bindings for GStreamer
# Copyright (C) 2002 David I. Lehn
# Copyright (C) 2004 Johan Dahlin
# Copyright (C) 2005 Edward Hervey
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
try:
from dl import RTLD_LAZY, RTLD_GLOBAL
except ImportError:
# dl doesn't seem to be available on 64bit systems
try:
from DLFCN import RTLD_LAZY, RTLD_GLOBAL
except ImportError:
pass
import os
import sys
import gc
import unittest
import pygtk
pygtk.require('2.0')
import gobject
try:
gobject.threads_init()
except:
print "WARNING: gobject doesn't have threads_init, no threadsafety"
# Detect the version of pygobject
# In pygobject >= 2.13.0 the refcounting of objects has changed.
pgmaj,pgmin,pgmac = gobject.pygobject_version
if pgmaj >= 2 and pgmin >= 13:
pygobject_2_13 = True
else:
pygobject_2_13 = False
# Don't insert before .
# sys.path.insert(1, os.path.join('..'))
# Load GST and make sure we load it from the current build
sys.setdlopenflags(RTLD_LAZY | RTLD_GLOBAL)
topbuilddir = os.path.abspath(os.path.join('..'))
topsrcdir = os.path.abspath(os.path.join('..'))
if topsrcdir.endswith('_build'):
topsrcdir = os.path.dirname(topsrcdir)
# gst's __init__.py is in topsrcdir/gst
path = os.path.abspath(os.path.join(topsrcdir, 'gst'))
import gst
file = gst.__file__
assert file.startswith(path), 'bad gst path: %s' % file
# gst's interfaces is in topbuilddir/gst
path = os.path.abspath(os.path.join(topbuilddir, 'gst'))
try:
import gst.interfaces
except ImportError:
# hack: we import it from our builddir/gst/.libs instead; ugly
import interfaces
gst.interfaces = interfaces
file = gst.interfaces.__file__
assert file.startswith(path), 'bad gst.interfaces path: %s' % file
# gst's pbutils is in topbuilddir/gst
path = os.path.abspath(os.path.join(topbuilddir, 'gst'))
try:
import gst.pbutils
except ImportError:
# hack: we import it from our builddir/gst/.libs instead; ugly
import pbutils
gst.pbutils = pbutils
file = gst.pbutils.__file__
assert file.startswith(path), 'bad gst.pbutils path: %s' % file
# testhelper needs gstltihooks
import gstltihooks
import testhelper
gstltihooks.uninstall()
_stderr = None
def disable_stderr():
global _stderr
_stderr = file('/tmp/stderr', 'w+')
sys.stderr = os.fdopen(os.dup(2), 'w')
os.close(2)
os.dup(_stderr.fileno())
def enable_stderr():
global _stderr
os.close(2)
os.dup(sys.stderr.fileno())
_stderr.seek(0, 0)
data = _stderr.read()
_stderr.close()
os.remove('/tmp/stderr')
return data
def run_silent(function, *args, **kwargs):
disable_stderr()
try:
function(*args, **kwargs)
except Exception, exc:
enable_stderr()
raise exc
output = enable_stderr()
return output
class TestCase(unittest.TestCase):
_types = [gst.Object, gst.MiniObject]
def gccollect(self):
# run the garbage collector
ret = 0
gst.debug('garbage collecting')
while True:
c = gc.collect()
ret += c
if c == 0: break
gst.debug('done garbage collecting, %d objects' % ret)
return ret
def gctrack(self):
# store all gst objects in the gc in a tracking dict
# call before doing any allocation in your test, from setUp
gst.debug('tracking gc GstObjects for types %r' % self._types)
self.gccollect()
self._tracked = {}
for c in self._types:
self._tracked[c] = [o for o in gc.get_objects() if isinstance(o, c)]
def gcverify(self):
# verify no new gst objects got added to the gc
# call after doing all cleanup in your test, from tearDown
gst.debug('verifying gc GstObjects for types %r' % self._types)
new = []
for c in self._types:
objs = [o for o in gc.get_objects() if isinstance(o, c)]
new.extend([o for o in objs if o not in self._tracked[c]])
self.failIf(new, new)
#self.failIf(new, ["%r:%d" % (type(o), id(o)) for o in new])
del self._tracked
def setUp(self):
"""
Override me by chaining up to me at the start of your setUp.
"""
# Using private variables is BAD ! this variable changed name in
# python 2.5
try:
methodName = self.__testMethodName
except:
methodName = self._testMethodName
gst.debug('%s.%s' % (self.__class__.__name__, methodName))
self.gctrack()
def tearDown(self):
"""
Override me by chaining up to me at the end of your tearDown.
"""
# Using private variables is BAD ! this variable changed name in
# python 2.5
try:
methodName = self.__testMethodName
except:
methodName = self._testMethodName
gst.debug('%s.%s' % (self.__class__.__name__, methodName))
self.gccollect()
self.gcverify()
| lgpl-2.1 |
tvald/Jink | python/jink/extension.py | 1 | 1038 | import types
class LazyFactory(object):
def __init__(self, module_str, callable_str):
self.module_str = module_str
self.callable_str = callable_str
self.factory = None
def resolve(self):
if self.factory == None:
self.factory = reduce(lambda x, y: getattr(x, y),
self.module_str.split('.')[1:] +
self.callable_str.split('.'),
__import__(self.module_str))
return self.factory
def instantiate(self, *args, **kw):
self.resolve()
return self.factory(*args, **kw)
def __call__(self, *args, **kw):
return self.instantiate(*args, **kw)
class Registry(object):
def __init__(self):
self._registry = dict()
def register(self, keys, factory):
try: iter(keys)
except TypeError: keys = (keys,)
for k in keys: self._registry[k] = factory
def get(self, key):
return self._registry[key]
source = Registry()
sink = Registry()
__all__ = ['LazyFactory', 'source', 'sink', 'Registry']
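# Usage sketch (the module path and factory names are hypothetical):
# source.register(('file', 'stdin'),
# LazyFactory('jink.io', 'FileSource'))
# factory = source.get('file')
# obj = factory.instantiate('/tmp/input.jink')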
| mit |
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/pip/_vendor/ipaddress.py | 198 | 79659 | # Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
"""A fast, lightweight IPv4/IPv6 manipulation library in Python.
This library is used to create/poke/manipulate IPv4 and IPv6 addresses
and networks.
"""
from __future__ import unicode_literals
import itertools
import struct
__version__ = '1.0.14'
# Compatibility functions
_compat_int_types = (int,)
try:
_compat_int_types = (int, long)
except NameError:
pass
try:
_compat_str = unicode
except NameError:
_compat_str = str
assert bytes != str
if b'\0'[0] == 0: # Python 3 semantics
def _compat_bytes_to_byte_vals(byt):
return byt
else:
def _compat_bytes_to_byte_vals(byt):
return [struct.unpack(b'!B', b)[0] for b in byt]
try:
_compat_int_from_byte_vals = int.from_bytes
except AttributeError:
def _compat_int_from_byte_vals(bytvals, endianess):
assert endianess == 'big'
res = 0
for bv in bytvals:
assert isinstance(bv, _compat_int_types)
res = (res << 8) + bv
return res
def _compat_to_bytes(intval, length, endianess):
assert isinstance(intval, _compat_int_types)
assert endianess == 'big'
if length == 4:
if intval < 0 or intval >= 2 ** 32:
raise struct.error("integer out of range for 'I' format code")
return struct.pack(b'!I', intval)
elif length == 16:
if intval < 0 or intval >= 2 ** 128:
raise struct.error("integer out of range for 'QQ' format code")
return struct.pack(b'!QQ', intval >> 64, intval & 0xffffffffffffffff)
else:
raise NotImplementedError()
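# For instance, _compat_to_bytes(3232235777, 4, 'big') returns
# b'\xc0\xa8\x01\x01', the packed form of 192.168.1.1.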
if hasattr(int, 'bit_length'):
# Not int.bit_length , since that won't work in 2.7 where long exists
def _compat_bit_length(i):
return i.bit_length()
else:
def _compat_bit_length(i):
for res in itertools.count():
if i >> res == 0:
return res
def _compat_range(start, end, step=1):
assert step > 0
i = start
while i < end:
yield i
i += step
class _TotalOrderingMixin(object):
__slots__ = ()
# Helper that derives the other comparison operations from
# __lt__ and __eq__
# We avoid functools.total_ordering because it doesn't handle
# NotImplemented correctly yet (http://bugs.python.org/issue10042)
def __eq__(self, other):
raise NotImplementedError
def __ne__(self, other):
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not equal
def __lt__(self, other):
raise NotImplementedError
def __le__(self, other):
less = self.__lt__(other)
if less is NotImplemented or not less:
return self.__eq__(other)
return less
def __gt__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
equal = self.__eq__(other)
if equal is NotImplemented:
return NotImplemented
return not (less or equal)
def __ge__(self, other):
less = self.__lt__(other)
if less is NotImplemented:
return NotImplemented
return not less
IPV4LENGTH = 32
IPV6LENGTH = 128
class AddressValueError(ValueError):
"""A Value Error related to the address."""
class NetmaskValueError(ValueError):
"""A Value Error related to the netmask."""
def ip_address(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Address or IPv6Address object.
Raises:
ValueError: if the *address* passed isn't either a v4 or a v6
address
"""
try:
return IPv4Address(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Address(address)
except (AddressValueError, NetmaskValueError):
pass
if isinstance(address, bytes):
raise AddressValueError(
'%r does not appear to be an IPv4 or IPv6 address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?' % address)
raise ValueError('%r does not appear to be an IPv4 or IPv6 address' %
address)
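# For example, ip_address('192.0.2.1') and ip_address(3221225985) both
# return an IPv4Address for 192.0.2.1, while integers >= 2**32 are tried
# as IPv6.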
def ip_network(address, strict=True):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP network. Either IPv4 or
IPv6 networks may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Network or IPv6Network object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address. Or if the network has host bits set.
"""
try:
return IPv4Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Network(address, strict)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 network' %
address)
def ip_interface(address):
"""Take an IP string/int and return an object of the correct type.
Args:
address: A string or integer, the IP address. Either IPv4 or
IPv6 addresses may be supplied; integers less than 2**32 will
be considered to be IPv4 by default.
Returns:
An IPv4Interface or IPv6Interface object.
Raises:
ValueError: if the string passed isn't either a v4 or a v6
address.
Notes:
The IPv?Interface classes describe an Address on a particular
Network, so they're basically a combination of both the Address
and Network classes.
"""
try:
return IPv4Interface(address)
except (AddressValueError, NetmaskValueError):
pass
try:
return IPv6Interface(address)
except (AddressValueError, NetmaskValueError):
pass
raise ValueError('%r does not appear to be an IPv4 or IPv6 interface' %
address)
def v4_int_to_packed(address):
"""Represent an address as 4 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv4 IP address.
Returns:
The integer address packed as 4 bytes in network (big-endian) order.
Raises:
ValueError: If the integer is negative or too large to be an
IPv4 IP address.
"""
try:
return _compat_to_bytes(address, 4, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv4")
def v6_int_to_packed(address):
"""Represent an address as 16 packed bytes in network (big-endian) order.
Args:
address: An integer representation of an IPv6 IP address.
Returns:
The integer address packed as 16 bytes in network (big-endian) order.
"""
try:
return _compat_to_bytes(address, 16, 'big')
except (struct.error, OverflowError):
raise ValueError("Address negative or too large for IPv6")
def _split_optional_netmask(address):
"""Helper to split the netmask and raise AddressValueError if needed"""
addr = _compat_str(address).split('/')
if len(addr) > 2:
raise AddressValueError("Only one '/' permitted in %r" % address)
return addr
def _find_address_range(addresses):
"""Find a sequence of sorted deduplicated IPv#Address.
Args:
addresses: a list of IPv#Address objects.
Yields:
A tuple containing the first and last IP addresses in the sequence.
"""
it = iter(addresses)
first = last = next(it)
for ip in it:
if ip._ip != last._ip + 1:
yield first, last
first = ip
last = ip
yield first, last
def _count_righthand_zero_bits(number, bits):
"""Count the number of zero bits on the right hand side.
Args:
number: an integer.
bits: maximum number of bits to count.
Returns:
The number of zero bits on the right hand side of the number.
"""
if number == 0:
return bits
return min(bits, _compat_bit_length(~number & (number - 1)))
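# e.g. _count_righthand_zero_bits(0b101000, 8) == 3, and by the early
# return _count_righthand_zero_bits(0, 8) == 8.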
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> list(summarize_address_range(IPv4Address('192.0.2.0'),
... IPv4Address('192.0.2.130')))
... #doctest: +NORMALIZE_WHITESPACE
[IPv4Network('192.0.2.0/25'), IPv4Network('192.0.2.128/31'),
IPv4Network('192.0.2.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
An iterator of the summarized IPv(4|6) network objects.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version of the first address is not 4 or 6.
"""
if (not (isinstance(first, _BaseAddress) and
isinstance(last, _BaseAddress))):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
first, last))
if first > last:
raise ValueError('last IP address must be greater than first')
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = min(_count_righthand_zero_bits(first_int, ip_bits),
_compat_bit_length(last_int - first_int + 1) - 1)
net = ip((first_int, ip_bits - nbits))
yield net
first_int += 1 << nbits
if first_int - 1 == ip._ALL_ONES:
break
def _collapse_addresses_internal(addresses):
"""Loops through the addresses, collapsing concurrent netblocks.
Example:
ip1 = IPv4Network('192.0.2.0/26')
ip2 = IPv4Network('192.0.2.64/26')
ip3 = IPv4Network('192.0.2.128/26')
ip4 = IPv4Network('192.0.2.192/26')
_collapse_addresses_internal([ip1, ip2, ip3, ip4]) ->
[IPv4Network('192.0.2.0/24')]
This shouldn't be called directly; it is called via
collapse_addresses([]).
Args:
addresses: A list of IPv4Network's or IPv6Network's
Returns:
A list of IPv4Network's or IPv6Network's depending on what we were
passed.
"""
# First merge
to_merge = list(addresses)
subnets = {}
while to_merge:
net = to_merge.pop()
supernet = net.supernet()
existing = subnets.get(supernet)
if existing is None:
subnets[supernet] = net
elif existing != net:
# Merge consecutive subnets
del subnets[supernet]
to_merge.append(supernet)
# Then iterate over resulting networks, skipping subsumed subnets
last = None
for net in sorted(subnets.values()):
if last is not None:
# Since they are sorted,
# last.network_address <= net.network_address is a given.
if last.broadcast_address >= net.broadcast_address:
continue
yield net
last = net
def collapse_addresses(addresses):
"""Collapse a list of IP objects.
Example:
collapse_addresses([IPv4Network('192.0.2.0/25'),
IPv4Network('192.0.2.128/25')]) ->
[IPv4Network('192.0.2.0/24')]
Args:
addresses: An iterator of IPv4Network or IPv6Network objects.
Returns:
An iterator of the collapsed IPv(4|6)Network objects.
Raises:
TypeError: If passed a list of mixed version objects.
"""
addrs = []
ips = []
nets = []
# split IP addresses and networks
for ip in addresses:
if isinstance(ip, _BaseAddress):
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
ips.append(ip)
elif ip._prefixlen == ip._max_prefixlen:
if ips and ips[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, ips[-1]))
try:
ips.append(ip.ip)
except AttributeError:
ips.append(ip.network_address)
else:
if nets and nets[-1]._version != ip._version:
raise TypeError("%s and %s are not of the same version" % (
ip, nets[-1]))
nets.append(ip)
# sort and dedup
ips = sorted(set(ips))
# find consecutive address ranges in the sorted sequence and summarize them
if ips:
for first, last in _find_address_range(ips):
addrs.extend(summarize_address_range(first, last))
return _collapse_addresses_internal(addrs + nets)
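# Illustrative sketch (assumed output): bare addresses are first summarized
# into /32s, then merged with the explicit networks.
#   >>> list(collapse_addresses([IPv4Address('192.0.2.0'),
#   ...                          IPv4Address('192.0.2.1'),
#   ...                          IPv4Network('192.0.2.2/31')]))
#   [IPv4Network('192.0.2.0/30')]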
def get_mixed_type_key(obj):
"""Return a key suitable for sorting between networks and addresses.
Address and Network objects are not sortable by default; they're
fundamentally different so the expression
IPv4Address('192.0.2.0') <= IPv4Network('192.0.2.0/24')
    doesn't make any sense. There are times, however, when you may wish
to have ipaddress sort these for you anyway. If you need to do this, you
can use this function as the key= argument to sorted().
Args:
obj: either a Network or Address object.
Returns:
appropriate key.
"""
if isinstance(obj, _BaseNetwork):
return obj._get_networks_key()
elif isinstance(obj, _BaseAddress):
return obj._get_address_key()
return NotImplemented
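# Hypothetical sorting example; the network sorts first here because its
# network address (192.0.2.0) is lower than the bare address:
#   >>> sorted([IPv4Address('192.0.2.1'), IPv4Network('192.0.2.0/24')],
#   ...        key=get_mixed_type_key)
#   [IPv4Network('192.0.2.0/24'), IPv4Address('192.0.2.1')]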
class _IPAddressBase(_TotalOrderingMixin):
"""The mother class."""
__slots__ = ()
@property
def exploded(self):
"""Return the longhand version of the IP address as a string."""
return self._explode_shorthand_ip_string()
@property
def compressed(self):
"""Return the shorthand version of the IP address as a string."""
return _compat_str(self)
@property
def reverse_pointer(self):
"""The name of the reverse DNS pointer for the IP address, e.g.:
>>> ipaddress.ip_address("127.0.0.1").reverse_pointer
'1.0.0.127.in-addr.arpa'
>>> ipaddress.ip_address("2001:db8::1").reverse_pointer
'1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.8.b.d.0.1.0.0.2.ip6.arpa'
"""
return self._reverse_pointer()
@property
def version(self):
msg = '%200s has no version specified' % (type(self),)
raise NotImplementedError(msg)
def _check_int_address(self, address):
if address < 0:
msg = "%d (< 0) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._version))
if address > self._ALL_ONES:
msg = "%d (>= 2**%d) is not permitted as an IPv%d address"
raise AddressValueError(msg % (address, self._max_prefixlen,
self._version))
def _check_packed_address(self, address, expected_len):
address_len = len(address)
if address_len != expected_len:
msg = (
'%r (len %d != %d) is not permitted as an IPv%d address. '
'Did you pass in a bytes (str in Python 2) instead of'
' a unicode object?'
)
raise AddressValueError(msg % (address, address_len,
expected_len, self._version))
@classmethod
def _ip_int_from_prefix(cls, prefixlen):
"""Turn the prefix length into a bitwise netmask
Args:
prefixlen: An integer, the prefix length.
Returns:
An integer.
"""
return cls._ALL_ONES ^ (cls._ALL_ONES >> prefixlen)
@classmethod
def _prefix_from_ip_int(cls, ip_int):
"""Return prefix length from the bitwise netmask.
Args:
ip_int: An integer, the netmask in expanded bitwise format
Returns:
An integer, the prefix length.
Raises:
ValueError: If the input intermingles zeroes & ones
"""
trailing_zeroes = _count_righthand_zero_bits(ip_int,
cls._max_prefixlen)
prefixlen = cls._max_prefixlen - trailing_zeroes
leading_ones = ip_int >> trailing_zeroes
all_ones = (1 << prefixlen) - 1
if leading_ones != all_ones:
byteslen = cls._max_prefixlen // 8
details = _compat_to_bytes(ip_int, byteslen, 'big')
msg = 'Netmask pattern %r mixes zeroes & ones'
raise ValueError(msg % details)
return prefixlen
@classmethod
def _report_invalid_netmask(cls, netmask_str):
msg = '%r is not a valid netmask' % netmask_str
raise NetmaskValueError(msg)
@classmethod
def _prefix_from_prefix_string(cls, prefixlen_str):
"""Return prefix length from a numeric string
Args:
prefixlen_str: The string to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask
"""
# int allows a leading +/- as well as surrounding whitespace,
# so we ensure that isn't the case
if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
cls._report_invalid_netmask(prefixlen_str)
try:
prefixlen = int(prefixlen_str)
except ValueError:
cls._report_invalid_netmask(prefixlen_str)
if not (0 <= prefixlen <= cls._max_prefixlen):
cls._report_invalid_netmask(prefixlen_str)
return prefixlen
@classmethod
def _prefix_from_ip_string(cls, ip_str):
"""Turn a netmask/hostmask string into a prefix length
Args:
ip_str: The netmask/hostmask to be converted
Returns:
An integer, the prefix length.
Raises:
NetmaskValueError: If the input is not a valid netmask/hostmask
"""
# Parse the netmask/hostmask like an IP address.
try:
ip_int = cls._ip_int_from_string(ip_str)
except AddressValueError:
cls._report_invalid_netmask(ip_str)
# Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
# Note that the two ambiguous cases (all-ones and all-zeroes) are
# treated as netmasks.
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
pass
# Invert the bits, and try matching a /0+1+/ hostmask instead.
ip_int ^= cls._ALL_ONES
try:
return cls._prefix_from_ip_int(ip_int)
except ValueError:
cls._report_invalid_netmask(ip_str)
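    # Illustration (assumed results) of the dual netmask/hostmask parsing,
    # shown on the IPv4 subclass: the hostmask form is matched by inverting
    # the bits, so both spellings resolve to the same prefix length.
    #   IPv4Network._prefix_from_ip_string('255.255.255.0')  -> 24
    #   IPv4Network._prefix_from_ip_string('0.0.0.255')      -> 24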
def __reduce__(self):
return self.__class__, (_compat_str(self),)
class _BaseAddress(_IPAddressBase):
"""A generic IP object.
This IP class contains the version independent methods which are
used by single IP addresses.
"""
__slots__ = ()
def __int__(self):
return self._ip
def __eq__(self, other):
try:
return (self._ip == other._ip and
self._version == other._version)
except AttributeError:
return NotImplemented
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseAddress):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self._ip != other._ip:
return self._ip < other._ip
return False
# Shorthand for Integer addition and subtraction. This is not
# meant to ever support addition/subtraction of addresses.
def __add__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) + other)
def __sub__(self, other):
if not isinstance(other, _compat_int_types):
return NotImplemented
return self.__class__(int(self) - other)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return _compat_str(self._string_from_ip_int(self._ip))
def __hash__(self):
return hash(hex(int(self._ip)))
def _get_address_key(self):
return (self._version, self)
def __reduce__(self):
return self.__class__, (self._ip,)
class _BaseNetwork(_IPAddressBase):
"""A generic IP network object.
This IP class contains the version independent methods which are
used by networks.
"""
def __init__(self, address):
self._cache = {}
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, _compat_str(self))
def __str__(self):
return '%s/%d' % (self.network_address, self.prefixlen)
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the network
or broadcast addresses.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast):
yield self._address_class(x)
def __iter__(self):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network, broadcast + 1):
yield self._address_class(x)
def __getitem__(self, n):
network = int(self.network_address)
broadcast = int(self.broadcast_address)
if n >= 0:
if network + n > broadcast:
raise IndexError
return self._address_class(network + n)
else:
n += 1
if broadcast + n < network:
raise IndexError
return self._address_class(broadcast + n)
def __lt__(self, other):
if not isinstance(other, _IPAddressBase):
return NotImplemented
if not isinstance(other, _BaseNetwork):
raise TypeError('%s and %s are not of the same type' % (
self, other))
if self._version != other._version:
raise TypeError('%s and %s are not of the same version' % (
self, other))
if self.network_address != other.network_address:
return self.network_address < other.network_address
if self.netmask != other.netmask:
return self.netmask < other.netmask
return False
def __eq__(self, other):
try:
return (self._version == other._version and
self.network_address == other.network_address and
int(self.netmask) == int(other.netmask))
except AttributeError:
return NotImplemented
def __hash__(self):
return hash(int(self.network_address) ^ int(self.netmask))
def __contains__(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if isinstance(other, _BaseNetwork):
return False
# dealing with another address
else:
# address
return (int(self.network_address) <= int(other._ip) <=
int(self.broadcast_address))
def overlaps(self, other):
"""Tell if self is partly contained in other."""
return self.network_address in other or (
self.broadcast_address in other or (
other.network_address in self or (
other.broadcast_address in self)))
@property
def broadcast_address(self):
x = self._cache.get('broadcast_address')
if x is None:
x = self._address_class(int(self.network_address) |
int(self.hostmask))
self._cache['broadcast_address'] = x
return x
@property
def hostmask(self):
x = self._cache.get('hostmask')
if x is None:
x = self._address_class(int(self.netmask) ^ self._ALL_ONES)
self._cache['hostmask'] = x
return x
@property
def with_prefixlen(self):
return '%s/%d' % (self.network_address, self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self.network_address, self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self.network_address, self.hostmask)
@property
def num_addresses(self):
"""Number of hosts in the current subnet."""
return int(self.broadcast_address) - int(self.network_address) + 1
@property
def _address_class(self):
# Returning bare address objects (rather than interfaces) allows for
# more consistent behaviour across the network address, broadcast
# address and individual host addresses.
msg = '%200s has no associated address class' % (type(self),)
raise NotImplementedError(msg)
@property
def prefixlen(self):
return self._prefixlen
def address_exclude(self, other):
"""Remove an address from a larger block.
For example:
addr1 = ip_network('192.0.2.0/28')
addr2 = ip_network('192.0.2.1/32')
addr1.address_exclude(addr2) =
[IPv4Network('192.0.2.0/32'), IPv4Network('192.0.2.2/31'),
IPv4Network('192.0.2.4/30'), IPv4Network('192.0.2.8/29')]
or IPv6:
addr1 = ip_network('2001:db8::1/32')
addr2 = ip_network('2001:db8::1/128')
addr1.address_exclude(addr2) =
[ip_network('2001:db8::1/128'),
ip_network('2001:db8::2/127'),
ip_network('2001:db8::4/126'),
ip_network('2001:db8::8/125'),
...
ip_network('2001:db8:8000::/33')]
Args:
other: An IPv4Network or IPv6Network object of the same type.
Returns:
An iterator of the IPv(4|6)Network objects which is self
minus other.
Raises:
TypeError: If self and other are of differing address
versions, or if other is not a network object.
ValueError: If other is not completely contained by self.
"""
if not self._version == other._version:
raise TypeError("%s and %s are not of the same version" % (
self, other))
if not isinstance(other, _BaseNetwork):
raise TypeError("%s is not a network object" % other)
if not other.subnet_of(self):
raise ValueError('%s not contained in %s' % (other, self))
if other == self:
return
# Make sure we're comparing the network of other.
other = other.__class__('%s/%s' % (other.network_address,
other.prefixlen))
s1, s2 = self.subnets()
while s1 != other and s2 != other:
if other.subnet_of(s1):
yield s2
s1, s2 = s1.subnets()
elif other.subnet_of(s2):
yield s1
s1, s2 = s2.subnets()
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
if s1 == other:
yield s2
elif s2 == other:
yield s1
else:
# If we got here, there's a bug somewhere.
raise AssertionError('Error performing exclusion: '
's1: %s s2: %s other: %s' %
(s1, s2, other))
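    # A small worked sketch (assumed): excluding one host from a /28 yields
    # the remaining blocks, generated from the largest down to a /32.
    #   >>> list(ip_network('192.0.2.0/28').address_exclude(
    #   ...     ip_network('192.0.2.1/32')))
    #   [IPv4Network('192.0.2.8/29'), IPv4Network('192.0.2.4/30'),
    #    IPv4Network('192.0.2.2/31'), IPv4Network('192.0.2.0/32')]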
def compare_networks(self, other):
"""Compare two IP objects.
This is only concerned about the comparison of the integer
representation of the network addresses. This means that the
host bits aren't considered at all in this method. If you want
to compare host bits, you can easily enough do a
'HostA._ip < HostB._ip'
Args:
other: An IP object.
Returns:
If the IP versions of self and other are the same, returns:
-1 if self < other:
eg: IPv4Network('192.0.2.0/25') < IPv4Network('192.0.2.128/25')
IPv6Network('2001:db8::1000/124') <
IPv6Network('2001:db8::2000/124')
0 if self == other
eg: IPv4Network('192.0.2.0/24') == IPv4Network('192.0.2.0/24')
IPv6Network('2001:db8::1000/124') ==
IPv6Network('2001:db8::1000/124')
1 if self > other
eg: IPv4Network('192.0.2.128/25') > IPv4Network('192.0.2.0/25')
IPv6Network('2001:db8::2000/124') >
IPv6Network('2001:db8::1000/124')
Raises:
TypeError if the IP versions are different.
"""
# does this need to raise a ValueError?
if self._version != other._version:
raise TypeError('%s and %s are not of the same type' % (
self, other))
# self._version == other._version below here:
if self.network_address < other.network_address:
return -1
if self.network_address > other.network_address:
return 1
# self.network_address == other.network_address below here:
if self.netmask < other.netmask:
return -1
if self.netmask > other.netmask:
return 1
return 0
def _get_networks_key(self):
"""Network-only key function.
Returns an object that identifies this address' network and
netmask. This function is a suitable "key" argument for sorted()
and list.sort().
"""
return (self._version, self.network_address, self.netmask)
def subnets(self, prefixlen_diff=1, new_prefix=None):
"""The subnets which join to make the current subnet.
In the case that self contains only one IP
(self._prefixlen == 32 for IPv4 or self._prefixlen == 128
        for IPv6), the resulting iterator yields only self.
Args:
prefixlen_diff: An integer, the amount the prefix length
should be increased by. This should not be set if
new_prefix is also set.
new_prefix: The desired new prefix length. This must be a
larger number (smaller prefix) than the existing prefix.
This should not be set if prefixlen_diff is also set.
Returns:
            An iterator of IPv(4|6)Network objects.
Raises:
ValueError: The prefixlen_diff is too small or too large.
OR
prefixlen_diff and new_prefix are both set or new_prefix
is a smaller number than the current prefix (smaller
number means a larger network)
"""
if self._prefixlen == self._max_prefixlen:
yield self
return
if new_prefix is not None:
if new_prefix < self._prefixlen:
raise ValueError('new prefix must be longer')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = new_prefix - self._prefixlen
if prefixlen_diff < 0:
raise ValueError('prefix length diff must be > 0')
new_prefixlen = self._prefixlen + prefixlen_diff
if new_prefixlen > self._max_prefixlen:
raise ValueError(
'prefix length diff %d is invalid for netblock %s' % (
new_prefixlen, self))
start = int(self.network_address)
end = int(self.broadcast_address)
step = (int(self.hostmask) + 1) >> prefixlen_diff
for new_addr in _compat_range(start, end, step):
current = self.__class__((new_addr, new_prefixlen))
yield current
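    # Sketch of the expected splitting behavior (assumed output):
    #   >>> list(ip_network('192.0.2.0/24').subnets(new_prefix=26))
    #   [IPv4Network('192.0.2.0/26'), IPv4Network('192.0.2.64/26'),
    #    IPv4Network('192.0.2.128/26'), IPv4Network('192.0.2.192/26')]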
def supernet(self, prefixlen_diff=1, new_prefix=None):
"""The supernet containing the current network.
Args:
prefixlen_diff: An integer, the amount the prefix length of
the network should be decreased by. For example, given a
/24 network and a prefixlen_diff of 3, a supernet with a
/21 netmask is returned.
Returns:
            An IPv(4|6)Network object.
Raises:
ValueError: If self.prefixlen - prefixlen_diff < 0. I.e., you have
a negative prefix length.
OR
If prefixlen_diff and new_prefix are both set or new_prefix is a
larger number than the current prefix (larger number means a
smaller network)
"""
if self._prefixlen == 0:
return self
if new_prefix is not None:
if new_prefix > self._prefixlen:
raise ValueError('new prefix must be shorter')
if prefixlen_diff != 1:
raise ValueError('cannot set prefixlen_diff and new_prefix')
prefixlen_diff = self._prefixlen - new_prefix
new_prefixlen = self.prefixlen - prefixlen_diff
if new_prefixlen < 0:
raise ValueError(
'current prefixlen is %d, cannot have a prefixlen_diff of %d' %
(self.prefixlen, prefixlen_diff))
return self.__class__((
int(self.network_address) & (int(self.netmask) << prefixlen_diff),
new_prefixlen
))
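    # Sketch (assumed): widening a /24 by three bits gives its /21 supernet,
    # matching the docstring example above.
    #   >>> ip_network('192.0.2.0/24').supernet(prefixlen_diff=3)
    #   IPv4Network('192.0.0.0/21')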
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return (self.network_address.is_multicast and
self.broadcast_address.is_multicast)
def subnet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address <= self.network_address and
other.broadcast_address >= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
def supernet_of(self, other):
# always false if one is v4 and the other is v6.
if self._version != other._version:
return False
# dealing with another network.
if (hasattr(other, 'network_address') and
hasattr(other, 'broadcast_address')):
return (other.network_address >= self.network_address and
other.broadcast_address <= self.broadcast_address)
# dealing with another address
else:
raise TypeError('Unable to test subnet containment with element '
'of type %s' % type(other))
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return (self.network_address.is_reserved and
self.broadcast_address.is_reserved)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return (self.network_address.is_link_local and
self.broadcast_address.is_link_local)
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return (self.network_address.is_private and
self.broadcast_address.is_private)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry or iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return (self.network_address.is_unspecified and
self.broadcast_address.is_unspecified)
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return (self.network_address.is_loopback and
self.broadcast_address.is_loopback)
class _BaseV4(object):
"""Base IPv4 object.
The following methods are used by IPv4 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 4
# Equivalent to 255.255.255.255 or 32 bits of 1's.
_ALL_ONES = (2 ** IPV4LENGTH) - 1
_DECIMAL_DIGITS = frozenset('0123456789')
# the valid octets for host and netmasks. only useful for IPv4.
_valid_mask_octets = frozenset([255, 254, 252, 248, 240, 224, 192, 128, 0])
_max_prefixlen = IPV4LENGTH
# There are only a handful of valid v4 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
def _explode_shorthand_ip_string(self):
return _compat_str(self)
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
- a string representing the prefix length (e.g. "24")
- a string representing the prefix netmask (e.g. "255.255.255.0")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
try:
# Check for a netmask in prefix length form
prefixlen = cls._prefix_from_prefix_string(arg)
except NetmaskValueError:
# Check for a netmask or hostmask in dotted-quad form.
# This may raise NetmaskValueError.
prefixlen = cls._prefix_from_ip_string(arg)
netmask = IPv4Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn the given IP string into an integer for comparison.
Args:
ip_str: A string, the IP ip_str.
Returns:
The IP ip_str as an integer.
Raises:
AddressValueError: if ip_str isn't a valid IPv4 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
octets = ip_str.split('.')
if len(octets) != 4:
raise AddressValueError("Expected 4 octets in %r" % ip_str)
try:
return _compat_int_from_byte_vals(
map(cls._parse_octet, octets), 'big')
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_octet(cls, octet_str):
"""Convert a decimal octet into an integer.
Args:
octet_str: A string, the number to parse.
Returns:
The octet as an integer.
Raises:
ValueError: if the octet isn't strictly a decimal from [0..255].
"""
if not octet_str:
raise ValueError("Empty octet not permitted")
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._DECIMAL_DIGITS.issuperset(octet_str):
msg = "Only decimal digits permitted in %r"
raise ValueError(msg % octet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(octet_str) > 3:
msg = "At most 3 characters permitted in %r"
raise ValueError(msg % octet_str)
# Convert to integer (we know digits are legal)
octet_int = int(octet_str, 10)
# Any octets that look like they *might* be written in octal,
# and which don't look exactly the same in both octal and
# decimal are rejected as ambiguous
if octet_int > 7 and octet_str[0] == '0':
msg = "Ambiguous (octal/decimal) value in %r not permitted"
raise ValueError(msg % octet_str)
if octet_int > 255:
raise ValueError("Octet %d (> 255) not permitted" % octet_int)
return octet_int
@classmethod
def _string_from_ip_int(cls, ip_int):
"""Turns a 32-bit integer into dotted decimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
The IP address as a string in dotted decimal notation.
"""
return '.'.join(_compat_str(struct.unpack(b'!B', b)[0]
if isinstance(b, bytes)
else b)
for b in _compat_to_bytes(ip_int, 4, 'big'))
def _is_hostmask(self, ip_str):
"""Test if the IP string is a hostmask (rather than a netmask).
Args:
ip_str: A string, the potential hostmask.
Returns:
A boolean, True if the IP string is a hostmask.
"""
bits = ip_str.split('.')
try:
parts = [x for x in map(int, bits) if x in self._valid_mask_octets]
except ValueError:
return False
if len(parts) != len(bits):
return False
if parts[0] < parts[-1]:
return True
return False
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv4 address.
This implements the method described in RFC1035 3.5.
"""
reverse_octets = _compat_str(self).split('.')[::-1]
return '.'.join(reverse_octets) + '.in-addr.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv4Address(_BaseV4, _BaseAddress):
"""Represent and manipulate single IPv4 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv4Address('192.0.2.1') == IPv4Address(3221225985).
or, more generally
IPv4Address(int(IPv4Address('192.0.2.1'))) ==
IPv4Address('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 4)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v4_int_to_packed(self._ip)
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within the
reserved IPv4 Network range.
"""
return self in self._constants._reserved_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv4-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is multicast.
See RFC 3171 for details.
"""
return self in self._constants._multicast_network
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 5735 3.
"""
return self == self._constants._unspecified_address
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback per RFC 3330.
"""
return self in self._constants._loopback_network
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is link-local per RFC 3927.
"""
return self in self._constants._linklocal_network
class IPv4Interface(IPv4Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv4Address.__init__(self, address)
self.network = IPv4Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv4Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv4Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv4Address.__init__(self, addr[0])
self.network = IPv4Network(address, strict=False)
self._prefixlen = self.network._prefixlen
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv4Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv4Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv4Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
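# Quick sketch (assumed) of the address/interface/network split that
# IPv4Interface provides:
#   >>> iface = IPv4Interface('192.0.2.5/24')
#   >>> iface.ip, iface.network
#   (IPv4Address('192.0.2.5'), IPv4Network('192.0.2.0/24'))
#   >>> iface.with_netmask
#   '192.0.2.5/255.255.255.0'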
class IPv4Network(_BaseV4, _BaseNetwork):
"""This class represents and manipulates 32-bit IPv4 network + addresses..
Attributes: [examples for IPv4Network('192.0.2.0/27')]
.network_address: IPv4Address('192.0.2.0')
.hostmask: IPv4Address('0.0.0.31')
        .broadcast_address: IPv4Address('192.0.2.31')
.netmask: IPv4Address('255.255.255.224')
.prefixlen: 27
"""
# Class to use when creating address objects
_address_class = IPv4Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv4 network object.
Args:
address: A string or integer representing the IP [& network].
'192.0.2.0/24'
'192.0.2.0/255.255.255.0'
'192.0.0.2/0.0.0.255'
are all functionally the same in IPv4. Similarly,
'192.0.2.1'
'192.0.2.1/255.255.255.255'
'192.0.2.1/32'
are also functionally equivalent. That is to say, failing to
provide a subnetmask will create an object with a mask of /32.
If the mask (portion after the / in the argument) is given in
dotted quad form, it is treated as a netmask if it starts with a
non-zero field (e.g. /255.0.0.0 == /8) and as a hostmask if it
starts with a zero field (e.g. 0.255.255.255 == /8), with the
single exception of an all-zero mask which is treated as a
netmask == /0. If no mask is given, a default of /32 is used.
Additionally, an integer can be passed, so
IPv4Network('192.0.2.1') == IPv4Network(3221225985)
or, more generally
IPv4Interface(int(IPv4Interface('192.0.2.1'))) ==
IPv4Interface('192.0.2.1')
Raises:
AddressValueError: If ipaddress isn't a valid IPv4 address.
NetmaskValueError: If the netmask isn't valid for
an IPv4 address.
ValueError: If strict is True and a network address is not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Constructing from a packed address or integer
if isinstance(address, (_compat_int_types, bytes)):
self.network_address = IPv4Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
# fixme: address/network test here.
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
# We weren't given an address[1]
arg = self._max_prefixlen
self.network_address = IPv4Address(address[0])
self.netmask, self._prefixlen = self._make_netmask(arg)
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv4Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv4Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv4Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv4Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, True if the address is not reserved per
iana-ipv4-special-registry.
"""
return (not (self.network_address in IPv4Network('100.64.0.0/10') and
self.broadcast_address in IPv4Network('100.64.0.0/10')) and
not self.is_private)
class _IPv4Constants(object):
_linklocal_network = IPv4Network('169.254.0.0/16')
_loopback_network = IPv4Network('127.0.0.0/8')
_multicast_network = IPv4Network('224.0.0.0/4')
_private_networks = [
IPv4Network('0.0.0.0/8'),
IPv4Network('10.0.0.0/8'),
IPv4Network('127.0.0.0/8'),
IPv4Network('169.254.0.0/16'),
IPv4Network('172.16.0.0/12'),
IPv4Network('192.0.0.0/29'),
IPv4Network('192.0.0.170/31'),
IPv4Network('192.0.2.0/24'),
IPv4Network('192.168.0.0/16'),
IPv4Network('198.18.0.0/15'),
IPv4Network('198.51.100.0/24'),
IPv4Network('203.0.113.0/24'),
IPv4Network('240.0.0.0/4'),
IPv4Network('255.255.255.255/32'),
]
_reserved_network = IPv4Network('240.0.0.0/4')
_unspecified_address = IPv4Address('0.0.0.0')
IPv4Address._constants = _IPv4Constants
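# Hedged examples of the classification properties wired up above
# (values assumed from the registry networks in _IPv4Constants):
#   >>> IPv4Address('10.1.2.3').is_private
#   True
#   >>> IPv4Address('224.0.0.1').is_multicast
#   True
#   >>> IPv4Address('127.0.0.1').is_loopback
#   True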
class _BaseV6(object):
"""Base IPv6 object.
The following methods are used by IPv6 objects in both single IP
addresses and networks.
"""
__slots__ = ()
_version = 6
_ALL_ONES = (2 ** IPV6LENGTH) - 1
_HEXTET_COUNT = 8
_HEX_DIGITS = frozenset('0123456789ABCDEFabcdef')
_max_prefixlen = IPV6LENGTH
# There are only a bunch of valid v6 netmasks, so we cache them all
# when constructed (see _make_netmask()).
_netmask_cache = {}
@classmethod
def _make_netmask(cls, arg):
"""Make a (netmask, prefix_len) tuple from the given argument.
Argument can be:
- an integer (the prefix length)
        - a string representing the prefix length (e.g. "64")
"""
if arg not in cls._netmask_cache:
if isinstance(arg, _compat_int_types):
prefixlen = arg
else:
prefixlen = cls._prefix_from_prefix_string(arg)
netmask = IPv6Address(cls._ip_int_from_prefix(prefixlen))
cls._netmask_cache[arg] = netmask, prefixlen
return cls._netmask_cache[arg]
@classmethod
def _ip_int_from_string(cls, ip_str):
"""Turn an IPv6 ip_str into an integer.
Args:
ip_str: A string, the IPv6 ip_str.
Returns:
An int, the IPv6 address
Raises:
AddressValueError: if ip_str isn't a valid IPv6 Address.
"""
if not ip_str:
raise AddressValueError('Address cannot be empty')
parts = ip_str.split(':')
# An IPv6 address needs at least 2 colons (3 parts).
_min_parts = 3
if len(parts) < _min_parts:
msg = "At least %d parts expected in %r" % (_min_parts, ip_str)
raise AddressValueError(msg)
# If the address has an IPv4-style suffix, convert it to hexadecimal.
if '.' in parts[-1]:
try:
ipv4_int = IPv4Address(parts.pop())._ip
except AddressValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
parts.append('%x' % (ipv4_int & 0xFFFF))
# An IPv6 address can't have more than 8 colons (9 parts).
# The extra colon comes from using the "::" notation for a single
# leading or trailing zero part.
_max_parts = cls._HEXTET_COUNT + 1
if len(parts) > _max_parts:
msg = "At most %d colons permitted in %r" % (
_max_parts - 1, ip_str)
raise AddressValueError(msg)
# Disregarding the endpoints, find '::' with nothing in between.
# This indicates that a run of zeroes has been skipped.
skip_index = None
for i in _compat_range(1, len(parts) - 1):
if not parts[i]:
if skip_index is not None:
# Can't have more than one '::'
msg = "At most one '::' permitted in %r" % ip_str
raise AddressValueError(msg)
skip_index = i
# parts_hi is the number of parts to copy from above/before the '::'
# parts_lo is the number of parts to copy from below/after the '::'
if skip_index is not None:
# If we found a '::', then check if it also covers the endpoints.
parts_hi = skip_index
parts_lo = len(parts) - skip_index - 1
if not parts[0]:
parts_hi -= 1
if parts_hi:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
parts_lo -= 1
if parts_lo:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_skipped = cls._HEXTET_COUNT - (parts_hi + parts_lo)
if parts_skipped < 1:
msg = "Expected at most %d other parts with '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT - 1, ip_str))
else:
# Otherwise, allocate the entire address to parts_hi. The
# endpoints could still be empty, but _parse_hextet() will check
# for that.
if len(parts) != cls._HEXTET_COUNT:
msg = "Exactly %d parts expected without '::' in %r"
raise AddressValueError(msg % (cls._HEXTET_COUNT, ip_str))
if not parts[0]:
msg = "Leading ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # ^: requires ^::
if not parts[-1]:
msg = "Trailing ':' only permitted as part of '::' in %r"
raise AddressValueError(msg % ip_str) # :$ requires ::$
parts_hi = len(parts)
parts_lo = 0
parts_skipped = 0
try:
# Now, parse the hextets into a 128-bit integer.
ip_int = 0
for i in range(parts_hi):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
ip_int <<= 16 * parts_skipped
for i in range(-parts_lo, 0):
ip_int <<= 16
ip_int |= cls._parse_hextet(parts[i])
return ip_int
except ValueError as exc:
raise AddressValueError("%s in %r" % (exc, ip_str))
@classmethod
def _parse_hextet(cls, hextet_str):
"""Convert an IPv6 hextet string into an integer.
Args:
hextet_str: A string, the number to parse.
Returns:
The hextet as an integer.
Raises:
ValueError: if the input isn't strictly a hex number from
[0..FFFF].
"""
# Whitelist the characters, since int() allows a lot of bizarre stuff.
if not cls._HEX_DIGITS.issuperset(hextet_str):
raise ValueError("Only hex digits permitted in %r" % hextet_str)
# We do the length check second, since the invalid character error
# is likely to be more informative for the user
if len(hextet_str) > 4:
msg = "At most 4 characters permitted in %r"
raise ValueError(msg % hextet_str)
# Length check means we can skip checking the integer value
return int(hextet_str, 16)
@classmethod
def _compress_hextets(cls, hextets):
"""Compresses a list of hextets.
Compresses a list of strings, replacing the longest continuous
sequence of "0" in the list with "" and adding empty strings at
the beginning or at the end of the string such that subsequently
calling ":".join(hextets) will produce the compressed version of
the IPv6 address.
Args:
hextets: A list of strings, the hextets to compress.
Returns:
A list of strings.
"""
best_doublecolon_start = -1
best_doublecolon_len = 0
doublecolon_start = -1
doublecolon_len = 0
for index, hextet in enumerate(hextets):
if hextet == '0':
doublecolon_len += 1
if doublecolon_start == -1:
# Start of a sequence of zeros.
doublecolon_start = index
if doublecolon_len > best_doublecolon_len:
# This is the longest sequence of zeros so far.
best_doublecolon_len = doublecolon_len
best_doublecolon_start = doublecolon_start
else:
doublecolon_len = 0
doublecolon_start = -1
if best_doublecolon_len > 1:
best_doublecolon_end = (best_doublecolon_start +
best_doublecolon_len)
# For zeros at the end of the address.
if best_doublecolon_end == len(hextets):
hextets += ['']
hextets[best_doublecolon_start:best_doublecolon_end] = ['']
# For zeros at the beginning of the address.
if best_doublecolon_start == 0:
hextets = [''] + hextets
return hextets
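    # Sketch of the compression step (assumed): the longest zero run is
    # replaced by an empty string so that ':'.join() produces '::'.
    #   _compress_hextets(['2001', 'db8', '0', '0', '0', '0', '0', '1'])
    #   -> ['2001', 'db8', '', '1']        # joins to '2001:db8::1'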
@classmethod
def _string_from_ip_int(cls, ip_int=None):
"""Turns a 128-bit integer into hexadecimal notation.
Args:
ip_int: An integer, the IP address.
Returns:
A string, the hexadecimal representation of the address.
Raises:
ValueError: The address is bigger than 128 bits of all ones.
"""
if ip_int is None:
ip_int = int(cls._ip)
if ip_int > cls._ALL_ONES:
raise ValueError('IPv6 address is too large')
hex_str = '%032x' % ip_int
hextets = ['%x' % int(hex_str[x:x + 4], 16) for x in range(0, 32, 4)]
hextets = cls._compress_hextets(hextets)
return ':'.join(hextets)
def _explode_shorthand_ip_string(self):
"""Expand a shortened IPv6 address.
Returns:
A string, the expanded IPv6 address.
"""
if isinstance(self, IPv6Network):
ip_str = _compat_str(self.network_address)
elif isinstance(self, IPv6Interface):
ip_str = _compat_str(self.ip)
else:
ip_str = _compat_str(self)
ip_int = self._ip_int_from_string(ip_str)
hex_str = '%032x' % ip_int
parts = [hex_str[x:x + 4] for x in range(0, 32, 4)]
if isinstance(self, (_BaseNetwork, IPv6Interface)):
return '%s/%d' % (':'.join(parts), self._prefixlen)
return ':'.join(parts)
def _reverse_pointer(self):
"""Return the reverse DNS pointer name for the IPv6 address.
This implements the method described in RFC3596 2.5.
"""
reverse_chars = self.exploded[::-1].replace(':', '')
return '.'.join(reverse_chars) + '.ip6.arpa'
@property
def max_prefixlen(self):
return self._max_prefixlen
@property
def version(self):
return self._version
class IPv6Address(_BaseV6, _BaseAddress):
"""Represent and manipulate single IPv6 Addresses."""
__slots__ = ('_ip', '__weakref__')
def __init__(self, address):
"""Instantiate a new IPv6 address object.
Args:
address: A string or integer representing the IP
Additionally, an integer can be passed, so
IPv6Address('2001:db8::') ==
IPv6Address(42540766411282592856903984951653826560)
or, more generally
IPv6Address(int(IPv6Address('2001:db8::'))) ==
IPv6Address('2001:db8::')
Raises:
AddressValueError: If address isn't a valid IPv6 address.
"""
# Efficient constructor from integer.
if isinstance(address, _compat_int_types):
self._check_int_address(address)
self._ip = address
return
# Constructing from a packed address
if isinstance(address, bytes):
self._check_packed_address(address, 16)
bvs = _compat_bytes_to_byte_vals(address)
self._ip = _compat_int_from_byte_vals(bvs, 'big')
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP string.
addr_str = _compat_str(address)
if '/' in addr_str:
raise AddressValueError("Unexpected '/' in %r" % address)
self._ip = self._ip_int_from_string(addr_str)
@property
def packed(self):
"""The binary representation of this address."""
return v6_int_to_packed(self._ip)
@property
def is_multicast(self):
"""Test if the address is reserved for multicast use.
Returns:
A boolean, True if the address is a multicast address.
See RFC 2373 2.7 for details.
"""
return self in self._constants._multicast_network
@property
def is_reserved(self):
"""Test if the address is otherwise IETF reserved.
Returns:
A boolean, True if the address is within one of the
reserved IPv6 Network ranges.
"""
return any(self in x for x in self._constants._reserved_networks)
@property
def is_link_local(self):
"""Test if the address is reserved for link-local.
Returns:
A boolean, True if the address is reserved per RFC 4291.
"""
return self in self._constants._linklocal_network
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return self in self._constants._sitelocal_network
@property
def is_private(self):
"""Test if this address is allocated for private networks.
Returns:
A boolean, True if the address is reserved per
iana-ipv6-special-registry.
"""
return any(self in net for net in self._constants._private_networks)
@property
def is_global(self):
"""Test if this address is allocated for public networks.
Returns:
A boolean, true if the address is not reserved per
iana-ipv6-special-registry.
"""
return not self.is_private
@property
def is_unspecified(self):
"""Test if the address is unspecified.
Returns:
A boolean, True if this is the unspecified address as defined in
RFC 2373 2.5.2.
"""
return self._ip == 0
@property
def is_loopback(self):
"""Test if the address is a loopback address.
Returns:
A boolean, True if the address is a loopback address as defined in
RFC 2373 2.5.3.
"""
return self._ip == 1
@property
def ipv4_mapped(self):
"""Return the IPv4 mapped address.
Returns:
If the IPv6 address is a v4 mapped address, return the
IPv4 mapped address. Return None otherwise.
"""
if (self._ip >> 32) != 0xFFFF:
return None
return IPv4Address(self._ip & 0xFFFFFFFF)
@property
def teredo(self):
"""Tuple of embedded teredo IPs.
Returns:
Tuple of the (server, client) IPs or None if the address
doesn't appear to be a teredo address (doesn't start with
2001::/32)
"""
if (self._ip >> 96) != 0x20010000:
return None
return (IPv4Address((self._ip >> 64) & 0xFFFFFFFF),
IPv4Address(~self._ip & 0xFFFFFFFF))
@property
def sixtofour(self):
"""Return the IPv4 6to4 embedded address.
Returns:
The IPv4 6to4-embedded address if present or None if the
address doesn't appear to contain a 6to4 embedded address.
"""
if (self._ip >> 112) != 0x2002:
return None
return IPv4Address((self._ip >> 80) & 0xFFFFFFFF)
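    # Sketch of embedded-IPv4 extraction (example values assumed):
    #   >>> IPv6Address('::ffff:192.0.2.1').ipv4_mapped
    #   IPv4Address('192.0.2.1')
    #   >>> IPv6Address('2002:c000:201::').sixtofour
    #   IPv4Address('192.0.2.1')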
class IPv6Interface(IPv6Address):
def __init__(self, address):
if isinstance(address, (bytes, _compat_int_types)):
IPv6Address.__init__(self, address)
self.network = IPv6Network(self._ip)
self._prefixlen = self._max_prefixlen
return
if isinstance(address, tuple):
IPv6Address.__init__(self, address[0])
if len(address) > 1:
self._prefixlen = int(address[1])
else:
self._prefixlen = self._max_prefixlen
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self.hostmask = self.network.hostmask
return
addr = _split_optional_netmask(address)
IPv6Address.__init__(self, addr[0])
self.network = IPv6Network(address, strict=False)
self.netmask = self.network.netmask
self._prefixlen = self.network._prefixlen
self.hostmask = self.network.hostmask
def __str__(self):
return '%s/%d' % (self._string_from_ip_int(self._ip),
self.network.prefixlen)
def __eq__(self, other):
address_equal = IPv6Address.__eq__(self, other)
if not address_equal or address_equal is NotImplemented:
return address_equal
try:
return self.network == other.network
except AttributeError:
# An interface with an associated network is NOT the
# same as an unassociated address. That's why the hash
# takes the extra info into account.
return False
def __lt__(self, other):
address_less = IPv6Address.__lt__(self, other)
if address_less is NotImplemented:
return NotImplemented
try:
return self.network < other.network
except AttributeError:
# We *do* allow addresses and interfaces to be sorted. The
# unassociated address is considered less than all interfaces.
return False
def __hash__(self):
return self._ip ^ self._prefixlen ^ int(self.network.network_address)
__reduce__ = _IPAddressBase.__reduce__
@property
def ip(self):
return IPv6Address(self._ip)
@property
def with_prefixlen(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self._prefixlen)
@property
def with_netmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.netmask)
@property
def with_hostmask(self):
return '%s/%s' % (self._string_from_ip_int(self._ip),
self.hostmask)
@property
def is_unspecified(self):
return self._ip == 0 and self.network.is_unspecified
@property
def is_loopback(self):
return self._ip == 1 and self.network.is_loopback
class IPv6Network(_BaseV6, _BaseNetwork):
"""This class represents and manipulates 128-bit IPv6 networks.
    Attributes: [examples for IPv6Network('2001:db8::1000/124')]
.network_address: IPv6Address('2001:db8::1000')
.hostmask: IPv6Address('::f')
.broadcast_address: IPv6Address('2001:db8::100f')
.netmask: IPv6Address('ffff:ffff:ffff:ffff:ffff:ffff:ffff:fff0')
.prefixlen: 124
"""
# Class to use when creating address objects
_address_class = IPv6Address
def __init__(self, address, strict=True):
"""Instantiate a new IPv6 Network object.
Args:
address: A string or integer representing the IPv6 network or the
IP and prefix/netmask.
'2001:db8::/128'
'2001:db8:0000:0000:0000:0000:0000:0000/128'
'2001:db8::'
are all functionally the same in IPv6. That is to say,
failing to provide a subnetmask will create an object with
a mask of /128.
Additionally, an integer can be passed, so
IPv6Network('2001:db8::') ==
IPv6Network(42540766411282592856903984951653826560)
or, more generally
IPv6Network(int(IPv6Network('2001:db8::'))) ==
IPv6Network('2001:db8::')
strict: A boolean. If true, ensure that we have been passed
              a true network address, e.g. 2001:db8::1000/124, and not an
              IP address on a network, e.g. 2001:db8::1/124.
Raises:
AddressValueError: If address isn't a valid IPv6 address.
NetmaskValueError: If the netmask isn't valid for
an IPv6 address.
ValueError: If strict was True and a network address was not
supplied.
"""
_BaseNetwork.__init__(self, address)
# Efficient constructor from integer or packed address
if isinstance(address, (bytes, _compat_int_types)):
self.network_address = IPv6Address(address)
self.netmask, self._prefixlen = self._make_netmask(
self._max_prefixlen)
return
if isinstance(address, tuple):
if len(address) > 1:
arg = address[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
self.network_address = IPv6Address(address[0])
packed = int(self.network_address)
if packed & int(self.netmask) != packed:
if strict:
raise ValueError('%s has host bits set' % self)
else:
self.network_address = IPv6Address(packed &
int(self.netmask))
return
# Assume input argument to be string or any object representation
# which converts into a formatted IP prefix string.
addr = _split_optional_netmask(address)
self.network_address = IPv6Address(self._ip_int_from_string(addr[0]))
if len(addr) == 2:
arg = addr[1]
else:
arg = self._max_prefixlen
self.netmask, self._prefixlen = self._make_netmask(arg)
if strict:
if (IPv6Address(int(self.network_address) & int(self.netmask)) !=
self.network_address):
raise ValueError('%s has host bits set' % self)
self.network_address = IPv6Address(int(self.network_address) &
int(self.netmask))
if self._prefixlen == (self._max_prefixlen - 1):
self.hosts = self.__iter__
def hosts(self):
"""Generate Iterator over usable hosts in a network.
This is like __iter__ except it doesn't return the
Subnet-Router anycast address.
"""
network = int(self.network_address)
broadcast = int(self.broadcast_address)
for x in _compat_range(network + 1, broadcast + 1):
yield self._address_class(x)
@property
def is_site_local(self):
"""Test if the address is reserved for site-local.
Note that the site-local address space has been deprecated by RFC 3879.
Use is_private to test if this address is in the space of unique local
addresses as defined by RFC 4193.
Returns:
A boolean, True if the address is reserved per RFC 3513 2.5.6.
"""
return (self.network_address.is_site_local and
self.broadcast_address.is_site_local)
class _IPv6Constants(object):
_linklocal_network = IPv6Network('fe80::/10')
_multicast_network = IPv6Network('ff00::/8')
_private_networks = [
IPv6Network('::1/128'),
IPv6Network('::/128'),
IPv6Network('::ffff:0:0/96'),
IPv6Network('100::/64'),
IPv6Network('2001::/23'),
IPv6Network('2001:2::/48'),
IPv6Network('2001:db8::/32'),
IPv6Network('2001:10::/28'),
IPv6Network('fc00::/7'),
IPv6Network('fe80::/10'),
]
_reserved_networks = [
IPv6Network('::/8'), IPv6Network('100::/8'),
IPv6Network('200::/7'), IPv6Network('400::/6'),
IPv6Network('800::/5'), IPv6Network('1000::/4'),
IPv6Network('4000::/3'), IPv6Network('6000::/3'),
IPv6Network('8000::/3'), IPv6Network('A000::/3'),
IPv6Network('C000::/3'), IPv6Network('E000::/4'),
IPv6Network('F000::/5'), IPv6Network('F800::/6'),
IPv6Network('FE00::/9'),
]
_sitelocal_network = IPv6Network('fec0::/10')
IPv6Address._constants = _IPv6Constants
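# A closing usage sketch (assumed) tying the v6 pieces together:
#   >>> net = IPv6Network('2001:db8::/125')
#   >>> list(net.hosts())[0], net.num_addresses
#   (IPv6Address('2001:db8::1'), 8)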
| apache-2.0 |
jancsarc/KIM-Online | src/accounts/views.py | 1 | 2138 | from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from django.views import generic
from django.shortcuts import redirect
from django.contrib.auth import get_user_model
from django.contrib import auth
from django.contrib import messages
from authtools import views as authviews
from braces import views as bracesviews
from django.conf import settings
from . import forms
User = get_user_model()
class LoginView(bracesviews.AnonymousRequiredMixin,
authviews.LoginView):
template_name = "accounts/login.html"
form_class = forms.LoginForm
def form_valid(self, form):
        response = super(LoginView, self).form_valid(form)
remember_me = form.cleaned_data.get('remember_me')
if remember_me is True:
ONE_MONTH = 30*24*60*60
expiry = getattr(settings, "KEEP_LOGGED_DURATION", ONE_MONTH)
self.request.session.set_expiry(expiry)
        return response
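# KEEP_LOGGED_DURATION above is an optional project setting; a hypothetical
# settings.py entry keeping "remember me" sessions alive for two weeks:
#   KEEP_LOGGED_DURATION = 60 * 60 * 24 * 14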
class LogoutView(authviews.LogoutView):
url = reverse_lazy('home')
class PasswordChangeView(authviews.PasswordChangeView):
form_class = forms.PasswordChangeForm
template_name = 'accounts/password-change.html'
success_url = reverse_lazy('home')
def form_valid(self, form):
form.save()
messages.success(self.request,
"Your password was changed, "
"hence you have been logged out. Please relogin")
return redirect("home")
class PasswordResetView(authviews.PasswordResetView):
form_class = forms.PasswordResetForm
template_name = 'accounts/password-reset.html'
success_url = reverse_lazy('accounts:password-reset-done')
subject_template_name = 'accounts/emails/password-reset-subject.txt'
email_template_name = 'accounts/emails/password-reset-email.html'
class PasswordResetDoneView(authviews.PasswordResetDoneView):
template_name = 'accounts/password-reset-done.html'
class PasswordResetConfirmView(authviews.PasswordResetConfirmAndLoginView):
template_name = 'accounts/password-reset-confirm.html'
form_class = forms.SetPasswordForm
| mit |
ibinti/intellij-community | python/lib/Lib/site-packages/django/utils/itercompat.py | 294 | 1169 | """
Providing iterator functions that are not in all versions of Python we support.
Where possible, we try to use the system-native version and only fall back to
these implementations if necessary.
"""
import itertools
# Fallback for Python 2.4, Python 2.5
def product(*args, **kwds):
"""
Taken from http://docs.python.org/library/itertools.html#itertools.product
"""
# product('ABCD', 'xy') --> Ax Ay Bx By Cx Cy Dx Dy
# product(range(2), repeat=3) --> 000 001 010 011 100 101 110 111
pools = map(tuple, args) * kwds.get('repeat', 1)
result = [[]]
for pool in pools:
result = [x+[y] for x in result for y in pool]
for prod in result:
yield tuple(prod)
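# Doctest-style sketch (assumed output), matching the comment above:
#   >>> list(product('AB', 'xy'))
#   [('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y')]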
if hasattr(itertools, 'product'):
product = itertools.product
def is_iterable(x):
"A implementation independent way of checking for iterables"
try:
iter(x)
except TypeError:
return False
else:
return True
def all(iterable):
for item in iterable:
if not item:
return False
return True
def any(iterable):
for item in iterable:
if item:
return True
return False
| apache-2.0 |
fritsvanveen/QGIS | python/ext-libs/pygments/formatters/html.py | 21 | 31759 | # -*- coding: utf-8 -*-
"""
pygments.formatters.html
~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for HTML output.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import sys
import os.path
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
StringIO, string_types, iteritems
try:
import ctags
except ImportError:
ctags = None
__all__ = ['HtmlFormatter']
_escape_html_table = {
ord('&'): u'&',
ord('<'): u'<',
ord('>'): u'>',
ord('"'): u'"',
ord("'"): u''',
}
def escape_html(text, table=_escape_html_table):
"""Escape &, <, > as well as single and double quotes for HTML."""
return text.translate(table)
def _get_ttype_class(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = '-' + ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
CSSFILE_TEMPLATE = '''\
td.linenos { background-color: #f0f0f0; padding-right: 10px; }
span.lineno { background-color: #f0f0f0; padding: 0 5px 0 5px; }
pre { line-height: 125%%; }
%(styledefs)s
'''
DOC_HEADER = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<style type="text/css">
''' + CSSFILE_TEMPLATE + '''
</style>
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_HEADER_EXTERNALCSS = '''\
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>%(title)s</title>
<meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
<link rel="stylesheet" href="%(cssfile)s" type="text/css">
</head>
<body>
<h2>%(title)s</h2>
'''
DOC_FOOTER = '''\
</body>
</html>
'''
class HtmlFormatter(Formatter):
r"""
Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
option.
If the `linenos` option is set to ``"table"``, the ``<pre>`` is
additionally wrapped inside a ``<table>`` which has one row and two
cells: one containing the line numbers and one containing the code.
Example:
.. sourcecode:: html
<div class="highlight" >
<table><tr>
<td class="linenos" title="click to toggle"
onclick="with (this.firstChild.style)
{ display = (display == '') ? 'none' : '' }">
<pre>1
2</pre>
</td>
<td class="code">
<pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
<span class="Ke">pass</span>
</pre>
</td>
</tr></table></div>
(whitespace added to improve clarity).
Wrapping can be disabled using the `nowrap` option.
A list of lines can be specified using the `hl_lines` option to make these
lines highlighted (as of Pygments 0.11).
With the `full` option, a complete HTML 4 document is output, including
the style definitions inside a ``<style>`` tag, or in a separate file if
the `cssfile` option is given.
When `tagsfile` is set to the path of a ctags index file, it is used to
generate hyperlinks from names to their definition. You must enable
`lineanchors` and run ctags with the `-n` option for this to work. The
`python-ctags` module from PyPI must be installed to use this feature;
otherwise a `RuntimeError` will be raised.
The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
containing CSS rules for the CSS classes used by the formatter. The
argument `arg` can be used to specify additional CSS selectors that
are prepended to the classes. A call `fmter.get_style_defs('td .code')`
would result in the following CSS classes:
.. sourcecode:: css
td .code .kw { font-weight: bold; color: #00FF00 }
td .code .cm { color: #999999 }
...
If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
`get_style_defs()` method to request multiple prefixes for the tokens:
.. sourcecode:: python
formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
The output would then look like this:
.. sourcecode:: css
div.syntax pre .kw,
pre.syntax .kw { font-weight: bold; color: #00FF00 }
div.syntax pre .cm,
pre.syntax .cm { color: #999999 }
...
Additional options accepted:
`nowrap`
If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
tag. This disables most other options (default: ``False``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``). This option has no effect if the `cssfile`
and `noclobber_cssfile` option are given and the file specified in
`cssfile` exists.
`noclasses`
If set to true, token ``<span>`` tags will not use CSS classes, but
inline styles. This is not recommended for larger pieces of code since
it increases output size by quite a bit (default: ``False``).
`classprefix`
Since the token types use relatively short class names, they may clash
with some of your own class names. In this case you can use the
`classprefix` option to give a string to prepend to all Pygments-generated
CSS class names for token types.
Note that this option also affects the output of `get_style_defs()`.
`cssclass`
CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
If you set this option, the default selector for `get_style_defs()`
will be this class.
.. versionadded:: 0.9
If you select the ``'table'`` line numbers, the wrapping table will
have a CSS class of this string plus ``'table'``, the default is
accordingly ``'highlighttable'``.
`cssstyles`
Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
`prestyles`
Inline CSS styles for the ``<pre>`` tag (default: ``''``).
.. versionadded:: 0.11
`cssfile`
If the `full` option is true and this option is given, it must be the
name of an external file. If the filename does not include an absolute
path, the file's path will be assumed to be relative to the main output
file's path, if the latter can be found. The stylesheet is then written
to this file instead of the HTML file.
.. versionadded:: 0.6
`noclobber_cssfile`
If `cssfile` is given and the specified file exists, the css file will
not be overwritten. This allows the use of the `full` option in
combination with a user specified css file. Default is ``False``.
.. versionadded:: 1.1
`linenos`
If set to ``'table'``, output line numbers as a table with two cells,
one containing the line numbers, the other the whole code. This is
copy-and-paste-friendly, but may cause alignment problems with some
browsers or fonts. If set to ``'inline'``, the line numbers will be
integrated in the ``<pre>`` tag that contains the code (that setting
is *new in Pygments 0.8*).
For compatibility with Pygments 0.7 and earlier, every true value
except ``'inline'`` means the same as ``'table'`` (in particular, that
means also ``True``).
The default value is ``False``, which means no line numbers at all.
**Note:** with the default ("table") line number mechanism, the line
numbers and code can have different line heights in Internet Explorer
unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
CSS property (you get the default line spacing with ``line-height:
125%``).
`hl_lines`
Specify a list of lines to be highlighted.
.. versionadded:: 0.11
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`linenospecial`
If set to a number n > 0, every nth line number is given the CSS
class ``"special"`` (default: ``0``).
`nobackground`
If set to ``True``, the formatter won't output the background color
for the wrapping element (this automatically defaults to ``False``
        when there is no wrapping element [e.g. no argument given to the
        `get_style_defs` method]) (default: ``False``).
.. versionadded:: 0.6
`lineseparator`
This string is output between lines of code. It defaults to ``"\n"``,
which is enough to break a line inside ``<pre>`` tags, but you can
e.g. set it to ``"<br>"`` to get HTML line breaks.
.. versionadded:: 0.7
`lineanchors`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in an anchor tag with a ``name`` of ``foo-linenumber``.
This allows easy linking to certain lines.
.. versionadded:: 0.9
`linespans`
If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
output line in a span tag with an ``id`` of ``foo-linenumber``.
This allows easy access to lines via javascript.
.. versionadded:: 1.6
`anchorlinenos`
If set to `True`, will wrap line numbers in <a> tags. Used in
combination with `linenos` and `lineanchors`.
`tagsfile`
If set to the path of a ctags file, wrap names in anchor tags that
link to their definitions. `lineanchors` should be used, and the
tags file should specify line numbers (see the `-n` option to ctags).
.. versionadded:: 1.6
`tagurlformat`
A string formatting pattern used to generate links to ctags definitions.
Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
Defaults to an empty string, resulting in just `#prefix-number` links.
.. versionadded:: 1.6
`filename`
A string used to generate a filename when rendering <pre> blocks,
for example if displaying source code.
.. versionadded:: 2.1
**Subclassing the HTML formatter**
.. versionadded:: 0.7
The HTML formatter is now built in a way that allows easy subclassing, thus
customizing the output HTML code. The `format()` method calls
`self._format_lines()` which returns a generator that yields tuples of ``(1,
line)``, where the ``1`` indicates that the ``line`` is a line of the
formatted source code.
    If the `nowrap` option is set, the generator is simply iterated over and the
    resulting HTML is output.
Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
other generators. These may add some HTML code to the one generated by
`_format_lines()`, either by modifying the lines generated by the latter,
then yielding them again with ``(1, line)``, and/or by yielding other HTML
code before or after the lines, with ``(0, html)``. The distinction between
source lines and other code makes it possible to wrap the generator multiple
times.
The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
A custom `HtmlFormatter` subclass could look like this:
.. sourcecode:: python
class CodeHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
yield 0, '<code>'
for i, t in source:
if i == 1:
# it's a line of formatted code
t += '<br>'
yield i, t
yield 0, '</code>'
This results in wrapping the formatted lines with a ``<code>`` tag, where the
source lines are broken using ``<br>`` tags.
After calling `wrap()`, the `format()` method also adds the "line numbers"
and/or "full document" wrappers if the respective options are set. Then, all
HTML yielded by the wrapped generator is output.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm']
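    # A minimal usage sketch (illustrative; assumes the usual pygments entry
    # points are importable):
    #
    #   from pygments import highlight
    #   from pygments.lexers import PythonLexer
    #   print(highlight('print(1)', PythonLexer(), HtmlFormatter(linenos='table')))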
def __init__(self, **options):
Formatter.__init__(self, **options)
self.title = self._decodeifneeded(self.title)
self.nowrap = get_bool_opt(options, 'nowrap', False)
self.noclasses = get_bool_opt(options, 'noclasses', False)
self.classprefix = options.get('classprefix', '')
self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
self.filename = self._decodeifneeded(options.get('filename', ''))
if self.tagsfile:
if not ctags:
                raise RuntimeError('The "ctags" package must be installed '
                                   'to be able to use the "tagsfile" feature.')
self._ctags = ctags.CTags(self.tagsfile)
linenos = options.get('linenos', False)
if linenos == 'inline':
self.linenos = 2
elif linenos:
# compatibility with <= 0.7
self.linenos = 1
else:
self.linenos = 0
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.lineseparator = options.get('lineseparator', '\n')
self.lineanchors = options.get('lineanchors', '')
self.linespans = options.get('linespans', '')
self.anchorlinenos = options.get('anchorlinenos', False)
self.hl_lines = set()
for lineno in get_list_opt(options, 'hl_lines', []):
try:
self.hl_lines.add(int(lineno))
except ValueError:
pass
self._create_stylesheet()
def _get_css_class(self, ttype):
"""Return the css class of this token type prefixed with
the classprefix option."""
ttypeclass = _get_ttype_class(ttype)
if ttypeclass:
return self.classprefix + ttypeclass
return ''
def _get_css_classes(self, ttype):
"""Return the css classes of this token type prefixed with
the classprefix option."""
cls = self._get_css_class(ttype)
while ttype not in STANDARD_TYPES:
ttype = ttype.parent
cls = self._get_css_class(ttype) + ' ' + cls
return cls
def _create_stylesheet(self):
t2c = self.ttype2class = {Token: ''}
c2s = self.class2style = {}
for ttype, ndef in self.style:
name = self._get_css_class(ttype)
style = ''
if ndef['color']:
style += 'color: #%s; ' % ndef['color']
if ndef['bold']:
style += 'font-weight: bold; '
if ndef['italic']:
style += 'font-style: italic; '
if ndef['underline']:
style += 'text-decoration: underline; '
if ndef['bgcolor']:
style += 'background-color: #%s; ' % ndef['bgcolor']
if ndef['border']:
style += 'border: 1px solid #%s; ' % ndef['border']
if style:
t2c[ttype] = name
# save len(ttype) to enable ordering the styles by
# hierarchy (necessary for CSS cascading rules!)
c2s[name] = (style[:-2], ttype, len(ttype))
def get_style_defs(self, arg=None):
"""
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
insert before the token type classes.
"""
if arg is None:
arg = ('cssclass' in self.options and '.'+self.cssclass or '')
if isinstance(arg, string_types):
args = [arg]
else:
args = list(arg)
def prefix(cls):
if cls:
cls = '.' + cls
tmp = []
for arg in args:
tmp.append((arg and arg + ' ' or '') + cls)
return ', '.join(tmp)
styles = [(level, ttype, cls, style)
for cls, (style, ttype, level) in iteritems(self.class2style)
if cls and style]
styles.sort()
lines = ['%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
for (level, ttype, cls, style) in styles]
if arg and not self.nobackground and \
self.style.background_color is not None:
text_style = ''
if Text in self.ttype2class:
text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
lines.insert(0, '%s { background: %s;%s }' %
(prefix(''), self.style.background_color, text_style))
if self.style.highlight_color is not None:
lines.insert(0, '%s.hll { background-color: %s }' %
(prefix(''), self.style.highlight_color))
return '\n'.join(lines)
def _decodeifneeded(self, value):
if isinstance(value, bytes):
if self.encoding:
return value.decode(self.encoding)
return value.decode()
return value
def _wrap_full(self, inner, outfile):
if self.cssfile:
if os.path.isabs(self.cssfile):
# it's an absolute filename
cssfilename = self.cssfile
else:
try:
filename = outfile.name
if not filename or filename[0] == '<':
# pseudo files, e.g. name == '<fdopen>'
raise AttributeError
cssfilename = os.path.join(os.path.dirname(filename),
self.cssfile)
except AttributeError:
print('Note: Cannot determine output file name, '
'using current directory as base for the CSS file name',
file=sys.stderr)
cssfilename = self.cssfile
# write CSS file only if noclobber_cssfile isn't given as an option.
try:
if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
cf = open(cssfilename, "w")
cf.write(CSSFILE_TEMPLATE %
{'styledefs': self.get_style_defs('body')})
cf.close()
except IOError as err:
err.strerror = 'Error writing CSS file: ' + err.strerror
raise
yield 0, (DOC_HEADER_EXTERNALCSS %
dict(title=self.title,
cssfile=self.cssfile,
encoding=self.encoding))
else:
yield 0, (DOC_HEADER %
dict(title=self.title,
styledefs=self.get_style_defs('body'),
encoding=self.encoding))
for t, line in inner:
yield t, line
yield 0, DOC_FOOTER
def _wrap_tablelinenos(self, inner):
dummyoutfile = StringIO()
lncount = 0
for t, line in inner:
if t:
lncount += 1
dummyoutfile.write(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
if sp:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if i % sp == 0:
if aln:
lines.append('<a href="#%s-%d" class="special">%*d</a>' %
(la, i, mw, i))
else:
lines.append('<span class="special">%*d</span>' % (mw, i))
else:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
else:
lines = []
for i in range(fl, fl+lncount):
if i % st == 0:
if aln:
lines.append('<a href="#%s-%d">%*d</a>' % (la, i, mw, i))
else:
lines.append('%*d' % (mw, i))
else:
lines.append('')
ls = '\n'.join(lines)
# in case you wonder about the seemingly redundant <div> here: since the
# content in the other cell also is wrapped in a div, some browsers in
# some configurations seem to mess up the formatting...
if nocls:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td><div class="linenodiv" '
'style="background-color: #f0f0f0; padding-right: 10px">'
'<pre style="line-height: 125%">' +
ls + '</pre></div></td><td class="code">')
else:
yield 0, ('<table class="%stable">' % self.cssclass +
'<tr><td class="linenos"><div class="linenodiv"><pre>' +
ls + '</pre></div></td><td class="code">')
yield 0, dummyoutfile.getvalue()
yield 0, '</td></tr></table>'
def _wrap_inlinelinenos(self, inner):
# need a list of lines since we need the width of a single number :(
lines = list(inner)
sp = self.linenospecial
st = self.linenostep
num = self.linenostart
mw = len(str(len(lines) + num - 1))
if self.noclasses:
if sp:
for t, line in lines:
if num % sp == 0:
style = 'background-color: #ffffc0; padding: 0 5px 0 5px'
else:
style = 'background-color: #f0f0f0; padding: 0 5px 0 5px'
yield 1, '<span style="%s">%*s </span>' % (
style, mw, (num % st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, ('<span style="background-color: #f0f0f0; '
'padding: 0 5px 0 5px">%*s </span>' % (
mw, (num % st and ' ' or num)) + line)
num += 1
elif sp:
for t, line in lines:
yield 1, '<span class="lineno%s">%*s </span>' % (
num % sp == 0 and ' special' or '', mw,
(num % st and ' ' or num)) + line
num += 1
else:
for t, line in lines:
yield 1, '<span class="lineno">%*s </span>' % (
mw, (num % st and ' ' or num)) + line
num += 1
def _wrap_lineanchors(self, inner):
s = self.lineanchors
# subtract 1 since we have to increment i *before* yielding
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<a name="%s-%d"></a>' % (s, i) + line
else:
yield 0, line
def _wrap_linespans(self, inner):
s = self.linespans
i = self.linenostart - 1
for t, line in inner:
if t:
i += 1
yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
else:
yield 0, line
def _wrap_div(self, inner):
style = []
if (self.noclasses and not self.nobackground and
self.style.background_color is not None):
style.append('background: %s' % (self.style.background_color,))
if self.cssstyles:
style.append(self.cssstyles)
style = '; '.join(style)
yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
(style and (' style="%s"' % style)) + '>')
for tup in inner:
yield tup
yield 0, '</div>\n'
def _wrap_pre(self, inner):
style = []
if self.prestyles:
style.append(self.prestyles)
if self.noclasses:
style.append('line-height: 125%')
style = '; '.join(style)
if self.filename:
yield 0, ('<span class="filename">' + self.filename + '</span>')
# the empty span here is to keep leading empty lines from being
# ignored by HTML parsers
yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
for tup in inner:
yield tup
yield 0, '</pre>'
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
parts = value.translate(escape_table).split('\n')
if tagsfile and ttype in Token.Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
def _lookup_ctag(self, token):
entry = ctags.TagEntry()
if self._ctags.find(entry, token, 0):
return entry['file'], entry['lineNumber']
else:
return None, None
def _highlight_lines(self, tokensource):
"""
        Highlight the lines specified in the `hl_lines` option by
post-processing the token stream coming from `_format_lines`.
"""
hls = self.hl_lines
for i, (t, value) in enumerate(tokensource):
if t != 1:
yield t, value
if i + 1 in hls: # i + 1 because Python indexes start at 0
if self.noclasses:
style = ''
if self.style.highlight_color is not None:
style = (' style="background-color: %s"' %
(self.style.highlight_color,))
yield 1, '<span%s>%s</span>' % (style, value)
else:
yield 1, '<span class="hll">%s</span>' % value
else:
yield 1, value
def wrap(self, source, outfile):
"""
Wrap the ``source``, which is a generator yielding
individual lines, in custom generators. See docstring
for `format`. Can be overridden.
"""
return self._wrap_div(self._wrap_pre(source))
def format_unencoded(self, tokensource, outfile):
"""
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos == 2:
source = self._wrap_inlinelinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
source = self.wrap(source, outfile)
if self.linenos == 1:
source = self._wrap_tablelinenos(source)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
| gpl-2.0 |
sohail-aspose/Aspose_Slides_Cloud | SDKs/Aspose.Slides_Cloud_SDK_for_Python/asposeslidescloud/models/Image.py | 4 | 1028 | #!/usr/bin/env python
class Image(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually."""
def __init__(self):
"""
Attributes:
swaggerTypes (dict): The key is attribute name and the value is attribute type.
attributeMap (dict): The key is attribute name and the value is json key in definition.
"""
self.swaggerTypes = {
'Width': 'int',
'Height': 'int',
'SelfUri': 'ResourceUri',
'AlternateLinks': 'list[ResourceUri]',
'Links': 'list[ResourceUri]'
}
self.attributeMap = {
'Width': 'Width','Height': 'Height','SelfUri': 'SelfUri','AlternateLinks': 'AlternateLinks','Links': 'Links'}
self.Width = None # int
self.Height = None # int
self.SelfUri = None # ResourceUri
self.AlternateLinks = None # list[ResourceUri]
self.Links = None # list[ResourceUri]
| mit |
seanwisdom/program-with-objects_2 | new_section_python_code.py | 4 | 1119 |
def new_section(concept_title, concept_description):
html_a = '''
<div class="concept">
<div class="concept-title">
<h3>''' + concept_title + '</h3>'
    html_b = '''
</div>
<div class="concept-description">
<p>
''' + concept_description
html_c='''
</p>
</div>
</div>'''
full_html = html_a + html_b + html_c
return full_html
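# Illustrative output of new_section('T', 'D'), with whitespace condensed
# (the actual return value keeps the indentation from the templates above):
#   <div class="concept"><div class="concept-title"><h3>T</h3></div>
#   <div class="concept-description"><p>D</p></div></div>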
def create_HTML(concept):
concept_title = concept[0]
concept_description = concept[1]
return new_section(concept_title, concept_description)
EXAMPLE_LIST_OF_CONCEPTS = [ ['test title 1', 'test description 1'],
['test title 2', 'test description 2'],
['test title 3', 'test description 3']]
def create_HTML_for_list_of_concepts(list_of_concepts):
HTML = ""
for concept in list_of_concepts:
new_HTML = create_HTML(concept)
HTML += new_HTML
return HTML
print create_HTML_for_list_of_concepts(EXAMPLE_LIST_OF_CONCEPTS) | mit |
mluo613/osf.io | scripts/prereg/approve_draft_registrations.py | 28 | 1260 | """ A script for testing DraftRegistrationApprovals. Automatically approves all pending
DraftRegistrationApprovals.
"""
import sys
import logging
from framework.celery_tasks.handlers import celery_teardown_request
from website.app import init_app
from website.project.model import DraftRegistration, Sanction
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARN)
logging.disable(level=logging.INFO)
def main(dry_run=True):
if dry_run:
logger.warn('DRY RUN mode')
pending_approval_drafts = DraftRegistration.find()
need_approval_drafts = [draft for draft in pending_approval_drafts
if draft.approval and draft.requires_approval and draft.approval.state == Sanction.UNAPPROVED]
for draft in need_approval_drafts:
sanction = draft.approval
try:
if not dry_run:
sanction.state = Sanction.APPROVED
sanction._on_complete(None)
sanction.save()
logger.warn('Approved {0}'.format(draft._id))
except Exception as e:
logger.error(e)
if __name__ == '__main__':
dry_run = 'dry' in sys.argv
app = init_app(routes=False)
main(dry_run=dry_run)
celery_teardown_request()
| apache-2.0 |
RussellRiesJr/CoupleComeStatWithMe | ccswm/settings.py | 1 | 3350 | """
Django settings for ccswm project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'b)2xn=0)bhu89x#@*eiwvce6+5*=2+n4((er3^1phiu7@qjgo4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'ccswm.statApi',
'corsheaders'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
'localhost:8080',
'apiUrl',
'couplescomestatwithme.co.uk',
'138.68.146.190',
)
ROOT_URLCONF = 'ccswm.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ccswm.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| mit |
tillrohrmann/flink | flink-python/pyflink/ml/api/ml_environment.py | 9 | 3953 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.dataset.execution_environment import ExecutionEnvironment
from pyflink.datastream.stream_execution_environment import StreamExecutionEnvironment
from pyflink.table.table_environment import BatchTableEnvironment, StreamTableEnvironment
class MLEnvironment(object):
"""
The MLEnvironment stores the necessary context in Flink. Each MLEnvironment
will be associated with a unique ID. The operations associated with the same
MLEnvironment ID will share the same Flink job context. Both MLEnvironment
ID and MLEnvironment can only be retrieved from MLEnvironmentFactory.
.. versionadded:: 1.11.0
"""
def __init__(self, exe_env=None, stream_exe_env=None, batch_tab_env=None, stream_tab_env=None):
self._exe_env = exe_env
self._stream_exe_env = stream_exe_env
self._batch_tab_env = batch_tab_env
self._stream_tab_env = stream_tab_env
def get_execution_environment(self) -> ExecutionEnvironment:
"""
Get the ExecutionEnvironment. If the ExecutionEnvironment has not been set,
        it initializes the ExecutionEnvironment with the default configuration.
:return: the batch ExecutionEnvironment.
.. versionadded:: 1.11.0
"""
if self._exe_env is None:
self._exe_env = ExecutionEnvironment.get_execution_environment()
return self._exe_env
def get_stream_execution_environment(self) -> StreamExecutionEnvironment:
"""
Get the StreamExecutionEnvironment. If the StreamExecutionEnvironment has not been
        set, it initializes the StreamExecutionEnvironment with the default configuration.
:return: the StreamExecutionEnvironment.
.. versionadded:: 1.11.0
"""
if self._stream_exe_env is None:
self._stream_exe_env = StreamExecutionEnvironment.get_execution_environment()
return self._stream_exe_env
def get_batch_table_environment(self) -> BatchTableEnvironment:
"""
Get the BatchTableEnvironment. If the BatchTableEnvironment has not been set,
        it initializes the BatchTableEnvironment with the default configuration.
:return: the BatchTableEnvironment.
.. versionadded:: 1.11.0
"""
if self._batch_tab_env is None:
self._batch_tab_env = BatchTableEnvironment.create(
ExecutionEnvironment.get_execution_environment())
return self._batch_tab_env
def get_stream_table_environment(self) -> StreamTableEnvironment:
"""
Get the StreamTableEnvironment. If the StreamTableEnvironment has not been set,
        it initializes the StreamTableEnvironment with the default configuration.
:return: the StreamTableEnvironment.
.. versionadded:: 1.11.0
"""
if self._stream_tab_env is None:
self._stream_tab_env = StreamTableEnvironment.create(
StreamExecutionEnvironment.get_execution_environment())
return self._stream_tab_env
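# A minimal usage sketch (illustrative; in real code instances are obtained
# via MLEnvironmentFactory, as described in the class docstring, rather than
# constructed directly):
#
#   env = MLEnvironment()
#   t_env = env.get_batch_table_environment()  # created lazily on first access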
| apache-2.0 |
JCROM-Android/jcrom_external_chromium_org | chrome/test/functional/ispy/ispy_core/tests/rendering_test_manager/mock_cloud_bucket.py | 23 | 1390 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Subclass of CloudBucket used for testing."""
from tests.rendering_test_manager import cloud_bucket
class MockCloudBucket(cloud_bucket.CloudBucket):
"""Subclass of CloudBucket used for testing."""
def __init__(self):
"""Initializes the MockCloudBucket with its datastore.
Returns:
An instance of MockCloudBucket.
"""
self.datastore = {}
def Reset(self):
"""Clears the MockCloudBucket's datastore."""
self.datastore = {}
# override
def UploadFile(self, path, contents, content_type):
self.datastore[path] = contents
# override
def DownloadFile(self, path):
if self.datastore.has_key(path):
return self.datastore[path]
else:
raise cloud_bucket.FileNotFoundError
# override
def RemoveFile(self, path):
if self.datastore.has_key(path):
self.datastore.pop(path)
# override
def FileExists(self, path):
return self.datastore.has_key(path)
# override
def GetURL(self, path):
if self.datastore.has_key(path):
return path
else:
raise cloud_bucket.FileNotFoundError
# override
def GetAllPaths(self, prefix):
return (item[0] for item in self.datastore.items()
if item[0].startswith(prefix))
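# Example round-trip (illustrative):
#   bucket = MockCloudBucket()
#   bucket.UploadFile('dir/img.png', 'data', 'image/png')
#   bucket.DownloadFile('dir/img.png')  # -> 'data'
#   list(bucket.GetAllPaths('dir/'))    # -> ['dir/img.png']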
| bsd-3-clause |
prismskylabs/pycounters | src/pycounters/__init__.py | 1 | 3969 | """
PyCounters is a lightweight library to monitor performance in production systems.
It is meant to be used in scenarios where using a profiler is unrealistic due to the overhead it requires.
Use PyCounters to get a high-level and concise overview of what's going on in your production code.
See #### (read the docs) for more information
"""
import logging
from pycounters.reporters.base import CollectingRole
from shortcuts import _reporting_decorator_context_manager
from . import reporters, base
def report_start(name):
""" reports an event's start.
NOTE: you *must* fire off a corresponding event end with report_end
"""
base.THREAD_DISPATCHER.dispatch_event(name, "start", None)
def report_end(name):
""" reports an event's end.
NOTE: you *must* have fired off a corresponding event start with report_start
"""
base.THREAD_DISPATCHER.dispatch_event(name, "end", None)
def report_start_end(name=None):
"""
returns a function decorator and/or context manager which raises start and end events.
If name is None events name is set to the name of the decorated function. In that case report_start_end
can not be used as a context manager.
"""
return _reporting_decorator_context_manager(name)
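# Illustrative usage of the decorator/context manager form (names are
# examples only):
#
#   @report_start_end("requests.process")
#   def process_request(request):
#       ...
#
#   with report_start_end("db.query"):
#       run_query()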
def report_value(name, value):
"""
reports a value event to the counters.
"""
base.THREAD_DISPATCHER.dispatch_event(name, "value", value)
def register_counter(counter, throw_if_exists=True):
""" Register a counter with PyCounters
"""
base.GLOBAL_REGISTRY.add_counter(counter, throw=throw_if_exists)
def unregister_counter(counter=None, name=None):
""" Removes a previously registered counter
"""
base.GLOBAL_REGISTRY.remove_counter(counter=counter, name=name)
def output_report():
"""
Manually cause the current values of all registered counters to be reported.
"""
reporters.base.GLOBAL_REPORTING_CONTROLLER.report()
def start_auto_reporting(seconds=300):
"""
Start reporting in a background thread. Reporting frequency is set by seconds param.
"""
reporters.base.GLOBAL_REPORTING_CONTROLLER.start_auto_report(seconds=seconds)
def stop_auto_reporting():
""" Stop auto reporting """
reporters.base.GLOBAL_REPORTING_CONTROLLER.stop_auto_report()
def register_reporter(reporter=None):
"""
add a reporter to PyCounters. Registered reporters will output collected metrics
"""
reporters.base.GLOBAL_REPORTING_CONTROLLER.register_reporter(reporter)
def unregister_reporter(reporter=None):
"""
remove a reporter from PyCounters.
"""
reporters.base.GLOBAL_REPORTING_CONTROLLER.unregister_reporter(reporter)
def configure_multi_process_collection(collecting_address=[("", 60907), ("", 60906)], timeout_in_sec=120,
role=CollectingRole.AUTO_ROLE):
"""
configures PyCounters to collect values from multiple processes
    :param collecting_address: a list of (address, port) tuples naming the machines and ports data should be collected on.
the extra tuples are used as backup in case the first address/port combination is (temporarily)
unavailable. PyCounters would automatically start using the preferred address/port when it becomes
available again. This behavior is handy when restarting the program and the old port is not yet
freed by the OS.
    :param timeout_in_sec: timeout configuration for connections. The default should be good enough for practically
everyone.
:param role: the role of this process. Leave at the default of AUTO_ROLE for pycounters to automatically choose
a collecting leader.
"""
reporters.base.GLOBAL_REPORTING_CONTROLLER.configure_multi_process(collecting_address=collecting_address,
timeout_in_sec=timeout_in_sec, debug_log=logging.getLogger(name="pycounters_multi_proc"), role=role)
| apache-2.0 |
bitifirefly/edx-platform | common/djangoapps/microsite_configuration/templatetags/microsite.py | 107 | 2058 | """
Template tags and helper functions for displaying breadcrumbs in page titles
based on the current microsite.
"""
from django import template
from django.conf import settings
from microsite_configuration import microsite
from django.templatetags.static import static
register = template.Library()
def page_title_breadcrumbs(*crumbs, **kwargs):
"""
This function creates a suitable page title in the form:
Specific | Less Specific | General | edX
It will output the correct platform name for the request.
Pass in a `separator` kwarg to override the default of " | "
"""
separator = kwargs.get("separator", " | ")
if crumbs:
return u'{}{}{}'.format(separator.join(crumbs), separator, platform_name())
else:
return platform_name()
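# Illustrative results (assuming the platform name resolves to u'edX'):
#   page_title_breadcrumbs(u'Unit', u'Course')  # -> u'Unit | Course | edX'
#   page_title_breadcrumbs()                    # -> u'edX'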
@register.simple_tag(name="page_title_breadcrumbs", takes_context=True)
def page_title_breadcrumbs_tag(context, *crumbs):
"""
Django template that creates breadcrumbs for page titles:
{% page_title_breadcrumbs "Specific" "Less Specific" General %}
"""
return page_title_breadcrumbs(*crumbs)
@register.simple_tag(name="platform_name")
def platform_name():
"""
Django template tag that outputs the current platform name:
{% platform_name %}
"""
return microsite.get_value('platform_name', settings.PLATFORM_NAME)
@register.simple_tag(name="favicon_path")
def favicon_path(default=getattr(settings, 'FAVICON_PATH', 'images/favicon.ico')):
"""
Django template tag that outputs the configured favicon:
{% favicon_path %}
"""
return static(microsite.get_value('favicon_path', default))
@register.simple_tag(name="microsite_css_overrides_file")
def microsite_css_overrides_file():
"""
Django template tag that outputs the css import for a:
{% microsite_css_overrides_file %}
"""
file_path = microsite.get_value('css_overrides_file', None)
if file_path is not None:
return "<link href='{}' rel='stylesheet' type='text/css'>".format(static(file_path))
else:
return ""
| agpl-3.0 |
moxon6/chemlab | tests/test_core.py | 4 | 12153 | """Test core types like Molecule and Atom."""
from chemlab.core import Molecule, Atom
from chemlab.core import System, subsystem_from_molecules, subsystem_from_atoms
from chemlab.core import merge_systems
from chemlab.core import crystal, random_lattice_box
import numpy as np
from nose.tools import eq_, assert_equals
from nose.plugins.attrib import attr
from chemlab.graphics import display_system
def assert_npequal(a, b):
assert np.array_equal(a, b), '\n{} != {}'.format(a, b)
def assert_eqbonds(a, b):
# compare bonds by sorting
a = np.sort(np.sort(a, axis=0))
b = np.sort(np.sort(b, axis=0))
assert_npequal(a, b)
def assert_allclose(a, b):
assert np.allclose(a, b), '\n{} != {}'.format(a, b)
def _make_water():
mol = Molecule([Atom("O", [-4.99, 2.49, 0.0]),
Atom("H", [-4.02, 2.49, 0.0]),
Atom("H", [-5.32, 1.98, 1.0])],
bonds=[[0, 1], [0, 2]],
export={'hello': 1.0})
return mol
class TestMolecule(object):
def test_init(self):
mol = _make_water()
assert_npequal(mol.type_array, ['O', 'H', 'H'])
class TestSystem(object):
def _make_molecules(self):
wat = _make_water()
wat.r_array *= 0.1
# Initialization from empty
s = System.empty(4, 4*3)
mols = []
# Array to be compared
for _ in range(s.n_mol):
wat.r_array += 0.1
mols.append(wat.copy())
return mols
def _assert_init(self, system):
assert_npequal(system.type_array, ['O', 'H', 'H',
'O', 'H', 'H',
'O', 'H', 'H',
'O', 'H', 'H',])
# Test atom coordinates
#print "Atom Coordinates"
#print s.r_array
# Test atom masses
#print s.m_array
# Test charges
assert_allclose(system.charge_array, [0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 0.0,
0.0, 0.0, 0.0])
# Test mol indices
assert_npequal(system.mol_indices, [0, 3, 6, 9])
# Test mol n_atoms
assert_npequal(system.mol_n_atoms, [3, 3, 3, 3])
# Test get molecule entry
assert_npequal(system.molecules[0].type_array, ['O', 'H', 'H'])
# Test derived property -- center of mass
assert_allclose(system.get_derived_molecule_array('center_of_mass'),
[[-1.00621917, 0.05572538, 0.02237967],
[-0.73978867, 0.07251013, 0.03916442],
[-0.47335818, 0.08929488, 0.05594917],
[-0.20692768, 0.10607963, 0.07273392]])
# Test bonds
assert_eqbonds(system.bonds, [[0, 1], [0, 2],
[3, 4], [3, 5],
[6, 7], [6, 8],
[9, 10], [9, 11]])
# Test bond orders
#print 'Test Indexing of system.molecule'
#print s.molecules[0]
#print s.molecules[:], s.molecules[:-5]
#print s.atoms[0]
#print s.atoms[:]
def test_init(self):
mols = self._make_molecules()
system = System(mols)
self._assert_init(system)
def test_from_empty(self):
mols = self._make_molecules()
system = System.empty(4, 4*3)
[system.add(mol) for mol in mols]
self._assert_init(system)
def test_from_actual_empty(self):
mols = self._make_molecules()
system = System([])
[system.add(mol) for mol in mols]
def test_from_arrays(self):
mols = self._make_molecules()
r_array = np.concatenate([m.r_array for m in mols])
type_array = np.concatenate([m.type_array for m in mols])
mol_indices = [0, 3, 6, 9]
bonds = np.concatenate([m.bonds + 3*i for i, m in enumerate(mols)])
system = System.from_arrays(r_array=r_array,
type_array=type_array,
mol_indices=mol_indices,
bonds=bonds)
self._assert_init(system)
def test_subsystem_from_molecules(self):
mols = self._make_molecules()
system = System(mols)
subsystem = subsystem_from_molecules(system, np.array([0, 2]))
assert_equals(subsystem.n_mol, 2)
def test_subsystem_from_atoms(self):
mols = self._make_molecules()
system = System(mols)
sub = subsystem_from_atoms(system, np.array([True, True, False,
False, False, False,
False, False, False]))
assert_equals(sub.n_mol, 1)
def test_remove_atoms(self):
# This will remove the first and last molecules
mols = self._make_molecules()
system = System(mols)
system.remove_atoms([0, 1, 11])
assert_eqbonds(system.bonds,
[[0, 1], [0, 2],
[3, 4], [3, 5]])
assert_npequal(system.type_array,
np.array(['O', 'H', 'H', 'O', 'H', 'H'],
dtype='object'))
def test_reorder_molecules(self):
mols = self._make_molecules()
system = System(mols)
system.bonds = np.array([[0, 1], [3, 5]])
# Reordering
system.reorder_molecules([1, 0, 2, 3])
assert_eqbonds(system.bonds, [[0, 2],
[3, 4]])
@attr('slow')
def test_merge_system():
# take a protein
from chemlab.io import datafile
from chemlab.graphics import display_system
from chemlab.db import ChemlabDB
water = ChemlabDB().get("molecule", "example.water")
prot = datafile("tests/data/3ZJE.pdb").read("system")
# Take a box of water
NWAT = 50000
bsize = 20.0
pos = np.random.random((NWAT, 3)) * bsize
wat = water.copy()
s = System.empty(NWAT, NWAT*3, box_vectors=np.eye(3)*bsize)
for i in range(NWAT):
wat.move_to(pos[i])
s.add(wat)
prot.r_array += 10
s = merge_systems(s, prot, 0.5)
display_system(s, 'ball-and-stick')
def test_crystal():
'''Building a crystal by using spacegroup module'''
na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
cl = Molecule([Atom('Cl', [0.0, 0.0, 0.0])])
    # Fract position of Na and Cl, space group 225
tsys = crystal([[0.0, 0.0, 0.0],[0.5, 0.5, 0.5]], [na, cl], 225, repetitions=[13,13,13])
def test_sort():
na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
cl = Molecule([Atom('Cl', [0.0, 0.0, 0.0])])
    # Fract position of Na and Cl, space group 225
tsys = crystal([[0.0, 0.0, 0.0],[0.5, 0.5, 0.5]], [na, cl], 225, repetitions=[3,3,3])
tsys.sort()
    assert np.all(tsys.type_array[:tsys.n_mol // 2] == 'Cl')
def test_bonds():
from chemlab.io import datafile
bz = datafile("tests/data/benzene.mol").read('molecule')
na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
# Adding bonds
s = System.empty(2, 2*bz.n_atoms)
s.add(bz)
assert_npequal(s.bonds, bz.bonds)
assert_npequal(bz.bond_orders, [1, 2, 2, 1, 1, 2])
assert_npequal(s.bond_orders, bz.bond_orders)
s.add(bz)
assert_npequal(s.bonds, np.concatenate((bz.bonds, bz.bonds + 6)))
#assert_npequal(s.bond_orders)
# Reordering
orig = np.array([[0, 1], [6, 8]])
s.bonds = orig
s.reorder_molecules([1, 0])
assert_npequal(s.bonds, np.array([[6, 7], [0, 2]]))
# This doesn't change the bond_ordering
# Selection
ss = subsystem_from_molecules(s, [1])
assert_npequal(ss.bonds, np.array([[0, 1]]))
import inspect
ss2 = System.from_arrays(**dict(inspect.getmembers(ss)))
ss2.r_array += 10.0
ms = merge_systems(ss, ss2)
assert_npequal(ms.bonds, np.array([[0, 1], [6, 7]]))
assert_npequal(ms.bond_orders, np.array([1, 1]))
# From_arrays
s = System.from_arrays(mol_indices=[0], bonds=bz.bonds, **bz.__dict__)
assert_npequal(s.bonds, bz.bonds)
assert_npequal(s.bond_orders, bz.bond_orders)
# Get molecule entry
# Test the bonds when they're 0
s.bonds = np.array([])
assert_equals(s.get_derived_molecule_array('formula'), 'C6')
def test_bond_orders():
# Get a molecule with some bonds
wat = _make_water()
wat_o = wat.copy()
# 0,1 0,2
assert_npequal(wat.bond_orders, np.array([1, 1]))
# Remove a bond
wat.bonds = np.array([[0, 1]])
assert_npequal(wat.bond_orders, np.array([1]))
wat.bond_orders = np.array([2])
# Try with a system
s = System.empty(2, 6)
s.add(wat_o)
s.add(wat)
assert_npequal(s.bond_orders , np.array([1, 1, 2]))
s.reorder_molecules([1, 0]) # We don't actually sort bonds again
assert_npequal(s.bond_orders , np.array([1, 1, 2]))
s.bonds = np.array([[0, 1], [0, 2], [3, 4], [3, 5]])
assert_npequal(s.bond_orders, np.array([1, 1, 2, 1]))
def test_random():
'''Testing random made box'''
from chemlab.db import ChemlabDB
cdb = ChemlabDB()
na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
cl = Molecule([Atom('Cl', [0.0, 0.0, 0.0])])
wat = cdb.get("molecule", 'gromacs.spce')
s = random_lattice_box([na, cl, wat], [160, 160, 160], [4, 4, 4])
#display_system(s)
def test_bond_guessing():
from chemlab.db import ChemlabDB, CirDB
from chemlab.graphics import display_molecule
from chemlab.io import datafile
mol = datafile('tests/data/3ZJE.pdb').read('molecule')
print(mol.r_array)
mol.guess_bonds()
assert mol.bonds.size > 0
# We should find the bond guessing also for systems
# System Made of two benzenes
bz = datafile("tests/data/benzene.mol").read('molecule')
bzbonds = bz.bonds
bz.bonds = np.array([])
# Separating the benzenes by large amount
bz2 = bz.copy()
bz2.r_array += 2.0
s = System([bz, bz2])
s.guess_bonds()
assert_eqbonds(s.bonds, np.concatenate((bzbonds, bzbonds + 6)))
# Separating benzenes by small amount
bz2 = bz.copy()
bz2.r_array += 0.15
s = System([bz, bz2])
s.guess_bonds()
assert_eqbonds(s.bonds, np.concatenate((bzbonds, bzbonds + 6)))
#display_molecule(mol)
def test_extending():
from chemlab.core.attributes import NDArrayAttr, MArrayAttr
from chemlab.core.fields import AtomicField
class MySystem(System):
attributes = System.attributes + [NDArrayAttr('v_array', 'v_array', np.float, 3)]
class MyMolecule(Molecule):
attributes = Molecule.attributes + [MArrayAttr('v_array', 'v', np.float)]
class MyAtom(Atom):
fields = Atom.fields + [AtomicField('v', default=lambda at: np.zeros(3, np.float))]
na = MyMolecule([MyAtom.from_fields(type='Na', r=[0.0, 0.0, 0.0], v=[1.0, 0.0, 0.0])])
cl = MyMolecule([MyAtom.from_fields(type='Cl', r=[0.0, 0.0, 0.0])])
s = MySystem([na, cl])
na_atom = MyAtom.from_fields(type='Na', r=[0.0, 0.0, 0.0], v=[1.0, 0.0, 0.0])
print(na_atom.copy())
print(s.v_array)
# Try to adapt
orig_s = s.astype(System)
s = orig_s.astype(MySystem) # We lost the v information by converting back and forth
print(orig_s, s)
print(s.v_array)
# Adapt for molecule and atoms
print(type(na.astype(Molecule)))
na_atom = MyAtom.from_fields(type='Na', r=[0.0, 0.0, 0.0], v=[1.0, 0.0, 0.0])
print(type(na_atom.astype(Atom)))
def test_serialization():
cl = Molecule([Atom.from_fields(type='Cl', r=[0.0, 0.0, 0.0])])
jsonstr = cl.tojson()
assert Molecule.from_json(jsonstr).tojson() == jsonstr
na = Molecule([Atom('Na', [0.0, 0.0, 0.0])])
cl = Molecule([Atom('Cl', [0.0, 0.0, 0.0])])
    # Fract position of Na and Cl, space group 225
tsys = crystal([[0.0, 0.0, 0.0],[0.5, 0.5, 0.5]], [na, cl], 225, repetitions=[3,3,3])
jsonstr = tsys.tojson()
assert System.from_json(jsonstr).tojson() == jsonstr
| gpl-3.0 |
shtouff/django | django/db/utils.py | 12 | 10612 | import inspect
import os
import pkgutil
import warnings
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
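# Illustrative use (sketch; `connection` stands for a DatabaseWrapper whose
# `Database` attribute exposes the PEP 249 exception classes):
#
#   with DatabaseErrorWrapper(connection):
#       cursor.execute(sql)  # backend errors re-raised as Django's wrappers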
def load_backend(backend_name):
# Look for a fully qualified database backend name
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name != 'dummy']
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if self._databases[DEFAULT_DB_ALIAS] == {}:
self._databases[DEFAULT_DB_ALIAS]['ENGINE'] = 'django.db.backends.dummy'
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings is provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', None)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
argspec = inspect.getargspec(router.allow_migrate)
if len(argspec.args) == 3 and not argspec.keywords:
warnings.warn(
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 2.0.",
RemovedInDjango20Warning)
model = hints.get('model')
allow = None if model is None else method(db, model)
else:
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
| bsd-3-clause |
Vixionar/django | tests/gis_tests/test_spatialrefsys.py | 319 | 4891 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.db import connection
from django.test import skipUnlessDBFeature
from django.utils import six
from .utils import SpatialRefSys, oracle, postgis, spatialite
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj4_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj4_re': r'\+proj=lcc \+lat_1=30.28333333333333 \+lat_2=28.38333333333333 \+lat_0=27.83333333333333 '
r'\+lon_0=-99 \+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@unittest.skipUnless(HAS_GDAL, "SpatialRefSysTest needs gdal support")
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(unittest.TestCase):
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
            # Also, Oracle Spatial seems to add extraneous info to fields, hence
            # the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertTrue(srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
six.assertRegex(self, srs.proj4text, sd['proj4_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertTrue(sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertTrue(sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
six.assertRegex(self, srs.proj4, sd['proj4_re'])
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite < 4
if not spatialite or connection.ops.spatial_version[0] >= 4:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
| bsd-3-clause |
selahssea/ggrc-core | src/ggrc_basic_permissions/migrations/versions/20130627032526_3bf5430a8c6f_add_roles_and_permis.py | 7 | 1751 | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Add roles and permissions tables
Revision ID: 3bf5430a8c6f
Revises: None
Create Date: 2013-06-27 03:25:26.571232
"""
# revision identifiers, used by Alembic.
revision = '3bf5430a8c6f'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('permissions_json', sa.Text(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('modified_by_id', sa.Integer()),
sa.Column(
'created_at', sa.DateTime(), default=sa.text('current_timestamp')),
sa.Column(
'updated_at',
sa.DateTime(),
default=sa.text('current_timestamp'),
onupdate=sa.text('current_timestamp')),
sa.Column('context_id', sa.Integer()),
)
op.create_table('users_roles',
sa.Column('id', sa.Integer(), nullable=False, primary_key=True),
sa.Column('role_id', sa.Integer(), nullable=False),
sa.Column('user_email', sa.String(length=128), nullable=False),
sa.Column('target_context_id', sa.Integer(), nullable=False),
sa.Column('modified_by_id', sa.Integer()),
sa.Column(
'created_at', sa.DateTime(), default=sa.text('current_timestamp')),
sa.Column(
'updated_at',
sa.DateTime(),
default=sa.text('current_timestamp'),
onupdate=sa.text('current_timestamp')),
sa.Column('context_id', sa.Integer()),
sa.ForeignKeyConstraint(['role_id',], ['roles.id',]),
)
def downgrade():
op.drop_table('users_roles')
op.drop_table('roles')
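# Illustrative invocation (assuming a configured alembic.ini for this repo):
#   alembic upgrade 3bf5430a8c6f   # applies upgrade() above
#   alembic downgrade -1           # reverts it via downgrade()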
| apache-2.0 |
shubhdev/edxOnBaadal | lms/djangoapps/commerce/tests/__init__.py | 55 | 3551 | # -*- coding: utf-8 -*-
""" Commerce app tests package. """
import datetime
import json
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from freezegun import freeze_time
import httpretty
import jwt
import mock
from ecommerce_api_client import auth
from commerce import ecommerce_api_client
from student.tests.factories import UserFactory
JSON = 'application/json'
TEST_PUBLIC_URL_ROOT = 'http://www.example.com'
TEST_API_URL = 'http://www-internal.example.com/api'
TEST_API_SIGNING_KEY = 'edx'
TEST_BASKET_ID = 7
TEST_ORDER_NUMBER = '100004'
TEST_PAYMENT_DATA = {
'payment_processor_name': 'test-processor',
'payment_form_data': {},
'payment_page_url': 'http://example.com/pay',
}
@override_settings(ECOMMERCE_API_SIGNING_KEY=TEST_API_SIGNING_KEY, ECOMMERCE_API_URL=TEST_API_URL)
class EcommerceApiClientTest(TestCase):
""" Tests to ensure the client is initialized properly. """
TEST_USER_EMAIL = 'test@example.com'
TEST_CLIENT_ID = 'test-client-id'
def setUp(self):
super(EcommerceApiClientTest, self).setUp()
self.user = UserFactory()
self.user.email = self.TEST_USER_EMAIL
self.user.save() # pylint: disable=no-member
@httpretty.activate
@freeze_time('2015-7-2')
@override_settings(JWT_ISSUER='http://example.com/oauth', JWT_EXPIRATION=30)
def test_tracking_context(self):
"""
Ensure the tracking context is set up in the api client correctly and
automatically.
"""
# fake an ecommerce api request.
httpretty.register_uri(
httpretty.POST,
'{}/baskets/1/'.format(TEST_API_URL),
status=200, body='{}',
adding_headers={'Content-Type': JSON}
)
mock_tracker = mock.Mock()
mock_tracker.resolve_context = mock.Mock(return_value={'client_id': self.TEST_CLIENT_ID})
with mock.patch('commerce.tracker.get_tracker', return_value=mock_tracker):
ecommerce_api_client(self.user).baskets(1).post()
# make sure the request's JWT token payload included correct tracking context values.
actual_header = httpretty.last_request().headers['Authorization']
expected_payload = {
'username': self.user.username,
'full_name': self.user.profile.name,
'email': self.user.email,
'iss': settings.JWT_ISSUER,
'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=settings.JWT_EXPIRATION),
'tracking_context': {
'lms_user_id': self.user.id, # pylint: disable=no-member
'lms_client_id': self.TEST_CLIENT_ID,
},
}
expected_header = 'JWT {}'.format(jwt.encode(expected_payload, TEST_API_SIGNING_KEY))
self.assertEqual(actual_header, expected_header)
@httpretty.activate
def test_client_unicode(self):
"""
The client should handle json responses properly when they contain
unicode character data.
Regression test for ECOM-1606.
"""
expected_content = '{"result": "Préparatoire"}'
httpretty.register_uri(
httpretty.GET,
'{}/baskets/1/order/'.format(TEST_API_URL),
status=200, body=expected_content,
adding_headers={'Content-Type': JSON},
)
actual_object = ecommerce_api_client(self.user).baskets(1).order.get()
self.assertEqual(actual_object, {u"result": u"Préparatoire"})
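# Illustrative note (not a test): the client is slumber-style, so attribute
# chains map onto URLs -- ecommerce_api_client(self.user).baskets(1).order.get()
# issues GET {ECOMMERCE_API_URL}/baskets/1/order/ with a JWT Authorization header,
# as exercised in the tests above.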
| agpl-3.0 |
delinhabit/django | tests/template_tests/filter_tests/test_urlizetrunc.py | 201 | 3348 | from django.template.defaultfilters import urlizetrunc
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class UrlizetruncTests(SimpleTestCase):
@setup({'urlizetrunc01':
'{% autoescape off %}{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}{% endautoescape %}'})
def test_urlizetrunc01(self):
output = self.engine.render_to_string(
'urlizetrunc01',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
@setup({'urlizetrunc02': '{{ a|urlizetrunc:"8" }} {{ b|urlizetrunc:"8" }}'})
def test_urlizetrunc02(self):
output = self.engine.render_to_string(
'urlizetrunc02',
{
'a': '"Unsafe" http://example.com/x=&y=',
'b': mark_safe('"Safe" http://example.com?x=&y='),
},
)
self.assertEqual(
output,
'"Unsafe" <a href="http://example.com/x=&y=" rel="nofollow">http:...</a> '
'"Safe" <a href="http://example.com?x=&y=" rel="nofollow">http:...</a>'
)
class FunctionTests(SimpleTestCase):
def test_truncate(self):
uri = 'http://31characteruri.com/test/'
self.assertEqual(len(uri), 31)
self.assertEqual(
urlizetrunc(uri, 31),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/test/</a>',
)
self.assertEqual(
urlizetrunc(uri, 30),
'<a href="http://31characteruri.com/test/" rel="nofollow">'
'http://31characteruri.com/t...</a>',
)
self.assertEqual(
urlizetrunc(uri, 2),
'<a href="http://31characteruri.com/test/"'
' rel="nofollow">...</a>',
)
def test_overtruncate(self):
self.assertEqual(
urlizetrunc('http://short.com/', 20), '<a href='
'"http://short.com/" rel="nofollow">http://short.com/</a>',
)
def test_query_string(self):
self.assertEqual(
urlizetrunc('http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&meta=', 20),
'<a href="http://www.google.co.uk/search?hl=en&q=some+long+url&btnG=Search&'
'meta=" rel="nofollow">http://www.google...</a>',
)
def test_non_string_input(self):
self.assertEqual(urlizetrunc(123, 1), '123')
def test_autoescape(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 10),
'foo<a href=" <a href="http://google.com" rel="nofollow">google.com</a> ">bar</a>buz',
)
def test_autoescape_off(self):
self.assertEqual(
urlizetrunc('foo<a href=" google.com ">bar</a>buz', 9, autoescape=False),
'foo<a href=" <a href="http://google.com" rel="nofollow">google...</a> ">bar</a>buz',
)
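# Quick illustrative reference (not part of the test suite): urlizetrunc wraps
# bare URLs in anchors and truncates only the *displayed* link text, e.g.
#   urlizetrunc('see http://example.com/path', 10)
#   # -> 'see <a href="http://example.com/path" rel="nofollow">http://...</a>'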
| bsd-3-clause |
saeedghsh/SSRR13 | Andreas/slam6d/3rdparty/lastools/ArcGIS_toolbox/scripts/lasheight_classify.py | 2 | 7125 | #
# lasheight_classify.py
#
# (c) 2012, Martin Isenburg
# LASSO - rapid tools to catch reality
#
# uses lasheight to compute the height of LiDAR points above the ground
# and uses the height information to classify the points.
#
# The LiDAR input can be in LAS/LAZ/BIN/TXT/SHP/... format.
# The LiDAR output can be in LAS/LAZ/BIN/TXT format.
#
# for licensing details see http://rapidlasso.com/download/LICENSE.txt
#
import sys, os, arcgisscripting, subprocess
def return_classification(classification):
if (classification == "created, never classified (0)"):
return "0"
if (classification == "unclassified (1)"):
return "1"
if (classification == "ground (2)"):
return "2"
if (classification == "low vegetation (3)"):
return "3"
if (classification == "medium vegetation (4)"):
return "4"
if (classification == "high vegetation (5)"):
return "5"
if (classification == "building (6)"):
return "6"
if (classification == "low point (7)"):
return "7"
if (classification == "keypoint (8)"):
return "8"
if (classification == "water (9)"):
return "9"
if (classification == "high point (10)"):
return "10"
if (classification == "(11)"):
return "11"
if (classification == "overlap point (12)"):
return "12"
if (classification == "(13)"):
return "13"
if (classification == "(14)"):
return "14"
if (classification == "(15)"):
return "15"
if (classification == "(16)"):
return "16"
if (classification == "(17)"):
return "17"
if (classification == "(18)"):
return "18"
return "unknown"
def check_output(command,console):
if console == True:
process = subprocess.Popen(command)
else:
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)
output,error = process.communicate()
returncode = process.poll()
return returncode,output
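# Illustrative call (hypothetical arguments): with console=False the helper
# captures merged stdout/stderr as text, e.g.
#   returncode, output = check_output([lasheight_path, "-i", "in.las"], False)
#   # returncode is 0 on success; output holds the tool's combined text output
# (with console=True the output goes straight to the console and 'output' is None)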
### create the geoprocessor object
gp = arcgisscripting.create(9.3)
### report that something is happening
gp.AddMessage("Starting lasheight ...")
### get number of arguments
argc = len(sys.argv)
### report arguments (for debug)
#gp.AddMessage("Arguments:")
#for i in range(0, argc):
# gp.AddMessage("[" + str(i) + "]" + sys.argv[i])
### get the path to the LAStools binaries
lastools_path = os.path.dirname(os.path.dirname(os.path.dirname(sys.argv[0])))+"\\bin"
### check if path exists
if os.path.exists(lastools_path) == False:
gp.AddMessage("Cannot find .\lastools\bin at " + lastools_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lastools_path + " ...")
### create the full path to the lasheight executable
lasheight_path = lastools_path+"\\lasheight.exe"
### check if executable exists
if os.path.exists(lasheight_path) == False:
gp.AddMessage("Cannot find lasheight.exe at " + lasheight_path)
sys.exit(1)
else:
gp.AddMessage("Found " + lasheight_path + " ...")
### create the command string for lasheight.exe
command = [lasheight_path]
### maybe use '-verbose' option
if sys.argv[argc-1] == "true":
command.append("-v")
### add input LiDAR
command.append("-i")
command.append(sys.argv[1])
### maybe use ground points from external file
if sys.argv[2] != "#":
command.append("-ground_points")
command.append(sys.argv[2])
### else maybe use points with a different classification as ground
elif sys.argv[3] != "#":
command.append("-class")
command.append(return_classification(sys.argv[3]))
### maybe we should ignore/preserve some existing classifications when classifying
if sys.argv[4] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[4]))
### maybe we should ignore/preserve some more existing classifications when classifying
if sys.argv[5] != "#":
command.append("-ignore_class")
command.append(return_classification(sys.argv[5]))
### maybe we classify points below
if sys.argv[6] != "#":
command.append("-classify_below")
command.append(sys.argv[7])
command.append(return_classification(sys.argv[6]))
### maybe we classify points between [interval 1]
if sys.argv[8] != "#":
command.append("-classify_between")
command.append(sys.argv[9])
command.append(sys.argv[10])
command.append(return_classification(sys.argv[8]))
### maybe we classify points between [interval 2]
if sys.argv[11] != "#":
command.append("-classify_between")
command.append(sys.argv[12])
command.append(sys.argv[13])
command.append(return_classification(sys.argv[11]))
### maybe we classify points between [interval 3]
if sys.argv[14] != "#":
command.append("-classify_between")
command.append(sys.argv[15])
command.append(sys.argv[16])
command.append(return_classification(sys.argv[14]))
### maybe we classify points above
if sys.argv[17] != "#":
command.append("-classify_above")
command.append(sys.argv[18])
command.append(return_classification(sys.argv[17]))
### this is where the output arguments start
out = 19
### maybe an output format was selected
if sys.argv[out] != "#":
if sys.argv[out] == "las":
command.append("-olas")
elif sys.argv[out] == "laz":
command.append("-olaz")
elif sys.argv[out] == "bin":
command.append("-obin")
elif sys.argv[out] == "xyzc":
command.append("-otxt")
command.append("-oparse")
command.append("xyzc")
elif sys.argv[out] == "xyzci":
command.append("-otxt")
command.append("-oparse")
command.append("xyzci")
elif sys.argv[out] == "txyzc":
command.append("-otxt")
command.append("-oparse")
command.append("txyzc")
elif sys.argv[out] == "txyzci":
command.append("-otxt")
command.append("-oparse")
command.append("txyzci")
### maybe an output file name was selected
if sys.argv[out+1] != "#":
command.append("-o")
command.append(sys.argv[out+1])
### maybe an output directory was selected
if sys.argv[out+2] != "#":
command.append("-odir")
command.append(sys.argv[out+2])
### maybe an output appendix was selected
if sys.argv[out+3] != "#":
command.append("-odix")
command.append(sys.argv[out+3])
### report command string
gp.AddMessage("LAStools command line:")
command_length = len(command)
command_string = str(command[0])
for i in range(1, command_length):
command_string = command_string + " " + str(command[i])
gp.AddMessage(command_string)
### run command
returncode,output = check_output(command, False)
### report output of lasheight
gp.AddMessage(str(output))
### check return code
if returncode != 0:
gp.AddMessage("Error. lasheight failed.")
sys.exit(1)
### report happy end
gp.AddMessage("Success. lasheight done.")
| bsd-3-clause |
pinireznik/antitude | agents/skynet/scripts/restart-proc.py | 5 | 1943 | #!/usr/bin/python -u
import socket
import time
import os
import sys
from subprocess import call
import random
FILENAME_BREAK = "/tmp/break.tmp"
FILENAME_OVERLOAD = "/tmp/load.tmp"
IP_ADDRESS = socket.gethostbyname(socket.gethostname())
MEMORY_LIMIT = "50"
FIXED_STRING = "event FIXED " + IP_ADDRESS
LOG_FILE = "/tmp/logging/" + IP_ADDRESS + ".log"
SIMULATION_DIRECTORY = "/tmp/simulation/" + IP_ADDRESS
MEMORY_FILE = SIMULATION_DIRECTORY + "/memory.tmp"
LAST_MEM = None
while True:
time.sleep(1)
memory_use = None
if os.path.isfile(FILENAME_BREAK):
try:
print "Restarting service and removing " + FILENAME_BREAK
call(["serf", "event", "-coalesce=false", "FIXING", IP_ADDRESS])
time.sleep(random.randint(2, 4))
try:
os.remove(FILENAME_BREAK)
except:
pass #don't care if it's already been deleted
call(["serf", "event", "-coalesce=false", "FIXED", IP_ADDRESS])
except Exception as e:
call(["serf", "event", "-coalesce=false", "EXCEPTION", "%s" % e])
if os.path.isfile(FILENAME_OVERLOAD):
print "Restarting service and removing" + FILENAME_OVERLOAD
try:
os.remove(FILENAME_OVERLOAD)
except:
pass # don't care if it's already been deleted
call(["serf", "event", "-coalesce=false", "OVERLOADED", IP_ADDRESS])
if os.path.isfile(MEMORY_FILE):
print "Found memory file"
with open(MEMORY_FILE, 'r') as f:
memory_use = f.readline().rstrip()
print "Memory use: " + memory_use
if LAST_MEM != memory_use:
print "Memory use changed, sending event"
call(["serf", "event", "-coalesce=false", "MEMORY_LEVEL", "MEMORY_LEVEL=%s IP=%s" % (memory_use, IP_ADDRESS)])
            LAST_MEM = memory_use  # plain assignment suffices here; 'global' was a no-op at module scope
| apache-2.0 |
mesonbuild/meson | mesonbuild/ast/visitor.py | 2 | 4785 | # Copyright 2019 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This class contains the basic functionality needed to run any interpreter
# or an interpreter-based tool
from .. import mparser
class AstVisitor:
def __init__(self) -> None:
pass
def visit_default_func(self, node: mparser.BaseNode) -> None:
pass
def visit_BooleanNode(self, node: mparser.BooleanNode) -> None:
self.visit_default_func(node)
def visit_IdNode(self, node: mparser.IdNode) -> None:
self.visit_default_func(node)
def visit_NumberNode(self, node: mparser.NumberNode) -> None:
self.visit_default_func(node)
def visit_StringNode(self, node: mparser.StringNode) -> None:
self.visit_default_func(node)
def visit_ContinueNode(self, node: mparser.ContinueNode) -> None:
self.visit_default_func(node)
def visit_BreakNode(self, node: mparser.BreakNode) -> None:
self.visit_default_func(node)
def visit_ArrayNode(self, node: mparser.ArrayNode) -> None:
self.visit_default_func(node)
node.args.accept(self)
def visit_DictNode(self, node: mparser.DictNode) -> None:
self.visit_default_func(node)
node.args.accept(self)
def visit_EmptyNode(self, node: mparser.EmptyNode) -> None:
self.visit_default_func(node)
def visit_OrNode(self, node: mparser.OrNode) -> None:
self.visit_default_func(node)
node.left.accept(self)
node.right.accept(self)
def visit_AndNode(self, node: mparser.AndNode) -> None:
self.visit_default_func(node)
node.left.accept(self)
node.right.accept(self)
def visit_ComparisonNode(self, node: mparser.ComparisonNode) -> None:
self.visit_default_func(node)
node.left.accept(self)
node.right.accept(self)
def visit_ArithmeticNode(self, node: mparser.ArithmeticNode) -> None:
self.visit_default_func(node)
node.left.accept(self)
node.right.accept(self)
def visit_NotNode(self, node: mparser.NotNode) -> None:
self.visit_default_func(node)
node.value.accept(self)
def visit_CodeBlockNode(self, node: mparser.CodeBlockNode) -> None:
self.visit_default_func(node)
for i in node.lines:
i.accept(self)
def visit_IndexNode(self, node: mparser.IndexNode) -> None:
self.visit_default_func(node)
node.iobject.accept(self)
node.index.accept(self)
def visit_MethodNode(self, node: mparser.MethodNode) -> None:
self.visit_default_func(node)
node.source_object.accept(self)
node.args.accept(self)
def visit_FunctionNode(self, node: mparser.FunctionNode) -> None:
self.visit_default_func(node)
node.args.accept(self)
def visit_AssignmentNode(self, node: mparser.AssignmentNode) -> None:
self.visit_default_func(node)
node.value.accept(self)
def visit_PlusAssignmentNode(self, node: mparser.PlusAssignmentNode) -> None:
self.visit_default_func(node)
node.value.accept(self)
def visit_ForeachClauseNode(self, node: mparser.ForeachClauseNode) -> None:
self.visit_default_func(node)
node.items.accept(self)
node.block.accept(self)
def visit_IfClauseNode(self, node: mparser.IfClauseNode) -> None:
self.visit_default_func(node)
for i in node.ifs:
i.accept(self)
node.elseblock.accept(self)
def visit_UMinusNode(self, node: mparser.UMinusNode) -> None:
self.visit_default_func(node)
node.value.accept(self)
def visit_IfNode(self, node: mparser.IfNode) -> None:
self.visit_default_func(node)
node.condition.accept(self)
node.block.accept(self)
def visit_TernaryNode(self, node: mparser.TernaryNode) -> None:
self.visit_default_func(node)
node.condition.accept(self)
node.trueblock.accept(self)
node.falseblock.accept(self)
def visit_ArgumentNode(self, node: mparser.ArgumentNode) -> None:
self.visit_default_func(node)
for i in node.arguments:
i.accept(self)
for key, val in node.kwargs.items():
key.accept(self)
val.accept(self)
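# Illustrative sketch (not part of Meson): a concrete visitor that counts
# function calls by overriding a single hook. All other node types fall
# through to visit_default_func, and delegating to the base method keeps
# the traversal descending into the call's arguments.
class FunctionCallCounter(AstVisitor):
    def __init__(self) -> None:
        super().__init__()
        self.count = 0
    def visit_FunctionNode(self, node: mparser.FunctionNode) -> None:
        self.count += 1
        super().visit_FunctionNode(node)
# Usage sketch: some_codeblock_node.accept(FunctionCallCounter()), then read .count.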
| apache-2.0 |
robwarm/gpaw-symm | gpaw/analyse/multipole.py | 1 | 2669 | import numpy as np
from ase.units import Bohr
from ase.parallel import paropen
from ase.utils import prnt
from gpaw.spherical_harmonics import Y
from gpaw.utilities.tools import coordinates
class Multipole:
"""Expand a function on the grid in multipole moments
relative to a given center.
center: Vector [Angstrom]
"""
def __init__(self, center, calculator=None, lmax=6):
self.center = center / Bohr
self.lmax = lmax
self.gd = None
self.y_Lg = None
self.l_L = None
if calculator is not None:
self.initialize(calculator.density.finegd)
def initialize(self, gd):
"""Initialize Y_L arrays"""
self.gd = gd
r_cg, r2_g = coordinates(gd, self.center, tiny=1.e-78)
r_g = np.sqrt(r2_g)
rhat_cg = r_cg / r_g
self.l_L = []
self.y_Lg = []
npY = np.vectorize(Y, (float,), 'spherical harmonic')
L = 0
for l in range(self.lmax + 1):
for m in range(2 * l + 1):
self.y_Lg.append(
np.sqrt(4 * np.pi / (2 * l + 1)) * r_g**l *
npY(L, rhat_cg[0], rhat_cg[1], rhat_cg[2])
)
self.l_L.append(l)
L += 1
def expand(self, f_g):
"""Expand a function f_g in multipole moments
units [e * Angstrom**l]"""
assert(f_g.shape == self.gd.empty().shape)
q_L = []
for L, y_g in enumerate(self.y_Lg):
q_L.append(self.gd.integrate(f_g * y_g))
q_L[L] *= Bohr**self.l_L[L]
return np.array(q_L)
def to_file(self, calculator,
filename='multipole.dat',
mode='a'):
"""Expand the charge distribution in multipoles and write
the result to a file"""
if self.gd is None:
self.initialize(calculator.density.finegd)
q_L = self.expand(-calculator.density.rhot_g)
f = paropen(filename, mode)
prnt('# Multipole expansion of the charge density', file=f)
prnt('# center =', self.center * Bohr, 'Angstrom', file=f)
prnt('# lmax =', self.lmax, file=f)
prnt(('# see https://trac.fysik.dtu.dk/projects/gpaw/browser/' +
'trunk/c/bmgs/sharmonic.py'), file=f)
prnt('# for the definition of spherical harmonics', file=f)
prnt('# l m q_lm[|e| Angstrom**l]', file=f)
L = 0
for l in range(self.lmax + 1):
for m in range(-l, l + 1):
prnt('{0:2d} {1:3d} {2:g}'.format(l, m, q_L[L]), file=f)
L += 1
f.close()
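# Illustrative usage sketch (the GPAW calculator setup is omitted and the
# names below are assumptions):
#   mp = Multipole(center=np.array([0.0, 0.0, 0.0]), calculator=calc, lmax=4)
#   q_L = mp.expand(-calc.density.rhot_g)   # moments in |e| * Angstrom**l
#   mp.to_file(calc, filename='multipole.dat', mode='w')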
| gpl-3.0 |
coxmediagroup/googleads-python-lib | examples/dfp/v201408/report_service/run_inventory_report.py | 4 | 2915 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example runs a report equal to the "Whole network report" on the
DFP website.
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
import tempfile
# Import appropriate modules from the client library.
from googleads import dfp
from googleads import errors
def main(client):
# Initialize appropriate service.
network_service = client.GetService('NetworkService', version='v201408')
# Initialize a DataDownloader.
report_downloader = client.GetDataDownloader(version='v201408')
# Get root ad unit id for network.
root_ad_unit_id = (
network_service.getCurrentNetwork()['effectiveRootAdUnitId'])
# Set filter statement and bind value for reportQuery.
values = [{
'key': 'parent_ad_unit_id',
'value': {
'xsi_type': 'NumberValue',
'value': root_ad_unit_id
}
}]
filter_statement = {'query': 'WHERE PARENT_AD_UNIT_ID = :parent_ad_unit_id',
'values': values}
# Create report job.
report_job = {
'reportQuery': {
'dimensions': ['DATE', 'AD_UNIT_NAME'],
'adUnitView': 'HIERARCHICAL',
'columns': ['AD_SERVER_IMPRESSIONS', 'AD_SERVER_CLICKS',
'DYNAMIC_ALLOCATION_INVENTORY_LEVEL_IMPRESSIONS',
'DYNAMIC_ALLOCATION_INVENTORY_LEVEL_CLICKS',
'TOTAL_INVENTORY_LEVEL_IMPRESSIONS',
'TOTAL_INVENTORY_LEVEL_CPM_AND_CPC_REVENUE'],
'dateRangeType': 'LAST_WEEK',
'statement': filter_statement
}
}
try:
# Run the report and wait for it to finish.
report_job_id = report_downloader.WaitForReport(report_job)
except errors.DfpReportError, e:
print 'Failed to generate report. Error was: %s' % e
# Change to your preferred export format.
export_format = 'CSV_DUMP'
report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False)
# Download report data.
report_downloader.DownloadReportToFile(
report_job_id, export_format, report_file)
report_file.close()
# Display results.
print 'Report job with id \'%s\' downloaded to:\n%s' % (
report_job_id, report_file.name)
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
| apache-2.0 |
guard163/xen-api | ocaml/idl/binding_sanity_checks/graphxapi.py | 34 | 6851 | #!/usr/bin/env python
# NOT FINISHED (Needs tidying, can only follow OpaqueRefs, whereas some references are by uuid)
# This is supposed to be a general xapi database graphing tool.
# I've got a bit distracted, so I'm checking it in so it doesn't get lost.
# sample command line
# ./graphxapi.py | dot -Tpng >doom.png ; eog doom.png &
import XenAPI
import sanitychecklib
from pprint import pprint,pformat
#I've written a lot of database graphing programs recently.
#For instance there's a program called newmetrics graph which:
#finds all host objects, and then looks up all their resident VMs, and then for all those, looks up all VIFs and VBDs, then for all these VIFs looks up the network, and then for all those networks finds all the PIFs.
#I'd like to abstract this procedure of constructing a subgraph of the database graph by chasing keys from starting points.
#so for instance, a description of newmetricsgraph might be:
#considering objects (host, VM, VIF, VBD, network, PIF)
#starting from all host objects
#follow-keys (host, resident_VMs) (VM, VIFs) (VM, VBDs) (VIF, network) (network, PIFs)
#Similarly Richard is interested in the trees of VBDs which are created, so we might wish to say:
#considering objects (VBD)
#starting from all VBD where true
#follow-keys (VBD, parent)
#More formally, here are some functions which return 3-tuples describing these algorithms
#find ivory's pifs and associated networks
def ivorys_pifs_and_networks():
return (
['PIF','host','network'],
[('host','PIFs'),('PIF', 'network')],
[('host',lambda(x): x['name_label']=='ivory')]
)
#find all the objects with associated metrics
def all_metrics():
return(
['host', 'VM', 'VIF', 'VBD', 'network', 'PIF',
'host_metrics','VIF_metrics','PIF_metrics','VBD_metrics','VM_metrics','VM_guest_metrics'],
[ ('host', 'resident_VMs'), ('host','PIFs'), ('host','metrics'),
('VM','VIFs'), ('VM','VBDs'), ('VM','metrics'), ('VM','guest_metrics'),
('VIF','network'),('VIF','metrics'),
('PIF', 'network'),('PIF','metrics')],
[('host',lambda(x): True)]
)
#this should have been Richard's vdi tree, but the parent field is apparently a uuid, not an OpaqueRef, so the program will need to be modified to deal with this. Grrr.
#this command may come in handy when fixing it
#xe vdi-param-set uuid=895ba851-6a04-c06c-49ac-1bbd3021668c other-config:foo=bar
def vdi_tree(uuid):
return(
['VDI'],
[('VDI',('other_config','parent'))],
[('VDI', lambda(x): x['uuid']==uuid) ]
)
#Choose the subgraph we want to walk through
(object_types, keys_to_chase, start_from)=all_metrics()
##(object_types, keys_to_chase, start_from)=vdi_tree('d295fe98-eea4-e5bd-a776-d1c335612256')
##(object_types, keys_to_chase, start_from)=ivorys_pifs_and_networks()
#Generally, we wish to announce the name of this file.
#When running in the interpreter, however, this doesn't exist, and we
#probably shouldn't log out either
try:
this_test_name = __file__
logout_after_test = True
except NameError:
this_test_name = "unknown"
logout_after_test = False
print "/*------------", this_test_name, "*/"
#log in to the master
print "/*logging in to ",sanitychecklib.server, "*/"
session=sanitychecklib.getsession()
sx=session.xenapi
class typed_record():
def __init__(self, type_string, record, marked=False):
self.type_string=type_string
self.record=record
self.marked=marked #when chasing through the graph we need to avoid loops, so mark when visited
def __repr__(self):
return "typed_record("+self.type_string.__repr__()+","+self.record.__repr__()+","+self.marked.__repr__()+")"
#first fill a database with the data of all the objects we might be interested in
object_database={}
for obj_type in object_types:
objs=sx.__getattr__(obj_type).get_all_records()
for k,v in objs.items():
object_database[k]=typed_record(obj_type, v)
def mark_object_and_chase_keys(ref, keys_to_chase):
if(ref=='OpaqueRef:NULL'):
return
obj=object_database[ref]
if(obj.marked==True):
return
else:
obj.marked=True
type_string=obj.type_string
record=obj.record
for t,k in keys_to_chase:
if type_string==t:
try:
if (type(k)==type("key")):
value=record[k]
elif(type(k)==type(("key","subkey"))):
value=record[k[0]][k[1]]
else:
error("key "+k+" should be either string or tuple")
except KeyError:
return
try:
if(type(value)==type([])):
for x in value:
mark_object_and_chase_keys(x, keys_to_chase)
else:
mark_object_and_chase_keys(value, keys_to_chase)
except:
print "Error while chasing key ",k, "in type", t
raise
#mark all starting objects in the database, and for each one chase the key list through the database
for ref,obj in object_database.items():
for t,f in start_from:
if (obj.type_string == t ) and (f(obj.record)):
mark_object_and_chase_keys(ref, keys_to_chase)
def print_edge(x, y, label=None):
print '"%s" -> "%s"' % (x,y),
if label != None:
print ' [label="%s"]' % label,
print ';'
def print_node(x, label=None):
print '"%s"' % x,
if label != None:
print ' [label="%s"]' % label,
print ';'
#emit the relevant subgraph in dot format
print "digraph graphxapi { "
for ref,obj in object_database.items():
if (obj.marked):
print_node(ref, obj.record.get('name_label', obj.type_string))
for t,k in keys_to_chase:
if obj.type_string==t:
if (type(k)==type("key")):
value=obj.record[k]
elif(type(k)==type(("key","subkey"))):
try:
value=obj.record[k[0]][k[1]]
except KeyError:
                        continue
                else:
                    raise ValueError("key " + k + " should be either a string or a tuple")
if(type(value)==type([])):
for x in value:
print_edge(ref, x, k)
else:
print_edge(ref, value, k)
print "}"
#log out
if logout_after_test:
print "/*logging out*/"
session.logout()
print "/*End of------", this_test_name, "*/"
| lgpl-2.1 |
Multimac/ansible-modules-extras | notification/jabber.py | 60 | 4555 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>
DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to jabber user or chat room
description:
- Send a message to jabber
options:
user:
description:
- User as which to connect
required: true
password:
description:
- password for user to connect
required: true
to:
description:
- user ID or name of the room, when using room use a slash to indicate your nick.
required: true
msg:
description:
- The message body.
required: true
default: null
host:
description:
- host to connect, overrides user info
required: false
port:
description:
- port to connect to, overrides default
required: false
default: 5222
encoding:
description:
- message encoding
required: false
# informational: requirements for nodes
requirements:
- python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# send a message to a user
- jabber: user=mybot@example.net
password=secret
to=friend@example.net
msg="Ansible task finished"
# send a message to a room
- jabber: user=mybot@example.net
password=secret
to=mychaps@conference.example.net/ansiblebot
msg="Ansible task finished"
# send a message, specifying the host and port
- jabber user=mybot@example.net
host=talk.example.net
port=5223
password=secret
to=mychaps@example.net
msg="Ansible task finished"
'''
import os
import re
import time
HAS_XMPP = True
try:
import xmpp
except ImportError:
HAS_XMPP = False
def main():
module = AnsibleModule(
argument_spec=dict(
user=dict(required=True),
password=dict(required=True),
to=dict(required=True),
msg=dict(required=True),
host=dict(required=False),
port=dict(required=False,default=5222),
encoding=dict(required=False),
),
supports_check_mode=True
)
if not HAS_XMPP:
module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")
jid = xmpp.JID(module.params['user'])
user = jid.getNode()
server = jid.getDomain()
port = module.params['port']
password = module.params['password']
try:
to, nick = module.params['to'].split('/', 1)
except ValueError:
to, nick = module.params['to'], None
if module.params['host']:
host = module.params['host']
else:
host = server
if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']
msg = xmpp.protocol.Message(body=module.params['msg'])
try:
conn=xmpp.Client(server)
if not conn.connect(server=(host,port)):
module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
if not conn.auth(user,password,'Ansible'):
module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server))
# some old servers require this, also the sleep following send
conn.sendInitPresence(requestRoster=0)
if nick: # sending to room instead of user, need to join
msg.setType('groupchat')
msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
conn.send(xmpp.Presence(to=module.params['to']))
time.sleep(1)
else:
msg.setType('chat')
msg.setTo(to)
if not module.check_mode:
conn.send(msg)
time.sleep(1)
conn.disconnect()
except Exception, e:
module.fail_json(msg="unable to send msg: %s" % e)
module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
harlowja/networkx | examples/drawing/knuth_miles.py | 50 | 2994 | #!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
""" Return the cites example graph in miles_dat.txt
from the Stanford GraphBase.
"""
# open file miles_dat.txt.gz (or miles_dat.txt)
import gzip
fh = gzip.open('knuth_miles.txt.gz','r')
G=nx.Graph()
G.position={}
G.population={}
cities=[]
for line in fh.readlines():
line = line.decode()
if line.startswith("*"): # skip comments
continue
numfind=re.compile("^\d+")
if numfind.match(line): # this line is distances
dist=line.split()
for d in dist:
G.add_edge(city,cities[i],weight=int(d))
i=i+1
else: # this line is a city, position, population
i=1
(city,coordpop)=line.split("[")
cities.insert(0,city)
(coord,pop)=coordpop.split("]")
(y,x)=coord.split(",")
G.add_node(city)
# assign position - flip x axis for matplotlib, shift origin
G.position[city]=(-int(x)+7500,int(y)-3000)
G.population[city]=float(pop)/1000.0
return G
if __name__ == '__main__':
import networkx as nx
import re
import sys
G=miles_graph()
print("Loaded miles_dat.txt containing 128 cities.")
print("digraph has %d nodes with %d edges"\
%(nx.number_of_nodes(G),nx.number_of_edges(G)))
# make new graph of cites, edge if less then 300 miles between them
H=nx.Graph()
for v in G:
H.add_node(v)
for (u,v,d) in G.edges(data=True):
if d['weight'] < 300:
H.add_edge(u,v)
# draw with matplotlib/pylab
try:
import matplotlib.pyplot as plt
plt.figure(figsize=(8,8))
# with nodes colored by degree sized by population
node_color=[float(H.degree(v)) for v in H]
nx.draw(H,G.position,
node_size=[G.population[v] for v in H],
node_color=node_color,
with_labels=False)
# scale the axes equally
plt.xlim(-5000,500)
plt.ylim(-2000,3500)
plt.savefig("knuth_miles.png")
except:
pass
| bsd-3-clause |
MacCearain/HomeDetect | radio.py | 1 | 18735 | '''
radio module
'''
# pylint: disable=line-too-long
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-arguments
# pylint: disable=trailing-whitespace
import time
import serial
import sensor
ONEBIT = 0b1
#-------------------------------------------------------------------
# Let's Setup a couple of functions that make it easier to work with
# HEX Bytes
def get_bit(bit_mask, position):
'''
Utility Function that extracts a bit from a byte
Should probably check on length of the bit_mask and the number of positions we are shifting the position... possible future refactor
'''
return (bit_mask >> position) & ONEBIT
def hex_dump(random_integer):
'''
    hex_dump returns the two-character, zero-padded, uppercase hex representation of a number
'''
return hex(random_integer)[2:].zfill(2).upper()
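# For example: get_bit(0b0100, 2) -> 1, get_bit(0b0100, 1) -> 0, and
# hex_dump(0x7e) -> '7E' (two hex digits, zero-padded, uppercase).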
class XBeeOnUSB(object):
'''
XBee On USB models the interaction with the Xbee radio that is acting as the translator of ZigBee Messages to
computer logic via a USB serial port
'''
# Setup Context Manager on the Serial Port... Open / Close
# Support a Configuration File
# Maybe send the inbound raw data to a log @ debug level
# Potentially deliver complete messages instead of raw bytes
    # It's OK to be XBee/ZigBee specific -- this object is about getting ZigBee packets off the USB port
# These are part of the Xbee Radio Object -- Future Refactor
STARTFRAMEBYTE = 0x7e
DATAFRAMEBYTE = 0x92
#---------------------------------------------------------------------
# Let's Setup the Serial Port where the Coordinator Xbee is attached
# This should be refactored into an ???? Class and consideration for Linux vs Windows...
# Maybe externalize the port definition into the radio.cfg file.... - Future Refactor
def __init__(self, port='/dev/ttyUSB0', baudrate=9600):
'''
Initializes the XbeeOnUSB Class
'''
self.port = port
self.baud_rate = baudrate
        self.usb = serial.Serial()                 # This will be the handle to the serial port
self._start_frame = 0x7e # Just used as a constant
self.frame_length = 0 # Indicates the length of the ZigBee Protocol Data Frame
self._byte_count = 0 # Indicates how many bytes have been read
self.actual_frame_type = 0x92 # Will be the frame type we read in the packet
self.expected_frame_type = 0x92 # We are always hoping for the 0x92 data frame type in this program
self.serial_number = '0013A20040B97414' # The serial number of the XBEE that sent the message
self.network_address = '0000' # The network address
self.receive_type = 'Broadcast' # Receive Type is either 'Acknowledge' or 'Broadcast'
self.sample_set_count = 1 # The number of sample sets in the packet -- Limited to 1 currently
self.digital_channel_mask = {'d00': 0, 'd01': 0, 'd02': 0, 'd03': 0, 'd04': 0, 'd05': 0, 'd06': 0, 'd07': 0, 'd10': 0, 'd11': 0, 'd12': 0}
# Indicates the digital inputs that are enabled on the sending radio
self.analog_channel_mask = {'a0': 0, 'a1': 0, 'a2': 0, 'a3': 0}
# Indicates the analog inputs that are enabled on the sending radio
self.digital_data = {'d00': 0, 'd01': 0, 'd02': 0, 'd03': 0, 'd04': 0, 'd05': 0, 'd06': 0, 'd07': 0, 'd10': 0, 'd11': 0, 'd12': 0}
# The actual digital data on the digital pins
self.analog_data = {'a0': 0, 'a1': 0, 'a2': 0, 'a3': 0, 'vcc': 0}
# The actual analog data from the packet
self.analog_mV = {'a0': 0.0, 'a1': 0.0, 'a2': 0.0, 'a3': 0.0, 'vcc': 0.0}
# The mV readings on the analog inputs from the remote radio
self.check_sum = 0x00 # A Check sum -- to be calculated later
def open(self):
'''
open() attempts to open the serial port based on the current configuration settings of port and baud rate
'''
self.usb.port = self.port
self.usb.baudrate = self.baud_rate
self.usb.open()
def close(self):
'''
close() attempts to close the serial port
'''
self.usb.close()
def __enter__(self):
'''
__enter__ implements context manager features for the class
'''
self.open()
return self
def __exit__(self, exc_ty, exc_val, tb):
'''
__exit__ implements context manager features for the class
'''
self.close()
def __str__(self):
return 'Some Text tbd'
def __repr__(self):
return 'radio.XBeeOnUSB()'
def _dict(self):
'''
Returns a Dictionary that summarizes the last frame read
'''
return_val = {}
return_val['frame_length'] = self.frame_length
return_val['frame_type'] = self.actual_frame_type
return_val['serial_number'] = self.serial_number
return_val['network_address'] = self.network_address
return_val['receive_type'] = self.receive_type
return_val['digital_channel_mask'] = self.digital_channel_mask
return_val['analog_channel_mask'] = self.analog_channel_mask
return_val['digital_data'] = self.digital_data
return_val['analog_data'] = self.analog_data
return_val['analog_mV'] = self.analog_mV
return return_val
def _read_start_byte(self):
'''
        Let's read bytes until either 5 seconds have passed or we get a start byte
'''
start_time = int(time.time())
if self.usb.is_open:
self.usb.reset_input_buffer()
start_ord = ord(self.usb.read())
while (start_time + 5) > int(time.time()) and start_ord != self._start_frame:
start_ord = ord(self.usb.read())
if start_ord == self._start_frame:
self._byte_count = 0
return
else:
print('You need to open the serial port before we can read from it.')
def _read_frame_length(self):
'''
Let's read the two bytes that indicate the frame length
'''
msb_length_ord = ord(self.usb.read())
lsb_length_ord = ord(self.usb.read())
self.frame_length = (msb_length_ord * 256) + lsb_length_ord
# print " Length: " + str(frameLength)
self._byte_count = 0
def _read_frame_type(self):
'''
Let's get the Frame Type Byte from the Frame...
'''
self.actual_frame_type = ord(self.usb.read())
self._byte_count += 1
def _read_serial_number(self):
'''
Let's get the Serial Number of the Sending Xbee from the Frame...
'''
sn_ord8 = ord(self.usb.read())
sn_ord7 = ord(self.usb.read())
sn_ord6 = ord(self.usb.read())
sn_ord5 = ord(self.usb.read())
sn_ord4 = ord(self.usb.read())
sn_ord3 = ord(self.usb.read())
sn_ord2 = ord(self.usb.read())
sn_ord1 = ord(self.usb.read())
self._byte_count += 8
self.serial_number = hex_dump(sn_ord8) + hex_dump(sn_ord7) + hex_dump(sn_ord6) + hex_dump(sn_ord5) + hex_dump(sn_ord4) + hex_dump(sn_ord3) + hex_dump(sn_ord2) + hex_dump(sn_ord1)
#print(" Sender Radio Serial Number: " + self.serial_number)
def _read_network_address(self):
'''
Let's get the Network Address within the packet
'''
network_address_high_ord = ord(self.usb.read())
network_address_low_ord = ord(self.usb.read())
self._byte_count += 2
self.network_address = hex_dump(network_address_high_ord) + hex_dump(network_address_low_ord)
#print(" Source Network Address: " + self.network_address)
def _read_receive_type(self):
'''
Let's get the Receive Type of the Packet.
        0x01 = Acknowledge
0x02 = Broadcast
0x?? = Unknown
'''
receive_type_ord = ord(self.usb.read())
self._byte_count += 1
if receive_type_ord == 0x01:
self.receive_type = 'Acknowledge'
#print(" Packet Acknowledged")
elif receive_type_ord == 0x02:
self.receive_type = 'Broadcast'
#print(" Broadcast Packet")
else:
self.receive_type = 'Unknown'
#print(" Unknown Receive Option")
def _read_sample_set_count(self):
'''
Let's get the number of Sample Sets from the Frame Content...
I think the only valide value at this time is 1
'''
self.sample_set_count = ord(self.usb.read())
self._byte_count += 1
#print(" Number of Sample Sets: " + str(self.sample_set_count))
def _read_digital_channel_mask(self):
'''
Let's get the Digital Channel Mask that describes which Digital Pins are Enabled on the Sending Xbee
'''
digital_channel_mask_high = ord(self.usb.read())
digital_channel_mask_low = ord(self.usb.read())
self._byte_count += 2
#print(" Digital IO High Mask: " + bin(digitalChannelMaskHigh)[2:].zfill(8) + " (" + HexDump(digitalChannelMaskHigh) + ")")
#print(" Digital IO Low Mask: " + bin(digitalChannelMaskLow)[2:].zfill(8) + " (" + HexDump(digitalChannelMaskLow) + ")")
self.digital_channel_mask['d00'] = get_bit(digital_channel_mask_low, 0)
self.digital_channel_mask['d01'] = get_bit(digital_channel_mask_low, 1)
self.digital_channel_mask['d02'] = get_bit(digital_channel_mask_low, 2)
self.digital_channel_mask['d03'] = get_bit(digital_channel_mask_low, 3)
self.digital_channel_mask['d04'] = get_bit(digital_channel_mask_low, 4)
self.digital_channel_mask['d05'] = get_bit(digital_channel_mask_low, 5)
self.digital_channel_mask['d06'] = get_bit(digital_channel_mask_low, 6)
self.digital_channel_mask['d07'] = get_bit(digital_channel_mask_low, 7)
self.digital_channel_mask['d10'] = get_bit(digital_channel_mask_high, 2)
self.digital_channel_mask['d11'] = get_bit(digital_channel_mask_high, 3)
self.digital_channel_mask['d12'] = get_bit(digital_channel_mask_high, 4)
def _read_analog_channel_mask(self):
'''
Let's get the Analog Channel Mask that describes which Analog Pins are Enabled on the Sending Xbee
'''
analog_channel_mask = ord(self.usb.read())
self._byte_count += 1
#print(" Analog IO Mask: " + bin(analogChannelMask)[2:].zfill(8) + " (" + hex(analogChannelMask)[2:].zfill(2) + ")")
self.analog_channel_mask['a0'] = get_bit(analog_channel_mask, 0)
self.analog_channel_mask['a1'] = get_bit(analog_channel_mask, 1)
self.analog_channel_mask['a2'] = get_bit(analog_channel_mask, 2)
self.analog_channel_mask['a3'] = get_bit(analog_channel_mask, 3)
def _read_digital_data(self):
'''
If any of the Digital Pins were Enabled -- Let's get the Digital Data the remote Xbee sent...
'''
        # any() takes a single iterable, so check every enabled digital pin at once
        if any(self.digital_channel_mask.values()):
digital_channel_input_high = ord(self.usb.read())
digital_channel_input_low = ord(self.usb.read())
self._byte_count += 2
self.digital_data['d00'] = get_bit(digital_channel_input_low, 0)
self.digital_data['d01'] = get_bit(digital_channel_input_low, 1)
self.digital_data['d02'] = get_bit(digital_channel_input_low, 2)
self.digital_data['d03'] = get_bit(digital_channel_input_low, 3)
self.digital_data['d04'] = get_bit(digital_channel_input_low, 4)
self.digital_data['d05'] = get_bit(digital_channel_input_low, 5)
self.digital_data['d06'] = get_bit(digital_channel_input_low, 6)
self.digital_data['d07'] = get_bit(digital_channel_input_low, 7)
self.digital_data['d10'] = get_bit(digital_channel_input_high, 2)
self.digital_data['d11'] = get_bit(digital_channel_input_high, 3)
self.digital_data['d12'] = get_bit(digital_channel_input_high, 4)
else:
self.digital_data['d00'] = 0
self.digital_data['d01'] = 0
self.digital_data['d02'] = 0
self.digital_data['d03'] = 0
self.digital_data['d04'] = 0
self.digital_data['d05'] = 0
self.digital_data['d06'] = 0
self.digital_data['d07'] = 0
self.digital_data['d10'] = 0
self.digital_data['d11'] = 0
self.digital_data['d12'] = 0
def _read_analog_data(self, pin):
'''
Let's read an analog data value and convert it to mV as well
Used on Analog pins that are enabled and on VCC if enabled on the sending radio
'''
analog_high_input = ord(self.usb.read())
analog_low_input = ord(self.usb.read())
self._byte_count += 2
self.analog_data[pin] = (analog_high_input * 256) + analog_low_input
self.analog_mV[pin] = (self.analog_data[pin]/1023.0) * 1200.0
def _read_analog_channels(self):
'''
Let's cycle over the analog pins, if the pin is enabled (in the analog channel mask)
then let's read the data on the pin
'''
for pin in self.analog_channel_mask:
if self.analog_channel_mask[pin]:
self._read_analog_data(pin)
else:
self.analog_data[pin] = 0
self.analog_mV[pin] = 0.0
# 15 -- Let's determine if the Radio Sent VCC
pin = 'vcc'
if (self._byte_count + 2) == self.frame_length:
#print " The Remote Radio provided VCC in the Frame."
# Let's Assume this is a report of the VCC as an analog Value
self._read_analog_data(pin)
elif self._byte_count == self.frame_length:
self.analog_data[pin] = 0
self.analog_mV[pin] = 0.0
else:
#print(" The byte count is: " + str(byteCount) + " the frame length is: " + str(frameLength))
self.analog_data[pin] = 0
self.analog_mV[pin] = 0.0
def _read_check_sum(self):
'''
Let's read the checksum byte from the frame
'''
self.check_sum = ord(self.usb.read())
#print " Check Sum : " + HexDump(checkSum)
def read_frame(self):
'''
This returns a dictionary that has the essence of the raw radio message received
Typically a radio message will formatted as follows:
Byte Example Description
0 0x7e Start Byte -- Indicates the beginning of a data frame
1 0x00 Length -- Number of Bytes (CheckSumByte# - 1 - 2)
2 0x14
3 0x92 Frame Type - 0x92 indicates this will be a broadcasted data sample
4 0x00 64-Bit Source Address (aka Serial Number of the Sending Radio)
5 0x13 Most Significant Byte is Byte 4 and the Least Significant Byte is Byte 11
6 0xA2
7 0x00
8 0x40
9 0x77
10 0x9C
11 0x49
12 0x36 Source Network Address -- 16 Bit
13 0x6A
14 0x01 Receive Options -- 01 = Packet Acknowledged -- 02 = Broadcast Packet
15 0x01 Number of Sample Sets (Always set to 1 due to XBEE Limitations)
16 0x00 Digital Channel Mask - Indicates which Digital Pins are enabled (See below for a mapping)
17 0x20
18 0x01 Analog Channel Mask - Indicates which Analog Pins are enabled (See below for a mapping)
19 0x00 Digital Sample Data (if any) - Maps the same as the Digital Channel Mask
20 0x14
21 0x04 Analog Sample Data (if any)
22 0x25 There will be two bytes here for every pin set for ADC
23 0xF5 Checksum(0xFF - the 8 bit sum of the bytes from byte 3 to this byte)
Digital Channel Mask
First Byte
0 1 2 3 4 5 6 7
n/a n/a D12 D11 D10 n/a n/a n/a
Second Byte
0 1 2 3 4 5 6 7
D7 D6 D5 D4 D3 D2 D1 D0
Analog Channel Mask
First Byte
0 1 2 3 4 5 6 7
n/a n/a n/a n/a A3 A2 A1 A0
'''
        # 1 -- Let's read until we get a STARTFRAMEBYTE or 5 seconds have elapsed
        self._read_start_byte()
        # 2 -- Next let's get how long the frame should be...
        self._read_frame_length()
        # 3 -- Next let's get the data frame type -- we are hoping for 0x92 data frames
        self._read_frame_type()
        # 4 -- Next let's get the serial number of the sending XBEE
        self._read_serial_number()
# 5 -- Let's get the network Address
self._read_network_address()
        # 6 -- Let's get the Receive Type of the Packet... Acknowledged or Broadcast
self._read_receive_type()
# 7 -- Let's get the number of sample sets
self._read_sample_set_count()
# 8 -- Let's get the Digital Channel Mask
self._read_digital_channel_mask()
# 9 -- Let's get the Analog Channel Mask
self._read_analog_channel_mask()
# 10 -- Let's get the actual digital data on the Digital Pins if any are enabled.
self._read_digital_data()
# 11 -- Analog Pin 0 Data, if it was enabled...
self._read_analog_channels()
# 12 -- Read the Checksum Byte
self._read_check_sum()
return self._dict()
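# Illustrative usage sketch (the port name is an assumption; the context
# manager opens and closes the serial port around the read):
#   with XBeeOnUSB(port='/dev/ttyUSB0', baudrate=9600) as xbee:
#       frame = xbee.read_frame()
#       print(frame['serial_number'], frame['analog_mV'])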
| mit |
ppiotr/Invenio | modules/bibformat/lib/elements/bfe_reprints.py | 39 | 1199 | ## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""BibFormat element - Prints reprinted editions
"""
__revision__ = "$Id$"
def format_element(bfo, separator):
"""
Prints the reprinted editions of a record
@param separator: a separator between reprinted editions
@see: place.py, publisher.py, imprint.py, date.py, pagination.py
"""
reprints = bfo.field('260__g')
if len(reprints) > 0:
return separator.join(reprints)
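# For example (hypothetical record data): if field 260__g holds
# ['Repr. 1985', 'Repr. 1991'] and separator is ', ', this element
# returns 'Repr. 1985, Repr. 1991'.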
| gpl-2.0 |
Johnzero/OE7 | openerp/addons-fg/openerp-magento-master/magento_connect/__init__.py | 1 | 1237 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Raimon Esteve <resteve@zikzakmedia.com>
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import delivery
import mgn
import mgn_referential
import partner
import product
import product_attributes
import product_images
import sale
import wizard
| agpl-3.0 |
jmighion/ansible | lib/ansible/modules/network/eos/eos_system.py | 22 | 11371 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: eos_system
version_added: "2.3"
author: "Peter Sprygada (@privateip)"
short_description: Manage the system attributes on Arista EOS devices
description:
- This module provides declarative management of node system attributes
on Arista EOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
extends_documentation_fragment: eos
notes:
- Tested against EOS 4.15
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
lookup_source:
description:
- Provides one or more source
interfaces to use for performing DNS lookups. The interface
provided in C(lookup_source) can only exist in a single VRF. This
argument accepts either a list of interface names or a list of
hashes that configure the interface name and VRF name. See
examples.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers or
a list of hashes that configure the name server and VRF name. See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure hostname and domain-name
eos_system:
hostname: eos01
domain_name: test.example.com
- name: remove configuration
eos_system:
state: absent
- name: configure DNS lookup sources
eos_system:
lookup_source: Management1
- name: configure DNS lookup sources with VRF support
eos_system:
lookup_source:
- interface: Management1
vrf: mgmt
- interface: Ethernet1
vrf: myvrf
- name: configure name servers
eos_system:
name_servers:
- 8.8.8.8
- 8.8.4.4
- name: configure name servers with VRF support
eos_system:
name_servers:
- { server: 8.8.8.8, vrf: mgmt }
- { server: 8.8.4.4, vrf: mgmt }
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- hostname eos01
- ip domain-name test.example.com
session_name:
description: The EOS config session name used to load the configuration
returned: changed
type: str
sample: ansible_1479315771
"""
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import ComplexList
from ansible.module_utils.eos import load_config, get_config
from ansible.module_utils.eos import eos_argument_spec
_CONFIGURED_VRFS = None
def has_vrf(module, vrf):
global _CONFIGURED_VRFS
if _CONFIGURED_VRFS is not None:
return vrf in _CONFIGURED_VRFS
config = get_config(module)
_CONFIGURED_VRFS = re.findall('vrf definition (\S+)', config)
_CONFIGURED_VRFS.append('default')
return vrf in _CONFIGURED_VRFS
def map_obj_to_commands(want, have, module):
commands = list()
state = module.params['state']
needs_update = lambda x: want.get(x) and (want.get(x) != have.get(x))
if state == 'absent':
if have['domain_name']:
commands.append('no ip domain-name')
if have['hostname'] != 'localhost':
commands.append('no hostname')
if state == 'present':
if needs_update('hostname'):
commands.append('hostname %s' % want['hostname'])
if needs_update('domain_name'):
commands.append('ip domain-name %s' % want['domain_name'])
if want['domain_list']:
# handle domain_list items to be removed
for item in set(have['domain_list']).difference(want['domain_list']):
commands.append('no ip domain-list %s' % item)
# handle domain_list items to be added
for item in set(want['domain_list']).difference(have['domain_list']):
commands.append('ip domain-list %s' % item)
if want['lookup_source']:
# handle lookup_source items to be removed
for item in have['lookup_source']:
if item not in want['lookup_source']:
if item['vrf']:
if not has_vrf(module, item['vrf']):
module.fail_json(msg='vrf %s is not configured' % item['vrf'])
values = (item['vrf'], item['interface'])
commands.append('no ip domain lookup vrf %s source-interface %s' % values)
else:
commands.append('no ip domain lookup source-interface %s' % item['interface'])
# handle lookup_source items to be added
for item in want['lookup_source']:
if item not in have['lookup_source']:
if item['vrf']:
if not has_vrf(module, item['vrf']):
module.fail_json(msg='vrf %s is not configured' % item['vrf'])
values = (item['vrf'], item['interface'])
commands.append('ip domain lookup vrf %s source-interface %s' % values)
else:
commands.append('ip domain lookup source-interface %s' % item['interface'])
if want['name_servers']:
# handle name_servers items to be removed. Order does matter here
# since name servers can only be in one vrf at a time
for item in have['name_servers']:
if item not in want['name_servers']:
if not has_vrf(module, item['vrf']):
module.fail_json(msg='vrf %s is not configured' % item['vrf'])
if item['vrf'] not in ('default', None):
values = (item['vrf'], item['server'])
commands.append('no ip name-server vrf %s %s' % values)
else:
commands.append('no ip name-server %s' % item['server'])
# handle name_servers items to be added
for item in want['name_servers']:
if item not in have['name_servers']:
if not has_vrf(module, item['vrf']):
module.fail_json(msg='vrf %s is not configured' % item['vrf'])
if item['vrf'] not in ('default', None):
values = (item['vrf'], item['server'])
commands.append('ip name-server vrf %s %s' % values)
else:
commands.append('ip name-server %s' % item['server'])
return commands
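# Illustrative only (hypothetical values): with
#   want = {'domain_list': ['a.example', 'b.example'], ...}
#   have = {'domain_list': ['b.example', 'c.example'], ...}
# the two set differences above emit 'no ip domain-list c.example' and then
# 'ip domain-list a.example', leaving 'b.example' untouched.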
def parse_hostname(config):
match = re.search('^hostname (\S+)', config, re.M)
if match:
return match.group(1)
def parse_domain_name(config):
match = re.search('^ip domain-name (\S+)', config, re.M)
if match:
return match.group(1)
def parse_lookup_source(config):
objects = list()
regex = 'ip domain lookup (?:vrf (\S+) )*source-interface (\S+)'
for vrf, intf in re.findall(regex, config, re.M):
if len(vrf) == 0:
vrf = None
objects.append({'interface': intf, 'vrf': vrf})
return objects
def parse_name_servers(config):
objects = list()
for vrf, addr in re.findall('ip name-server vrf (\S+) (\S+)', config, re.M):
objects.append({'server': addr, 'vrf': vrf})
return objects
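# Illustrative only: a running config line such as
#   ip name-server vrf mgmt 8.8.8.8
# parses to [{'server': '8.8.8.8', 'vrf': 'mgmt'}]; the regex above only
# matches name servers declared with an explicit vrf keyword.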
def map_config_to_obj(module):
config = get_config(module)
return {
'hostname': parse_hostname(config),
'domain_name': parse_domain_name(config),
'domain_list': re.findall('^ip domain-list (\S+)', config, re.M),
'lookup_source': parse_lookup_source(config),
'name_servers': parse_name_servers(config)
}
def map_params_to_obj(module):
obj = {
'hostname': module.params['hostname'],
'domain_name': module.params['domain_name'],
'domain_list': module.params['domain_list']
}
lookup_source = ComplexList(dict(
interface=dict(key=True),
vrf=dict()
), module)
name_servers = ComplexList(dict(
server=dict(key=True),
vrf=dict(default='default')
), module)
for arg, cast in [('lookup_source', lookup_source), ('name_servers', name_servers)]:
if module.params[arg] is not None:
obj[arg] = cast(module.params[arg])
else:
obj[arg] = None
return obj
def main():
""" main entry point for module execution
"""
argument_spec = dict(
hostname=dict(),
domain_name=dict(),
domain_list=dict(type='list', aliases=['domain_search']),
# { interface: <str>, vrf: <str> }
lookup_source=dict(type='list'),
# { server: <str>, vrf: <str> }
name_servers=dict(type='list'),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(eos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
result = {'changed': False}
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands(want, have, module)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
if response.get('diff') and module._diff:
result['diff'] = {'prepared': response.get('diff')}
result['session_name'] = response.get('session')
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
holmes/intellij-community | python/lib/Lib/os.py | 74 | 24851 | r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
name = 'java'
if 'posix' in _names:
_name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
_name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
_name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
_name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
_name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
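# Illustrative only: makedirs('a/b/c') recurses to create 'a', then 'a/b',
# then 'a/b/c'. EEXIST from an intermediate segment is swallowed above, but
# the final mkdir still raises if the leaf directory already exists.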
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def _exists(name):
# CPython eval's the name, whereas looking in __all__ works for
# Jython and is much faster
return name in __all__
if _exists('execv'):
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
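# Illustrative only: _execvpe('prog', ('prog', '-x')) with PATH=/bin:/usr/bin
# tries execv('/bin/prog', ...) then execv('/usr/bin/prog', ...); the first
# error other than ENOENT/ENOTDIR is saved and re-raised only after every
# PATH entry has been tried.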
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if _name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif _name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
import UserDict
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
environ = _Environ(environ)
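# Illustrative only: on these platforms environ['Path'] and environ['PATH']
# address the same entry, because every lookup and assignment above goes
# through key.upper().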
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if sys.platform.startswith('java') or _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
# Supply os.popen()
def popen(cmd, mode='r', bufsize=-1):
"""popen(command [, mode='r' [, bufsize]]) -> pipe
Open a pipe to/from a command returning a file object.
"""
if not isinstance(cmd, (str, unicode)):
raise TypeError('invalid cmd type (%s, expected string)' % type(cmd))
if mode not in ('r', 'w'):
raise ValueError("invalid mode %r" % mode)
import subprocess
if mode == 'r':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdout=subprocess.PIPE)
fp = proc.stdout
elif mode == 'w':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdin=subprocess.PIPE)
fp = proc.stdin
# files from subprocess are in binary mode but popen needs text mode
fp = fdopen(fp.fileno(), mode, bufsize)
return _wrap_close(fp, proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close(object):
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
if _name == 'nt':
return returncode
else:
return returncode << 8  # Shift left to match old os.popen() behavior
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
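# Illustrative only: popen('echo hi').read() returns 'hi\n', and closing the
# returned file reports the child's exit status via _wrap_close.close()
# (None when the command exited with status 0).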
| apache-2.0 |
salamer/django | tests/forms_tests/tests/test_widgets.py | 115 | 105521 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import copy
import datetime
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse
from django.forms import (
BooleanField, CharField, CheckboxInput, CheckboxSelectMultiple,
ChoiceField, ClearableFileInput, DateField, DateInput, DateTimeInput,
FileInput, Form, HiddenInput, MultipleChoiceField, MultipleHiddenInput,
MultiValueField, MultiWidget, NullBooleanSelect, PasswordInput,
RadioSelect, Select, SelectDateWidget, SelectMultiple, SplitDateTimeField,
SplitDateTimeWidget, Textarea, TextInput, TimeInput, ValidationError,
)
from django.forms.widgets import (
ChoiceFieldRenderer, ChoiceInput, RadioFieldRenderer,
)
from django.test import SimpleTestCase, override_settings
from django.utils import six, translation
from django.utils.dates import MONTHS_AP
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.safestring import SafeData, mark_safe
from ..models import Article
class FormsWidgetTests(SimpleTestCase):
# Each Widget class corresponds to an HTML form widget. A Widget knows how to
# render itself, given a field name and some data. Widgets don't perform
# validation.
def test_textinput(self):
w = TextInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="text" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="text" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="text" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="text" name="email" value="test@example.com" class="fun" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="text" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = TextInput(attrs={'class': 'fun', 'type': 'email'})
self.assertHTMLEqual(w.render('email', ''), '<input type="email" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="email" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = TextInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="text" class="special" name="email" />')
# 'attrs' can be safe-strings if needed
w = TextInput(attrs={'onBlur': mark_safe("function('foo')")})
self.assertHTMLEqual(w.render('email', ''), '<input onBlur="function(\'foo\')" type="text" name="email" />')
def test_passwordinput(self):
w = PasswordInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', 'secret'), '<input type="password" name="email" />')
# The render_value argument lets you specify whether the widget should render
# its value. For security reasons, this is off by default.
w = PasswordInput(render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="password" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="password" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="password" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="password" name="email" value="test@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = PasswordInput(attrs={'class': 'fun'}, render_value=True)
self.assertHTMLEqual(w.render('email', ''), '<input type="password" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="password" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = PasswordInput(attrs={'class': 'pretty'}, render_value=True)
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="password" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="password" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
def test_hiddeninput(self):
w = HiddenInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="hidden" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="hidden" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = HiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="hidden" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="hidden" class="fun" value="foo@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = HiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', '', attrs={'class': 'special'}), '<input type="hidden" class="special" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# Boolean values are rendered to their string forms ("True" and "False").
w = HiddenInput()
self.assertHTMLEqual(w.render('get_spam', False), '<input type="hidden" name="get_spam" value="False" />')
self.assertHTMLEqual(w.render('get_spam', True), '<input type="hidden" name="get_spam" value="True" />')
def test_multiplehiddeninput(self):
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', None), '')
self.assertHTMLEqual(w.render('email', ['test@example.com']), '<input type="hidden" name="email" value="test@example.com" />')
self.assertHTMLEqual(w.render('email', ['some "quoted" & ampersanded value']), '<input type="hidden" name="email" value="some "quoted" & ampersanded value" />')
self.assertHTMLEqual(w.render('email', ['test@example.com', 'foo@example.com']), '<input type="hidden" name="email" value="test@example.com" />\n<input type="hidden" name="email" value="foo@example.com" />')
self.assertHTMLEqual(w.render('email', ['test@example.com'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />')
self.assertHTMLEqual(w.render('email', ['test@example.com', 'foo@example.com'], attrs={'class': 'fun'}), '<input type="hidden" name="email" value="test@example.com" class="fun" />\n<input type="hidden" name="email" value="foo@example.com" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = MultipleHiddenInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', []), '')
self.assertHTMLEqual(w.render('email', ['foo@example.com']), '<input type="hidden" class="fun" value="foo@example.com" name="email" />')
self.assertHTMLEqual(w.render('email', ['foo@example.com', 'test@example.com']), '<input type="hidden" class="fun" value="foo@example.com" name="email" />\n<input type="hidden" class="fun" value="test@example.com" name="email" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = MultipleHiddenInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('email', ['foo@example.com'], attrs={'class': 'special'}), '<input type="hidden" class="special" value="foo@example.com" name="email" />')
self.assertHTMLEqual(w.render('email', ['ŠĐĆŽćžšđ'], attrs={'class': 'fun'}), '<input type="hidden" class="fun" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" name="email" />')
# Each input gets a separate ID.
w = MultipleHiddenInput()
self.assertHTMLEqual(w.render('letters', list('abc'), attrs={'id': 'hideme'}), '<input type="hidden" name="letters" value="a" id="hideme_0" />\n<input type="hidden" name="letters" value="b" id="hideme_1" />\n<input type="hidden" name="letters" value="c" id="hideme_2" />')
def test_fileinput(self):
# FileInput widgets don't ever show the value, because the old value is of no use
# if you are updating the form or if the provided file generated an error.
w = FileInput()
self.assertHTMLEqual(w.render('email', ''), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', None), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'some "quoted" & ampersanded value'), '<input type="file" name="email" />')
self.assertHTMLEqual(w.render('email', 'test@example.com', attrs={'class': 'fun'}), '<input type="file" name="email" class="fun" />')
# You can also pass 'attrs' to the constructor:
w = FileInput(attrs={'class': 'fun'})
self.assertHTMLEqual(w.render('email', ''), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'foo@example.com'), '<input type="file" class="fun" name="email" />')
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<input type="file" class="fun" name="email" />')
def test_textarea(self):
w = Textarea()
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', None), '<textarea rows="10" cols="40" name="msg"></textarea>')
self.assertHTMLEqual(w.render('msg', 'value'), '<textarea rows="10" cols="40" name="msg">value</textarea>')
self.assertHTMLEqual(w.render('msg', 'some "quoted" & ampersanded value'), '<textarea rows="10" cols="40" name="msg">some "quoted" & ampersanded value</textarea>')
self.assertHTMLEqual(w.render('msg', mark_safe('pre "quoted" value')), '<textarea rows="10" cols="40" name="msg">pre "quoted" value</textarea>')
self.assertHTMLEqual(w.render('msg', 'value', attrs={'class': 'pretty', 'rows': 20}), '<textarea class="pretty" rows="20" cols="40" name="msg">value</textarea>')
# You can also pass 'attrs' to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', ''), '<textarea rows="10" cols="40" name="msg" class="pretty"></textarea>')
self.assertHTMLEqual(w.render('msg', 'example'), '<textarea rows="10" cols="40" name="msg" class="pretty">example</textarea>')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = Textarea(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('msg', '', attrs={'class': 'special'}), '<textarea rows="10" cols="40" name="msg" class="special"></textarea>')
self.assertHTMLEqual(w.render('msg', 'ŠĐĆŽćžšđ', attrs={'class': 'fun'}), '<textarea rows="10" cols="40" name="msg" class="fun">\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111</textarea>')
def test_checkboxinput(self):
w = CheckboxInput()
self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', None), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', False), '<input type="checkbox" name="is_cool" />')
self.assertHTMLEqual(w.render('is_cool', True), '<input checked="checked" type="checkbox" name="is_cool" />')
# Using any value that's not in ('', None, False, True) will check the checkbox
# and set the 'value' attribute.
self.assertHTMLEqual(w.render('is_cool', 'foo'), '<input checked="checked" type="checkbox" name="is_cool" value="foo" />')
self.assertHTMLEqual(w.render('is_cool', False, attrs={'class': 'pretty'}), '<input type="checkbox" name="is_cool" class="pretty" />')
# regression for #17114
self.assertHTMLEqual(w.render('is_cool', 0), '<input checked="checked" type="checkbox" name="is_cool" value="0" />')
self.assertHTMLEqual(w.render('is_cool', 1), '<input checked="checked" type="checkbox" name="is_cool" value="1" />')
# You can also pass 'attrs' to the constructor:
w = CheckboxInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('is_cool', ''), '<input type="checkbox" class="pretty" name="is_cool" />')
# 'attrs' passed to render() get precedence over those passed to the constructor:
w = CheckboxInput(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('is_cool', '', attrs={'class': 'special'}), '<input type="checkbox" class="special" name="is_cool" />')
# You can pass 'check_test' to the constructor. This is a callable that takes the
# value and returns True if the box should be checked.
w = CheckboxInput(check_test=lambda value: value.startswith('hello'))
self.assertHTMLEqual(w.render('greeting', ''), '<input type="checkbox" name="greeting" />')
self.assertHTMLEqual(w.render('greeting', 'hello'), '<input checked="checked" type="checkbox" name="greeting" value="hello" />')
self.assertHTMLEqual(w.render('greeting', 'hello there'), '<input checked="checked" type="checkbox" name="greeting" value="hello there" />')
self.assertHTMLEqual(w.render('greeting', 'hello & goodbye'), '<input checked="checked" type="checkbox" name="greeting" value="hello & goodbye" />')
# Ticket #17888: calling check_test shouldn't swallow exceptions
with self.assertRaises(AttributeError):
w.render('greeting', True)
# The CheckboxInput widget will return False if the key is not found in the data
# dictionary (because HTML form submission doesn't send any result for unchecked
# checkboxes).
self.assertFalse(w.value_from_datadict({}, {}, 'testing'))
value = w.value_from_datadict({'testing': '0'}, {}, 'testing')
self.assertIsInstance(value, bool)
self.assertTrue(value)
def test_select(self):
w = Select()
self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
# If the value is None, none of the options are selected:
self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
# If the value corresponds to a label (but not to an option value), none of the options are selected:
self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select name="beatle">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
# Only one option can be selected, see #8103:
self.assertHTMLEqual(w.render('choices', '0', choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0">extra</option>
</select>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
# The 'choices' argument can be any iterable:
from itertools import chain
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
things = ({'id': 1, 'name': 'And Boom'}, {'id': 2, 'name': 'One More Thing!'})
class SomeForm(Form):
somechoice = ChoiceField(choices=chain((('', '-' * 9),), [(thing['id'], thing['name']) for thing in things]))
f = SomeForm()
self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="" selected="selected">---------</option>\n<option value="1">And Boom</option>\n<option value="2">One More Thing!</option>\n</select></td></tr>')
f = SomeForm({'somechoice': 2})
self.assertHTMLEqual(f.as_table(), '<tr><th><label for="id_somechoice">Somechoice:</label></th><td><select name="somechoice" id="id_somechoice">\n<option value="">---------</option>\n<option value="1">And Boom</option>\n<option value="2" selected="selected">One More Thing!</option>\n</select></td></tr>')
# You can also pass 'choices' to the constructor:
w = Select(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<select name="num">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
# Choices are escaped correctly
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you > me')))), """<select name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>""")
# Unicode choices are correctly rendered as HTML
self.assertHTMLEqual(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select name="email">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
# If choices is passed to the constructor and is a generator, it can be iterated
# over multiple times without getting consumed:
w = Select(choices=get_choices())
self.assertHTMLEqual(w.render('num', 2), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
self.assertHTMLEqual(w.render('num', 3), """<select name="num">
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3" selected="selected">3</option>
<option value="4">4</option>
</select>""")
# Choices can be nested one level in order to create HTML optgroups:
w.choices = (
('outer1', 'Outer 1'),
('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))),
)
self.assertHTMLEqual(w.render('nestchoice', None), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
self.assertHTMLEqual(w.render('nestchoice', 'outer1'), """<select name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
self.assertHTMLEqual(w.render('nestchoice', 'inner1'), """<select name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
def test_nullbooleanselect(self):
w = NullBooleanSelect()
self.assertHTMLEqual(w.render('is_cool', True), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', False), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', None), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '2'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
self.assertHTMLEqual(w.render('is_cool', '3'), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
def test_selectmultiple(self):
w = SelectMultiple()
self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R" selected="selected">Ringo</option>
</select>""")
# If the value is None, none of the options are selected:
self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
# If the value corresponds to a label (but not to an option value), none of the options are selected:
self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>""")
# Multiple options (with the same value) can be selected, see #8103:
self.assertHTMLEqual(w.render('choices', ['0'], choices=(('0', '0'), ('1', '1'), ('2', '2'), ('3', '3'), ('0', 'extra'))), """<select multiple="multiple" name="choices">
<option value="0" selected="selected">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="0" selected="selected">extra</option>
</select>""")
# If multiple values are given, but some of them are not valid, the valid ones are selected:
self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<select multiple="multiple" name="beatles">
<option value="J" selected="selected">John</option>
<option value="P">Paul</option>
<option value="G" selected="selected">George</option>
<option value="R">Ringo</option>
</select>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
# The 'choices' argument can be any iterable:
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<select multiple="multiple" name="nums">
<option value="0">0</option>
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
</select>""")
# You can also pass 'choices' to the constructor:
w = SelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('nums', [2]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
</select>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<select multiple="multiple" name="nums">
<option value="1">1</option>
<option value="2" selected="selected">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>""")
# Choices are escaped correctly
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<select multiple="multiple" name="escape">
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="bad">you & me</option>
<option value="good">you > me</option>
</select>""")
# Unicode choices are correctly rendered as HTML
self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<select multiple="multiple" name="nums">\n<option value="1">1</option>\n<option value="2">2</option>\n<option value="3">3</option>\n<option value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" selected="selected">\u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</option>\n<option value="\u0107\u017e\u0161\u0111">abc\u0107\u017e\u0161\u0111</option>\n</select>')
# Choices can be nested one level in order to create HTML optgroups:
w.choices = (('outer1', 'Outer 1'), ('Group "1"', (('inner1', 'Inner 1'), ('inner2', 'Inner 2'))))
self.assertHTMLEqual(w.render('nestchoice', None), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
self.assertHTMLEqual(w.render('nestchoice', ['outer1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
self.assertHTMLEqual(w.render('nestchoice', ['inner1']), """<select multiple="multiple" name="nestchoice">
<option value="outer1">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1" selected="selected">Inner 1</option>
<option value="inner2">Inner 2</option>
</optgroup>
</select>""")
self.assertHTMLEqual(w.render('nestchoice', ['outer1', 'inner2']), """<select multiple="multiple" name="nestchoice">
<option value="outer1" selected="selected">Outer 1</option>
<optgroup label="Group "1"">
<option value="inner1">Inner 1</option>
<option value="inner2" selected="selected">Inner 2</option>
</optgroup>
</select>""")
def test_radioselect(self):
w = RadioSelect()
self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
# If the value is None, none of the options are checked:
self.assertHTMLEqual(w.render('beatle', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
# If the value corresponds to a label (but not to an option value), none of the options are checked:
self.assertHTMLEqual(w.render('beatle', 'John', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="radio" name="beatle" value="J" /> John</label></li>
<li><label><input type="radio" name="beatle" value="P" /> Paul</label></li>
<li><label><input type="radio" name="beatle" value="G" /> George</label></li>
<li><label><input type="radio" name="beatle" value="R" /> Ringo</label></li>
</ul>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('num', 2, choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('num', '2', choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('num', 2, choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
# The 'choices' argument can be any iterable:
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('num', 2, choices=get_choices()), """<ul>
<li><label><input type="radio" name="num" value="0" /> 0</label></li>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
</ul>""")
# You can also pass 'choices' to the constructor:
w = RadioSelect(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('num', 2), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
</ul>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('num', 2, choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="radio" name="num" value="1" /> 1</label></li>
<li><label><input checked="checked" type="radio" name="num" value="2" /> 2</label></li>
<li><label><input type="radio" name="num" value="3" /> 3</label></li>
<li><label><input type="radio" name="num" value="4" /> 4</label></li>
<li><label><input type="radio" name="num" value="5" /> 5</label></li>
</ul>""")
# Choices are escaped correctly
w = RadioSelect()
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<ul>
<li><label><input type="radio" name="escape" value="bad" /> you &amp; me</label></li>
<li><label><input type="radio" name="escape" value="good" /> you &gt; me</label></li>
</ul>""")
# Unicode choices are correctly rendered as HTML
w = RadioSelect()
self.assertHTMLEqual(six.text_type(w.render('email', 'ŠĐĆŽćžšđ', choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')])), '<ul>\n<li><label><input checked="checked" type="radio" name="email" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="radio" name="email" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
# Attributes provided at instantiation are passed to the constituent inputs
w = RadioSelect(attrs={'id': 'foo'})
self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul id="foo">
<li><label for="foo_0"><input checked="checked" type="radio" id="foo_0" value="J" name="beatle" /> John</label></li>
<li><label for="foo_1"><input type="radio" id="foo_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="foo_2"><input type="radio" id="foo_2" value="G" name="beatle" /> George</label></li>
<li><label for="foo_3"><input type="radio" id="foo_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
# Attributes provided at render-time are passed to the constituent inputs
w = RadioSelect()
self.assertHTMLEqual(w.render('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')), attrs={'id': 'bar'}), """<ul id="bar">
<li><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></li>
<li><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></li>
<li><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></li>
<li><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></li>
</ul>""")
def test_radiofieldrenderer(self):
# RadioSelect uses a RadioFieldRenderer to render the individual radio inputs.
# You can manipulate that object directly to customize the way the RadioSelect
# is rendered.
w = RadioSelect()
r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
inp_set1 = []
inp_set2 = []
inp_set3 = []
inp_set4 = []
for inp in r:
inp_set1.append(str(inp))
inp_set2.append('%s<br />' % inp)
inp_set3.append('<p>%s %s</p>' % (inp.tag(), inp.choice_label))
inp_set4.append('%s %s %s %s %s' % (inp.name, inp.value, inp.choice_value, inp.choice_label, inp.is_checked()))
self.assertHTMLEqual('\n'.join(inp_set1), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>
<label><input type="radio" name="beatle" value="P" /> Paul</label>
<label><input type="radio" name="beatle" value="G" /> George</label>
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
self.assertHTMLEqual('\n'.join(inp_set2), """<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label><br />""")
self.assertHTMLEqual('\n'.join(inp_set3), """<p><input checked="checked" type="radio" name="beatle" value="J" /> John</p>
<p><input type="radio" name="beatle" value="P" /> Paul</p>
<p><input type="radio" name="beatle" value="G" /> George</p>
<p><input type="radio" name="beatle" value="R" /> Ringo</p>""")
self.assertHTMLEqual('\n'.join(inp_set4), """beatle J J John True
beatle J P Paul False
beatle J G George False
beatle J R Ringo False""")
# A RadioFieldRenderer object also allows index access to individual RadioChoiceInput
w = RadioSelect()
r = w.get_renderer('beatle', 'J', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')))
self.assertHTMLEqual(str(r[1]), '<label><input type="radio" name="beatle" value="P" /> Paul</label>')
self.assertHTMLEqual(str(r[0]), '<label><input checked="checked" type="radio" name="beatle" value="J" /> John</label>')
self.assertTrue(r[0].is_checked())
self.assertFalse(r[1].is_checked())
self.assertEqual((r[1].name, r[1].value, r[1].choice_value, r[1].choice_label), ('beatle', 'J', 'P', 'Paul'))
# These individual widgets can accept extra attributes if manually rendered.
self.assertHTMLEqual(
r[1].render(attrs={'extra': 'value'}),
'<label><input type="radio" extra="value" name="beatle" value="P" /> Paul</label>'
)
with self.assertRaises(IndexError):
r[10]
# You can create your own custom renderers for RadioSelect to use.
class MyRenderer(RadioFieldRenderer):
def render(self):
return '<br />\n'.join(six.text_type(choice) for choice in self)
w = RadioSelect(renderer=MyRenderer)
self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
# Or you can use custom RadioSelect fields that use your custom renderer.
class CustomRadioSelect(RadioSelect):
renderer = MyRenderer
w = CustomRadioSelect()
self.assertHTMLEqual(w.render('beatle', 'G', choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<label><input type="radio" name="beatle" value="J" /> John</label><br />
<label><input type="radio" name="beatle" value="P" /> Paul</label><br />
<label><input checked="checked" type="radio" name="beatle" value="G" /> George</label><br />
<label><input type="radio" name="beatle" value="R" /> Ringo</label>""")
# You can customize rendering with outer_html/inner_html renderer variables (#22950)
class MyRenderer(RadioFieldRenderer):
outer_html = str('<div{id_attr}>{content}</div>') # str is just to test some Python 2 issue with bytestrings
inner_html = '<p>{choice_value}{sub_widgets}</p>'
w = RadioSelect(renderer=MyRenderer)
output = w.render('beatle', 'J',
choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo')),
attrs={'id': 'bar'})
self.assertIsInstance(output, SafeData)
self.assertHTMLEqual(output, """<div id="bar">
<p><label for="bar_0"><input checked="checked" type="radio" id="bar_0" value="J" name="beatle" /> John</label></p>
<p><label for="bar_1"><input type="radio" id="bar_1" value="P" name="beatle" /> Paul</label></p>
<p><label for="bar_2"><input type="radio" id="bar_2" value="G" name="beatle" /> George</label></p>
<p><label for="bar_3"><input type="radio" id="bar_3" value="R" name="beatle" /> Ringo</label></p>
</div>""")
def test_nested_choices(self):
# Choices can be nested for radio buttons:
w = RadioSelect()
w.choices = (
('unknown', 'Unknown'),
('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
)
self.assertHTMLEqual(w.render('nestchoice', 'dvd', attrs={'id': 'media'}), """<ul id="media">
<li><label for="media_0"><input id="media_0" name="nestchoice" type="radio" value="unknown" /> Unknown</label></li>
<li>Audio<ul id="media_1">
<li><label for="media_1_0"><input id="media_1_0" name="nestchoice" type="radio" value="vinyl" /> Vinyl</label></li>
<li><label for="media_1_1"><input id="media_1_1" name="nestchoice" type="radio" value="cd" /> CD</label></li>
</ul></li>
<li>Video<ul id="media_2">
<li><label for="media_2_0"><input id="media_2_0" name="nestchoice" type="radio" value="vhs" /> VHS</label></li>
<li><label for="media_2_1"><input checked="checked" id="media_2_1" name="nestchoice" type="radio" value="dvd" /> DVD</label></li>
</ul></li>
</ul>""")
# Choices can be nested for checkboxes:
w = CheckboxSelectMultiple()
w.choices = (
('unknown', 'Unknown'),
('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
('Video', (('vhs', 'VHS'), ('dvd', 'DVD'))),
)
self.assertHTMLEqual(w.render('nestchoice', ('vinyl', 'dvd'), attrs={'id': 'media'}), """<ul id="media">
<li><label for="media_0"><input id="media_0" name="nestchoice" type="checkbox" value="unknown" /> Unknown</label></li>
<li>Audio<ul id="media_1">
<li><label for="media_1_0"><input checked="checked" id="media_1_0" name="nestchoice" type="checkbox" value="vinyl" /> Vinyl</label></li>
<li><label for="media_1_1"><input id="media_1_1" name="nestchoice" type="checkbox" value="cd" /> CD</label></li>
</ul></li>
<li>Video<ul id="media_2">
<li><label for="media_2_0"><input id="media_2_0" name="nestchoice" type="checkbox" value="vhs" /> VHS</label></li>
<li><label for="media_2_1"><input checked="checked" id="media_2_1" name="nestchoice" type="checkbox" value="dvd" /> DVD</label></li>
</ul></li>
</ul>""")
def test_checkboxselectmultiple(self):
w = CheckboxSelectMultiple()
self.assertHTMLEqual(w.render('beatles', ['J'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
self.assertHTMLEqual(w.render('beatles', ['J', 'P', 'R'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value is None, none of the options are selected:
self.assertHTMLEqual(w.render('beatles', None, choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If the value corresponds to a label (but not to an option value), none of the options are selected:
self.assertHTMLEqual(w.render('beatles', ['John'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# If multiple values are given, but some of them are not valid, the valid ones are selected:
self.assertHTMLEqual(w.render('beatles', ['J', 'G', 'foo'], choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))), """<ul>
<li><label><input checked="checked" type="checkbox" name="beatles" value="J" /> John</label></li>
<li><label><input type="checkbox" name="beatles" value="P" /> Paul</label></li>
<li><label><input checked="checked" type="checkbox" name="beatles" value="G" /> George</label></li>
<li><label><input type="checkbox" name="beatles" value="R" /> Ringo</label></li>
</ul>""")
# The value is compared to its str():
self.assertHTMLEqual(w.render('nums', [2], choices=[('1', '1'), ('2', '2'), ('3', '3')]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', ['2'], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
self.assertHTMLEqual(w.render('nums', [2], choices=[(1, 1), (2, 2), (3, 3)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# The 'choices' argument can be any iterable:
def get_choices():
for i in range(5):
yield (i, i)
self.assertHTMLEqual(w.render('nums', [2], choices=get_choices()), """<ul>
<li><label><input type="checkbox" name="nums" value="0" /> 0</label></li>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
</ul>""")
# You can also pass 'choices' to the constructor:
w = CheckboxSelectMultiple(choices=[(1, 1), (2, 2), (3, 3)])
self.assertHTMLEqual(w.render('nums', [2]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
</ul>""")
# If 'choices' is passed to both the constructor and render(), then they'll both be in the output:
self.assertHTMLEqual(w.render('nums', [2], choices=[(4, 4), (5, 5)]), """<ul>
<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>
<li><label><input checked="checked" type="checkbox" name="nums" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="nums" value="4" /> 4</label></li>
<li><label><input type="checkbox" name="nums" value="5" /> 5</label></li>
</ul>""")
# Choices are escaped correctly
self.assertHTMLEqual(w.render('escape', None, choices=(('bad', 'you & me'), ('good', mark_safe('you &gt; me')))), """<ul>
<li><label><input type="checkbox" name="escape" value="1" /> 1</label></li>
<li><label><input type="checkbox" name="escape" value="2" /> 2</label></li>
<li><label><input type="checkbox" name="escape" value="3" /> 3</label></li>
<li><label><input type="checkbox" name="escape" value="bad" /> you &amp; me</label></li>
<li><label><input type="checkbox" name="escape" value="good" /> you &gt; me</label></li>
</ul>""")
# Unicode choices are correctly rendered as HTML
self.assertHTMLEqual(w.render('nums', ['ŠĐĆŽćžšđ'], choices=[('ŠĐĆŽćžšđ', 'ŠĐabcĆŽćžšđ'), ('ćžšđ', 'abcćžšđ')]), '<ul>\n<li><label><input type="checkbox" name="nums" value="1" /> 1</label></li>\n<li><label><input type="checkbox" name="nums" value="2" /> 2</label></li>\n<li><label><input type="checkbox" name="nums" value="3" /> 3</label></li>\n<li><label><input checked="checked" type="checkbox" name="nums" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" /> \u0160\u0110abc\u0106\u017d\u0107\u017e\u0161\u0111</label></li>\n<li><label><input type="checkbox" name="nums" value="\u0107\u017e\u0161\u0111" /> abc\u0107\u017e\u0161\u0111</label></li>\n</ul>')
# Each input gets a separate ID
self.assertHTMLEqual(CheckboxSelectMultiple().render('letters', list('ac'), choices=zip(list('abc'), list('ABC')), attrs={'id': 'abc'}), """<ul id="abc">
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
# Each input gets a separate ID when the ID is passed to the constructor
self.assertHTMLEqual(CheckboxSelectMultiple(attrs={'id': 'abc'}).render('letters', list('ac'), choices=zip(list('abc'), list('ABC'))), """<ul id="abc">
<li><label for="abc_0"><input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" /> A</label></li>
<li><label for="abc_1"><input type="checkbox" name="letters" value="b" id="abc_1" /> B</label></li>
<li><label for="abc_2"><input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" /> C</label></li>
</ul>""")
w = CheckboxSelectMultiple()
r = w.get_renderer('abc', 'b', choices=[(c, c.upper()) for c in 'abc'])
# You can iterate over the CheckboxFieldRenderer to get individual elements
expected = [
'<label><input type="checkbox" name="abc" value="a" /> A</label>',
'<label><input checked="checked" type="checkbox" name="abc" value="b" /> B</label>',
'<label><input type="checkbox" name="abc" value="c" /> C</label>',
]
for output, expected in zip(r, expected):
self.assertHTMLEqual(force_text(output), expected)
# You can access individual elements
self.assertHTMLEqual(force_text(r[1]),
'<label><input checked="checked" type="checkbox" name="abc" value="b" /> B</label>')
# Out-of-range errors are propagated
with self.assertRaises(IndexError):
r[42]
def test_subwidget(self):
# Each subwidget tag gets a separate ID when the widget has an ID specified
self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input checked="checked" type="checkbox" name="letters" value="c" id="abc_2" />""")
# Each subwidget tag does not get an ID if the widget does not have an ID specified
self.assertHTMLEqual("\n".join(c.tag() for c in CheckboxSelectMultiple().subwidgets('letters', list('ac'), choices=zip(list('abc'), list('ABC')))), """<input checked="checked" type="checkbox" name="letters" value="a" />
<input type="checkbox" name="letters" value="b" />
<input checked="checked" type="checkbox" name="letters" value="c" />""")
# The id_for_label property of the subwidget should return the ID that is used on the subwidget's tag
self.assertHTMLEqual("\n".join('<input type="checkbox" name="letters" value="%s" id="%s" />' % (c.choice_value, c.id_for_label) for c in CheckboxSelectMultiple(attrs={'id': 'abc'}).subwidgets('letters', [], choices=zip(list('abc'), list('ABC')))), """<input type="checkbox" name="letters" value="a" id="abc_0" />
<input type="checkbox" name="letters" value="b" id="abc_1" />
<input type="checkbox" name="letters" value="c" id="abc_2" />""")
def test_multi(self):
class MyMultiWidget(MultiWidget):
def decompress(self, value):
if value:
return value.split('__')
return ['', '']
def format_output(self, rendered_widgets):
return '<br />'.join(rendered_widgets)
w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})))
self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
self.assertHTMLEqual(w.render('name', 'john__lennon'), '<input type="text" class="big" value="john" name="name_0" /><br /><input type="text" class="small" value="lennon" name="name_1" />')
self.assertHTMLEqual(w.render('name', 'john__lennon', attrs={'id': 'foo'}), '<input id="foo_0" type="text" class="big" value="john" name="name_0" /><br /><input id="foo_1" type="text" class="small" value="lennon" name="name_1" />')
w = MyMultiWidget(widgets=(TextInput(attrs={'class': 'big'}), TextInput(attrs={'class': 'small'})), attrs={'id': 'bar'})
self.assertHTMLEqual(w.render('name', ['john', 'lennon']), '<input id="bar_0" type="text" class="big" value="john" name="name_0" /><br /><input id="bar_1" type="text" class="small" value="lennon" name="name_1" />')
# Test needs_multipart_form=True if any widget needs it
w = MyMultiWidget(widgets=(TextInput(), FileInput()))
self.assertTrue(w.needs_multipart_form)
# Test needs_multipart_form=False if no widget needs it
w = MyMultiWidget(widgets=(TextInput(), TextInput()))
self.assertFalse(w.needs_multipart_form)
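# Illustrative extra check (an assumption, not part of the original suite):
# any file-handling sub-widget is enough, wherever it sits in the widget list.
w = MyMultiWidget(widgets=(FileInput(), TextInput()))
self.assertTrue(w.needs_multipart_form)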
def test_splitdatetime(self):
w = SplitDateTimeWidget()
self.assertHTMLEqual(w.render('date', ''), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date_0" /><input type="text" name="date_1" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
self.assertHTMLEqual(w.render('date', [datetime.date(2006, 1, 10), datetime.time(7, 30)]), '<input type="text" name="date_0" value="2006-01-10" /><input type="text" name="date_1" value="07:30:00" />')
# You can also pass 'attrs' to the constructor. In this case, the attrs will be included on both widgets.
w = SplitDateTimeWidget(attrs={'class': 'pretty'})
self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" class="pretty" value="2006-01-10" name="date_0" /><input type="text" class="pretty" value="07:30:00" name="date_1" />')
# Use 'date_format' and 'time_format' to change the way a value is displayed.
w = SplitDateTimeWidget(date_format='%d/%m/%Y', time_format='%H:%M')
self.assertHTMLEqual(w.render('date', datetime.datetime(2006, 1, 10, 7, 30)), '<input type="text" name="date_0" value="10/01/2006" /><input type="text" name="date_1" value="07:30" />')
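# Illustrative extra check (an assumption, not part of the original suite):
# as a MultiWidget, SplitDateTimeWidget reads its value back out of the data
# dict as a [date, time] pair of raw strings.
self.assertEqual(w.value_from_datadict({'date_0': '2006-01-10', 'date_1': '07:30:00'}, {}, 'date'), ['2006-01-10', '07:30:00'])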
def test_datetimeinput(self):
w = DateTimeInput()
self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
self.assertEqual(str(d), '2007-09-17 12:51:34.482548')
# The microseconds are trimmed on display, by default.
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="text" name="date" value="2007-09-17 12:51:00" />')
# Use 'format' to change the way a value is displayed.
w = DateTimeInput(format='%d/%m/%Y %H:%M', attrs={'type': 'datetime'})
self.assertHTMLEqual(w.render('date', d), '<input type="datetime" name="date" value="17/09/2007 12:51" />')
def test_dateinput(self):
w = DateInput()
self.assertHTMLEqual(w.render('date', None), '<input type="text" name="date" />')
d = datetime.date(2007, 9, 17)
self.assertEqual(str(d), '2007-09-17')
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17" />')
self.assertHTMLEqual(w.render('date', datetime.date(2007, 9, 17)), '<input type="text" name="date" value="2007-09-17" />')
# We should be able to initialize from a unicode value.
self.assertHTMLEqual(w.render('date', '2007-09-17'), '<input type="text" name="date" value="2007-09-17" />')
# Use 'format' to change the way a value is displayed.
w = DateInput(format='%d/%m/%Y', attrs={'type': 'date'})
self.assertHTMLEqual(w.render('date', d), '<input type="date" name="date" value="17/09/2007" />')
def test_timeinput(self):
w = TimeInput()
self.assertHTMLEqual(w.render('time', None), '<input type="text" name="time" />')
t = datetime.time(12, 51, 34, 482548)
self.assertEqual(str(t), '12:51:34.482548')
# The microseconds are trimmed on display, by default.
self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')
self.assertHTMLEqual(w.render('time', datetime.time(12, 51, 34)), '<input type="text" name="time" value="12:51:34" />')
self.assertHTMLEqual(w.render('time', datetime.time(12, 51)), '<input type="text" name="time" value="12:51:00" />')
# We should be able to initialize from a unicode value.
self.assertHTMLEqual(w.render('time', '13:12:11'), '<input type="text" name="time" value="13:12:11" />')
# Use 'format' to change the way a value is displayed.
w = TimeInput(format='%H:%M', attrs={'type': 'time'})
self.assertHTMLEqual(w.render('time', t), '<input type="time" name="time" value="12:51" />')
def test_splithiddendatetime(self):
from django.forms.widgets import SplitHiddenDateTimeWidget
w = SplitHiddenDateTimeWidget()
self.assertHTMLEqual(w.render('date', ''), '<input type="hidden" name="date_0" /><input type="hidden" name="date_1" />')
d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
self.assertHTMLEqual(str(d), '2007-09-17 12:51:34.482548')
self.assertHTMLEqual(w.render('date', d), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51, 34)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:34" />')
self.assertHTMLEqual(w.render('date', datetime.datetime(2007, 9, 17, 12, 51)), '<input type="hidden" name="date_0" value="2007-09-17" /><input type="hidden" name="date_1" value="12:51:00" />')
def test_multiwidget(self):
# MultiWidgets are widgets composed of other widgets. They are usually
# combined with MultiValueFields - a field that is composed of other fields.
# MultiWidgets can themselves be composed of other MultiWidgets.
# SplitDateTimeWidget is one example of a MultiWidget.
class ComplexMultiWidget(MultiWidget):
def __init__(self, attrs=None):
widgets = (
TextInput(),
SelectMultiple(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeWidget(),
)
super(ComplexMultiWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
data = value.split(',')
return [data[0], list(data[1]), datetime.datetime.strptime(data[2], "%Y-%m-%d %H:%M:%S")]
return [None, None, None]
def format_output(self, rendered_widgets):
return '\n'.join(rendered_widgets)
w = ComplexMultiWidget()
self.assertHTMLEqual(
w.render('name', 'some text,JP,2007-04-25 06:24:00'),
"""
<input type="text" name="name_0" value="some text" />
<select multiple="multiple" name="name_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="name_2_0" value="2007-04-25" />
<input type="text" name="name_2_1" value="06:24:00" />
""",
)
class ComplexField(MultiValueField):
def __init__(self, required=True, widget=None, label=None, initial=None):
fields = (
CharField(),
MultipleChoiceField(choices=(('J', 'John'), ('P', 'Paul'), ('G', 'George'), ('R', 'Ringo'))),
SplitDateTimeField()
)
super(ComplexField, self).__init__(fields, required, widget, label, initial)
def compress(self, data_list):
if data_list:
return '%s,%s,%s' % (data_list[0], ''.join(data_list[1]), data_list[2])
return None
f = ComplexField(widget=w)
self.assertEqual(
f.clean(['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]),
'some text,JP,2007-04-25 06:24:00',
)
with self.assertRaisesMessage(ValidationError,
"'Select a valid choice. X is not one of the available choices.'"):
f.clean(['some text', ['X'], ['2007-04-25', '6:24:00']])
# If insufficient data is provided, None is substituted
self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['some text', ['JP']])
# test with no initial data
self.assertTrue(f.has_changed(None, ['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the data is the same as initial
self.assertFalse(f.has_changed('some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the first widget's data has changed
self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
['other text', ['J', 'P'], ['2007-04-25', '6:24:00']]))
# test when the last widget's data has changed. this ensures that it is not
# short circuiting while testing the widgets.
self.assertTrue(f.has_changed('some text,JP,2007-04-25 06:24:00',
['some text', ['J', 'P'], ['2009-04-25', '11:44:00']]))
class ComplexFieldForm(Form):
field1 = ComplexField(widget=w)
f = ComplexFieldForm()
self.assertHTMLEqual(
f.as_table(),
"""
<tr><th><label for="id_field1_0">Field1:</label></th>
<td><input type="text" name="field1_0" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J">John</option>
<option value="P">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" id="id_field1_2_0" />
<input type="text" name="field1_2_1" id="id_field1_2_1" /></td></tr>
""",
)
f = ComplexFieldForm({
'field1_0': 'some text',
'field1_1': ['J', 'P'],
'field1_2_0': '2007-04-25',
'field1_2_1': '06:24:00',
})
self.assertHTMLEqual(
f.as_table(),
"""
<tr><th><label for="id_field1_0">Field1:</label></th>
<td><input type="text" name="field1_0" value="some text" id="id_field1_0" />
<select multiple="multiple" name="field1_1" id="id_field1_1">
<option value="J" selected="selected">John</option>
<option value="P" selected="selected">Paul</option>
<option value="G">George</option>
<option value="R">Ringo</option>
</select>
<input type="text" name="field1_2_0" value="2007-04-25" id="id_field1_2_0" />
<input type="text" name="field1_2_1" value="06:24:00" id="id_field1_2_1" /></td></tr>
""",
)
self.assertEqual(f.cleaned_data['field1'], 'some text,JP,2007-04-25 06:24:00')
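# Illustrative extra check (an assumption, not part of the original suite):
# decompress() is the inverse step, splitting the compressed string back into
# per-widget values.
self.assertEqual(w.decompress('some text,JP,2007-04-25 06:24:00'), ['some text', ['J', 'P'], datetime.datetime(2007, 4, 25, 6, 24)])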
def test_sub_widget_html_safe(self):
widget = TextInput()
subwidget = next(widget.subwidgets('username', 'John Doe'))
self.assertTrue(hasattr(subwidget, '__html__'))
self.assertEqual(force_text(subwidget), subwidget.__html__())
def test_choice_input_html_safe(self):
widget = ChoiceInput('choices', 'CHOICE1', {}, ('CHOICE1', 'first choice'), 0)
self.assertTrue(hasattr(ChoiceInput, '__html__'))
self.assertEqual(force_text(widget), widget.__html__())
def test_choice_field_renderer_html_safe(self):
renderer = ChoiceFieldRenderer('choices', 'CHOICE1', {}, [('CHOICE1', 'first_choice')])
renderer.choice_input_class = lambda *args: args
self.assertTrue(hasattr(ChoiceFieldRenderer, '__html__'))
self.assertEqual(force_text(renderer), renderer.__html__())
class NullBooleanSelectLazyForm(Form):
"""Form to test for lazy evaluation. Refs #17190"""
bool = BooleanField(widget=NullBooleanSelect())
@override_settings(USE_L10N=True)
class FormsI18NWidgetsTests(SimpleTestCase):
def setUp(self):
super(FormsI18NWidgetsTests, self).setUp()
translation.activate('de-at')
def tearDown(self):
translation.deactivate()
super(FormsI18NWidgetsTests, self).tearDown()
def test_datetimeinput(self):
w = DateTimeInput()
d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007 12:51:34" />')
def test_dateinput(self):
w = DateInput()
d = datetime.date(2007, 9, 17)
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17.09.2007" />')
def test_timeinput(self):
w = TimeInput()
t = datetime.time(12, 51, 34, 482548)
self.assertHTMLEqual(w.render('time', t), '<input type="text" name="time" value="12:51:34" />')
def test_datetime_locale_aware(self):
w = DateTimeInput()
d = datetime.datetime(2007, 9, 17, 12, 51, 34, 482548)
with self.settings(USE_L10N=False):
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="2007-09-17 12:51:34" />')
with translation.override('es'):
self.assertHTMLEqual(w.render('date', d), '<input type="text" name="date" value="17/09/2007 12:51:34" />')
def test_splithiddendatetime(self):
from django.forms.widgets import SplitHiddenDateTimeWidget
w = SplitHiddenDateTimeWidget()
self.assertHTMLEqual(
w.render('date', datetime.datetime(2007, 9, 17, 12, 51)),
"""
<input type="hidden" name="date_0" value="17.09.2007" />
<input type="hidden" name="date_1" value="12:51:00" />
""",
)
def test_nullbooleanselect(self):
"""
Ensure that the NullBooleanSelect widget's options are lazily
localized.
Refs #17190
"""
f = NullBooleanSelectLazyForm()
self.assertHTMLEqual(
f.fields['bool'].widget.render('id_bool', True),
"""
<select name="id_bool">
<option value="1">Unbekannt</option>
<option value="2" selected="selected">Ja</option>
<option value="3">Nein</option>
</select>
""",
)
class SelectAndTextWidget(MultiWidget):
"""
MultiWidget subclass
"""
def __init__(self, choices=[]):
widgets = [
RadioSelect(choices=choices),
TextInput
]
super(SelectAndTextWidget, self).__init__(widgets)
def _set_choices(self, choices):
"""
When choices are set for this widget, we want to pass those along to the Select widget
"""
self.widgets[0].choices = choices
def _get_choices(self):
"""
The choices for this widget are the Select widget's choices
"""
return self.widgets[0].choices
choices = property(_get_choices, _set_choices)
class WidgetTests(SimpleTestCase):
def test_12048(self):
# See ticket #12048.
w1 = SelectAndTextWidget(choices=[1, 2, 3])
w2 = copy.deepcopy(w1)
w2.choices = [4, 5, 6]
# w2 ought to be independent of w1, since MultiWidget ought
# to make a copy of its sub-widgets when it is copied.
self.assertEqual(w1.choices, [1, 2, 3])
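# Illustrative extra check (not in the original suite): the deep copy keeps
# its own, updated choices.
self.assertEqual(w2.choices, [4, 5, 6])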
@override_settings(ROOT_URLCONF='forms_tests.urls')
class LiveWidgetTests(AdminSeleniumWebDriverTestCase):
available_apps = ['forms_tests'] + AdminSeleniumWebDriverTestCase.available_apps
def test_textarea_trailing_newlines(self):
"""
Test that a roundtrip on a ModelForm doesn't alter the TextField value
"""
article = Article.objects.create(content="\nTst\n")
self.selenium.get('%s%s' % (self.live_server_url,
reverse('article_form', args=[article.pk])))
self.selenium.find_element_by_id('submit').submit()
article = Article.objects.get(pk=article.pk)
# Should be "\nTst\n" after #19251 is fixed
self.assertEqual(article.content, "\r\nTst\r\n")
@python_2_unicode_compatible
class FakeFieldFile(object):
"""
Quacks like a FieldFile (has a .url and unicode representation), but
doesn't require us to care about storages etc.
"""
url = 'something'
def __str__(self):
return self.url
class ClearableFileInputTests(SimpleTestCase):
def test_clear_input_renders(self):
"""
A ClearableFileInput with is_required False and rendered with
an initial value that is a file renders a clear checkbox.
"""
widget = ClearableFileInput()
widget.is_required = False
self.assertHTMLEqual(
widget.render('myfile', FakeFieldFile()),
"""
Currently: <a href="something">something</a>
<input type="checkbox" name="myfile-clear" id="myfile-clear_id" />
<label for="myfile-clear_id">Clear</label><br />
Change: <input type="file" name="myfile" />
""",
)
def test_html_escaped(self):
"""
A ClearableFileInput should escape name, filename and URL when
rendering HTML. Refs #15182.
"""
@python_2_unicode_compatible
class StrangeFieldFile(object):
url = "something?chapter=1§=2©=3&lang=en"
def __str__(self):
return '''something<div onclick="alert('oops')">.jpg'''
widget = ClearableFileInput()
field = StrangeFieldFile()
output = widget.render('my<div>file', field)
self.assertNotIn(field.url, output)
self.assertIn('href="something?chapter=1&amp;sect=2&amp;copy=3&amp;lang=en"', output)
self.assertNotIn(six.text_type(field), output)
self.assertIn('something&lt;div onclick=&quot;alert(&#39;oops&#39;)&quot;&gt;.jpg', output)
self.assertIn('my&lt;div&gt;file', output)
self.assertNotIn('my<div>file', output)
def test_html_does_not_mask_exceptions(self):
"""
A ClearableFileInput should not mask exceptions produced while
checking that it has a value.
"""
@python_2_unicode_compatible
class FailingURLFieldFile(object):
@property
def url(self):
raise RuntimeError('Canary')
def __str__(self):
return 'value'
widget = ClearableFileInput()
field = FailingURLFieldFile()
with self.assertRaisesMessage(RuntimeError, 'Canary'):
widget.render('myfile', field)
def test_clear_input_renders_only_if_not_required(self):
"""
A ClearableFileInput with is_required=True does not render a clear
checkbox.
"""
widget = ClearableFileInput()
widget.is_required = True
self.assertHTMLEqual(
widget.render('myfile', FakeFieldFile()),
"""
Currently: <a href="something">something</a> <br />
Change: <input type="file" name="myfile" />
""",
)
def test_clear_input_renders_only_if_initial(self):
"""
A ClearableFileInput instantiated with no initial value does not render
a clear checkbox.
"""
widget = ClearableFileInput()
widget.is_required = False
self.assertHTMLEqual(widget.render('myfile', None),
'<input type="file" name="myfile" />')
def test_clear_input_checked_returns_false(self):
"""
ClearableFileInput.value_from_datadict returns False if the clear
checkbox is checked, if not required.
"""
widget = ClearableFileInput()
widget.is_required = False
self.assertEqual(widget.value_from_datadict(
data={'myfile-clear': True},
files={},
name='myfile'), False)
def test_clear_input_checked_returns_false_only_if_not_required(self):
"""
ClearableFileInput.value_from_datadict never returns False if the field
is required.
"""
widget = ClearableFileInput()
widget.is_required = True
f = SimpleUploadedFile('something.txt', b'content')
self.assertEqual(widget.value_from_datadict(
data={'myfile-clear': True},
files={'myfile': f},
name='myfile'), f)
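# Illustrative extra check (an assumption, not part of the original suite):
# when the clear checkbox is not checked, the uploaded file is returned even
# for a non-required field.
widget = ClearableFileInput()
widget.is_required = False
f = SimpleUploadedFile('something.txt', b'content')
self.assertEqual(widget.value_from_datadict(data={}, files={'myfile': f}, name='myfile'), f)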
def test_render_custom_template(self):
widget = ClearableFileInput()
widget.template_with_initial = (
'%(initial_text)s: <img src="%(initial_url)s" alt="%(initial)s" /> '
'%(clear_template)s<br />%(input_text)s: %(input)s'
)
self.assertHTMLEqual(
widget.render('myfile', FakeFieldFile()),
'Currently: <img src="something" alt="something" /> '
'<input type="checkbox" name="myfile-clear" id="myfile-clear_id" /> '
'<label for="myfile-clear_id">Clear</label><br />Change: <input type="file" name="myfile" />'
)
class GetDate(Form):
mydate = DateField(widget=SelectDateWidget)
class SelectDateWidgetTests(SimpleTestCase):
# The forms library comes with some extra, higher-level Field and Widget classes that demonstrate some of the library's abilities.
def test_selectdate(self):
self.maxDiff = None
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
# Rendering the default state.
self.assertHTMLEqual(
w.render('mydate', ''),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Rendering the None or '' values should yield the same output.
self.assertHTMLEqual(w.render('mydate', None), w.render('mydate', ''))
# Rendering a string value.
self.assertHTMLEqual(
w.render('mydate', '2010-04-15'),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4" selected="selected">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15" selected="selected">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Rendering a datetime value.
self.assertHTMLEqual(w.render('mydate', datetime.date(2010, 4, 15)), w.render('mydate', '2010-04-15'))
# Invalid dates should still render the failed date.
self.assertHTMLEqual(
w.render('mydate', '2010-02-31'),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">January</option>
<option value="2" selected="selected">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31" selected="selected">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Rendering with a custom months dict.
w = SelectDateWidget(months=MONTHS_AP, years=('2013',))
self.assertHTMLEqual(
w.render('mydate', ''),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">---</option>
<option value="1">Jan.</option>
<option value="2">Feb.</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">Aug.</option>
<option value="9">Sept.</option>
<option value="10">Oct.</option>
<option value="11">Nov.</option>
<option value="12">Dec.</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">---</option>
<option value="2013">2013</option>
</select>
""",
)
a = GetDate({'mydate_month': '4', 'mydate_day': '1', 'mydate_year': '2008'})
self.assertTrue(a.is_valid())
self.assertEqual(a.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# As with any widget that implements get_value_from_datadict,
# we must be prepared to accept the input from the "as_hidden"
# rendering as well.
self.assertHTMLEqual(
a['mydate'].as_hidden(),
'<input type="hidden" name="mydate" value="2008-4-1" id="id_mydate" />',
)
b = GetDate({'mydate': '2008-4-1'})
self.assertTrue(b.is_valid())
self.assertEqual(b.cleaned_data['mydate'], datetime.date(2008, 4, 1))
# Invalid dates shouldn't be allowed
c = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(c.is_valid())
self.assertEqual(c.errors, {'mydate': ['Enter a valid date.']})
# label tag is correctly associated with month dropdown
d = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_month">', d.as_p())
def test_selectdate_required(self):
class GetNotRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=False)
class GetRequiredDate(Form):
mydate = DateField(widget=SelectDateWidget, required=True)
self.assertFalse(GetNotRequiredDate().fields['mydate'].widget.is_required)
self.assertTrue(GetRequiredDate().fields['mydate'].widget.is_required)
def test_selectdate_empty_label(self):
w = SelectDateWidget(years=('2014',), empty_label='empty_label')
# Rendering the default state with empty_label set as a string.
self.assertInHTML('<option value="0">empty_label</option>', w.render('mydate', ''), count=3)
w = SelectDateWidget(years=('2014',), empty_label=('empty_year', 'empty_month', 'empty_day'))
# Rendering the default state with an empty_label tuple.
self.assertHTMLEqual(
w.render('mydate', ''),
"""
<select name="mydate_month" id="id_mydate_month">
<option value="0">empty_month</option>
<option value="1">January</option>
<option value="2">February</option>
<option value="3">March</option>
<option value="4">April</option>
<option value="5">May</option>
<option value="6">June</option>
<option value="7">July</option>
<option value="8">August</option>
<option value="9">September</option>
<option value="10">October</option>
<option value="11">November</option>
<option value="12">December</option>
</select>
<select name="mydate_day" id="id_mydate_day">
<option value="0">empty_day</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="mydate_year" id="id_mydate_year">
<option value="0">empty_year</option>
<option value="2014">2014</option>
</select>
""",
)
self.assertRaisesMessage(ValueError, 'empty_label list/tuple must have 3 elements.',
SelectDateWidget, years=('2014',), empty_label=('not enough', 'values'))
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n(self):
w = SelectDateWidget(years=('2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016'))
self.assertEqual(
w.value_from_datadict({'date_year': '2010', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-2010',
)
self.assertHTMLEqual(
w.render('date', '13-08-2010'),
"""
<select name="date_day" id="id_date_day">
<option value="0">---</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
<option value="11">11</option>
<option value="12">12</option>
<option value="13" selected="selected">13</option>
<option value="14">14</option>
<option value="15">15</option>
<option value="16">16</option>
<option value="17">17</option>
<option value="18">18</option>
<option value="19">19</option>
<option value="20">20</option>
<option value="21">21</option>
<option value="22">22</option>
<option value="23">23</option>
<option value="24">24</option>
<option value="25">25</option>
<option value="26">26</option>
<option value="27">27</option>
<option value="28">28</option>
<option value="29">29</option>
<option value="30">30</option>
<option value="31">31</option>
</select>
<select name="date_month" id="id_date_month">
<option value="0">---</option>
<option value="1">januari</option>
<option value="2">februari</option>
<option value="3">maart</option>
<option value="4">april</option>
<option value="5">mei</option>
<option value="6">juni</option>
<option value="7">juli</option>
<option value="8" selected="selected">augustus</option>
<option value="9">september</option>
<option value="10">oktober</option>
<option value="11">november</option>
<option value="12">december</option>
</select>
<select name="date_year" id="id_date_year">
<option value="0">---</option>
<option value="2007">2007</option>
<option value="2008">2008</option>
<option value="2009">2009</option>
<option value="2010" selected="selected">2010</option>
<option value="2011">2011</option>
<option value="2012">2012</option>
<option value="2013">2013</option>
<option value="2014">2014</option>
<option value="2015">2015</option>
<option value="2016">2016</option>
</select>
""",
)
# Even with an invalid date, the widget should reflect the entered value (#17401).
self.assertEqual(w.render('mydate', '2010-02-30').count('selected="selected"'), 3)
# Years before 1900 work
w = SelectDateWidget(years=('1899',))
self.assertEqual(
w.value_from_datadict({'date_year': '1899', 'date_month': '8', 'date_day': '13'}, {}, 'date'),
'13-08-1899',
)
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_date_changed(self):
"""
Ensure that DateField.has_changed() with SelectDateWidget works
correctly with a localized date format.
Refs #17165.
"""
# With Field.show_hidden_initial=False -----------------------
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDate({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '2',
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
# With Field.show_hidden_initial=True ------------------------
class GetDateShowHiddenInitial(Form):
mydate = DateField(widget=SelectDateWidget, show_hidden_initial=True)
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '1',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 1))
}, initial={'mydate': datetime.date(2008, 4, 22)})
self.assertTrue(b.has_changed())
b = GetDateShowHiddenInitial({
'mydate_year': '2008',
'mydate_month': '4',
'mydate_day': '22',
'initial-mydate': HiddenInput()._format_value(datetime.date(2008, 4, 22))
}, initial={'mydate': datetime.date(2008, 4, 1)})
self.assertFalse(b.has_changed())
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_l10n_invalid_date_in(self):
# Invalid dates shouldn't be allowed
a = GetDate({'mydate_month': '2', 'mydate_day': '31', 'mydate_year': '2010'})
self.assertFalse(a.is_valid())
# 'Geef een geldige datum op.' = 'Enter a valid date.'
self.assertEqual(a.errors, {'mydate': ['Geef een geldige datum op.']})
@override_settings(USE_L10N=True)
@translation.override('nl')
def test_form_label_association(self):
# label tag is correctly associated with first rendered dropdown
a = GetDate({'mydate_month': '1', 'mydate_day': '1', 'mydate_year': '2010'})
self.assertIn('<label for="id_mydate_day">', a.as_p())
class SelectWidgetTests(SimpleTestCase):
def test_deepcopy(self):
"""
__deepcopy__() should copy all attributes properly (#25085).
"""
widget = Select()
obj = copy.deepcopy(widget)
self.assertIsNot(widget, obj)
self.assertEqual(widget.choices, obj.choices)
self.assertIsNot(widget.choices, obj.choices)
self.assertEqual(widget.attrs, obj.attrs)
self.assertIsNot(widget.attrs, obj.attrs)
| bsd-3-clause |
awkspace/ansible | test/units/executor/test_play_iterator.py | 45 | 18430 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from units.compat.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import HostState, PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.task import Task
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
class TestPlayIterator(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_host_state(self):
hs = HostState(blocks=[x for x in range(0, 10)])
hs.tasks_child_state = HostState(blocks=[0])
hs.rescue_child_state = HostState(blocks=[1])
hs.always_child_state = HostState(blocks=[2])
hs.__repr__()
hs.run_state = 100
hs.__repr__()
hs.fail_state = 15
hs.__repr__()
for i in range(0, 10):
hs.cur_block = i
self.assertEqual(hs.get_current_block(), i)
new_hs = hs.copy()
@patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
def test_play_iterator(self):
# import epdb; epdb.st()
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
roles:
- test_role
pre_tasks:
- debug: msg="this is a pre_task"
tasks:
- debug: msg="this is a regular task"
- block:
- debug: msg="this is a block task"
- block:
- debug: msg="this is a sub-block in a block"
rescue:
- debug: msg="this is a rescue task"
- block:
- debug: msg="this is a sub-block in a rescue"
always:
- debug: msg="this is an always task"
- block:
- debug: msg="this is a sub-block in an always"
post_tasks:
- debug: msg="this is a post_task"
""",
'/etc/ansible/roles/test_role/tasks/main.yml': """
- name: role task
debug: msg="this is a role task"
- block:
- name: role block task
debug: msg="inside block in role"
always:
- name: role always task
debug: msg="always task in block in role"
- include: foo.yml
- name: role task after include
debug: msg="after include in role"
- block:
- name: starting role nested block 1
debug:
- block:
- name: role nested block 1 task 1
debug:
- name: role nested block 1 task 2
debug:
- name: role nested block 1 task 3
debug:
- name: end of role nested block 1
debug:
- name: starting role nested block 2
debug:
- block:
- name: role nested block 2 task 1
debug:
- name: role nested block 2 task 2
debug:
- name: role nested block 2 task 3
debug:
- name: end of role nested block 2
debug:
""",
'/etc/ansible/roles/test_role/tasks/foo.yml': """
- name: role included task
debug: msg="this is task in an include from a role"
"""
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
mock_var_manager._fact_cache['host00'] = dict()
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# pre task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# role task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.name, "role task")
self.assertIsNotNone(task._role)
# role block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role block task")
self.assertIsNotNone(task._role)
# role block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role always task")
self.assertIsNotNone(task._role)
# role include task
# (host_state, task) = itr.get_next_task_for_host(hosts[0])
# self.assertIsNotNone(task)
# self.assertEqual(task.action, 'debug')
# self.assertEqual(task.name, "role included task")
# self.assertIsNotNone(task._role)
# role task after include
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role task after include")
self.assertIsNotNone(task._role)
# role nested block tasks
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "starting role nested block 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 1 task 3")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "end of role nested block 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "starting role nested block 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 1")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 2")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "role nested block 2 task 3")
self.assertIsNotNone(task._role)
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.name, "end of role nested block 2")
self.assertIsNotNone(task._role)
# regular play task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertIsNone(task._role)
# block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a block task"))
# sub-block task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a block"))
# mark the host failed
itr.mark_host_failed(hosts[0])
# block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a rescue task"))
# sub-block rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in a rescue"))
# block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is an always task"))
# sub-block always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg="this is a sub-block in an always"))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# post task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
# host 0 shouldn't be in the failed hosts, as the error
# was handled by a rescue block
failed_hosts = itr.get_failed_hosts()
self.assertNotIn(hosts[0], failed_hosts)
def test_play_iterator_nested_blocks(self):
fake_loader = DictDataLoader({
"test_play.yml": """
- hosts: all
gather_facts: false
tasks:
- block:
- block:
- block:
- block:
- block:
- debug: msg="this is the first task"
- ping:
rescue:
- block:
- block:
- block:
- block:
- debug: msg="this is the rescue task"
always:
- block:
- block:
- block:
- block:
- debug: msg="this is the always task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# get the first task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the first task'))
# fail the host
itr.mark_host_failed(hosts[0])
        # get the rescue task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the rescue task'))
# get the always task
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'debug')
self.assertEqual(task.args, dict(msg='this is the always task'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# implicit meta: flush_handlers
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNotNone(task)
self.assertEqual(task.action, 'meta')
self.assertEqual(task.args, dict(_raw_params='flush_handlers'))
# end of iteration
(host_state, task) = itr.get_next_task_for_host(hosts[0])
self.assertIsNone(task)
def test_play_iterator_add_tasks(self):
fake_loader = DictDataLoader({
'test_play.yml': """
- hosts: all
gather_facts: no
tasks:
- debug: msg="dummy task"
""",
})
mock_var_manager = MagicMock()
mock_var_manager._fact_cache = dict()
mock_var_manager.get_vars.return_value = dict()
p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)
hosts = []
for i in range(0, 10):
host = MagicMock()
host.name = host.get_name.return_value = 'host%02d' % i
hosts.append(host)
inventory = MagicMock()
inventory.get_hosts.return_value = hosts
inventory.filter_hosts.return_value = hosts
play_context = PlayContext(play=p._entries[0])
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# test the high-level add_tasks() method
s = HostState(blocks=[0, 1, 2])
itr._insert_tasks_into_state = MagicMock(return_value=s)
itr.add_tasks(hosts[0], [MagicMock(), MagicMock(), MagicMock()])
self.assertEqual(itr._host_states[hosts[0].name], s)
# now actually test the lower-level method that does the work
itr = PlayIterator(
inventory=inventory,
play=p._entries[0],
play_context=play_context,
variable_manager=mock_var_manager,
all_vars=dict(),
)
# iterate past first task
_, task = itr.get_next_task_for_host(hosts[0])
        while task and task.action != 'debug':
_, task = itr.get_next_task_for_host(hosts[0])
if task is None:
raise Exception("iterated past end of play while looking for place to insert tasks")
# get the current host state and copy it so we can mutate it
s = itr.get_host_state(hosts[0])
s_copy = s.copy()
        # assert that with an empty task list, or when in a failed state, we simply return the state as-is
res_state = itr._insert_tasks_into_state(s_copy, task_list=[])
self.assertEqual(res_state, s_copy)
s_copy.fail_state = itr.FAILED_TASKS
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
self.assertEqual(res_state, s_copy)
# but if we've failed with a rescue/always block
mock_task = MagicMock()
s_copy.run_state = itr.ITERATING_RESCUE
res_state = itr._insert_tasks_into_state(s_copy, task_list=[mock_task])
self.assertEqual(res_state, s_copy)
self.assertIn(mock_task, res_state._blocks[res_state.cur_block].rescue)
itr._host_states[hosts[0].name] = res_state
(next_state, next_task) = itr.get_next_task_for_host(hosts[0], peek=True)
self.assertEqual(next_task, mock_task)
itr._host_states[hosts[0].name] = s
# test a regular insertion
s_copy = s.copy()
res_state = itr._insert_tasks_into_state(s_copy, task_list=[MagicMock()])
| gpl-3.0 |
ioangogo/Suntimes | suntimes.py | 1 | 1345 | #! /bin/python
# -*- coding: UTF-8 -*-
import urllib2, json, datetime, time
import dateutil.parser
global latitude
global longitude
api=json.loads(urllib2.urlopen("http://freegeoip.net/json/").read().decode("UTF-8"))
latitude=str(api['latitude'])
longitude=str(api["longitude"])
def getsunrise(lat="", lng="", formatted=1):
if lat=="" or lng == "":
lat=latitude
lng=longitude
url="http://api.sunrise-sunset.org/json?lat=" + lat + "&lng=" + lng + "&formatted=" + str(formatted)
print url
sunapi=urllib2.urlopen(url)
return json.loads(sunapi.read().decode("UTF-8"))['results']['sunrise']
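# The API responds with JSON shaped roughly like (illustrative values):
#   {"results": {"sunrise": "2015-05-21T05:05:35+00:00",
#                "sunset": "2015-05-21T19:22:59+00:00", ...}, "status": "OK"}
# which is why nighttrue() below strips the "+00:00" suffix before parsing.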
def getsunset(lat="", lng="", formatted="1"):
if lat=="" or lng == "":
lat=latitude
lng=longitude
sunapi=urllib2.urlopen("http://api.sunrise-sunset.org/json?lat=" + lat + "&lng=" + lng + "&formatted=" + str(formatted))
return json.loads(sunapi.read().decode("UTF-8"))['results']['sunset']
def nighttrue(lat="", lng=""):
sunrise = dateutil.parser.parse(getsunrise(lat, lng, 0).replace("+00:00",""))
sunset = dateutil.parser.parse(getsunset(lat, lng, 0).replace("+00:00",""))
timenow = datetime.datetime.now()
    # Note: the API times are UTC, so this naive comparison assumes the
    # local clock is close to UTC.
    if sunrise <= timenow <= sunset:
        return False
    else:
        return True
if __name__ == '__main__':
bools=nighttrue()
if bools == True:
print "night time"
elif bools == False:
print "day"
else:
print bools
| bsd-3-clause |
feibaliang/blog | node_modules/pygmentize-bundled/vendor/pygments/ez_setup.py | 181 | 9709 | #!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
else:
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
except pkg_resources.DistributionNotFound:
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
| mit |
GustavoHennig/ansible | lib/ansible/modules/network/nxos/nxos_vxlan_vtep_vni.py | 21 | 15423 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep_vni
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Creates a Virtual Network Identifier member (VNI)
description:
- Creates a Virtual Network Identifier member (VNI) for an NVE
overlay interface.
author: Gabriele Gerbino (@GGabriele)
notes:
- default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
vni:
description:
- ID of the Virtual Network Identifier.
required: true
assoc_vrf:
description:
- This attribute is used to identify and separate processing VNIs
that are associated with a VRF and used for routing. The VRF
and VNI specified with this command must match the configuration
of the VNI under the VRF.
required: false
choices: ['true','false']
default: null
ingress_replication:
description:
- Specifies mechanism for host reachability advertisement.
required: false
    choices: ['bgp','static','default']
default: null
multicast_group:
description:
- The multicast group (range) of the VNI. Valid values are
string and keyword 'default'.
required: false
default: null
peer_list:
description:
- Set the ingress-replication static peer list. Valid values
are an array, a space-separated string of ip addresses,
or the keyword 'default'.
required: false
default: null
suppress_arp:
description:
- Suppress arp under layer 2 VNI.
required: false
choices: ['true','false']
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
include_defaults:
description:
- Specify to use or not the complete running configuration
for module operations.
required: false
default: true
    choices: ['true','false']
config:
description:
- Configuration string to be used for module operations. If not
specified, the module will use the current running configuration.
required: false
default: null
save:
description:
- Specify to save the running configuration after
module operations.
required: false
default: false
choices: ['true','false']
'''
EXAMPLES = '''
- nxos_vxlan_vtep_vni:
interface: nve1
vni: 6000
ingress_replication: default
username: "{{ un }}"
password: "{{ pwd }}"
host: "{{ inventory_hostname }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"ingress_replication": "default", "interface": "nve1", "vni": "6000"}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"assoc_vrf": false, "ingress_replication": "", "interface": "nve1",
"multicast_group": "", "peer_list": [],
"suppress_arp": false, "vni": "6000"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "member vni 6000"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
BOOL_PARAMS = ['suppress_arp']
PARAM_TO_COMMAND_KEYMAP = {
'assoc_vrf': 'associate-vrf',
'interface': 'interface',
'vni': 'member vni',
'ingress_replication': 'ingress-replication protocol',
'multicast_group': 'mcast-group',
'peer_list': 'peer-ip',
'suppress_arp': 'suppress-arp'
}
PARAM_TO_DEFAULT_KEYMAP = {}
WARNINGS = []
def invoke(name, *args, **kwargs):
func = globals().get(name)
if func:
return func(*args, **kwargs)
def get_value(arg, config, module):
if arg in BOOL_PARAMS:
REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = False
try:
if REGEX.search(config):
value = True
except TypeError:
value = False
else:
REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in config:
value = REGEX.search(config).group('value')
return value
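# For example, with PARAM_TO_COMMAND_KEYMAP['multicast_group'] == 'mcast-group',
# a running-config line such as "  mcast-group 239.1.1.1" (illustrative value)
# makes get_value('multicast_group', config, module) return '239.1.1.1', while
# BOOL_PARAMS such as 'suppress_arp' yield True/False based on keyword presence.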
def check_interface(module, netcfg):
config = str(netcfg)
REGEX = re.compile(r'(?:interface nve)(?P<value>.*)$', re.M)
value = ''
if 'interface nve' in config:
value = 'nve{0}'.format(REGEX.search(config).group('value'))
return value
def get_custom_value(arg, config, module):
splitted_config = config.splitlines()
if arg == 'assoc_vrf':
value = False
if 'associate-vrf' in config:
value = True
elif arg == 'peer_list':
value = []
REGEX = re.compile(r'(?:peer-ip\s)(?P<peer_value>.*)$', re.M)
for line in splitted_config:
peer_value = ''
if PARAM_TO_COMMAND_KEYMAP[arg] in line:
peer_value = REGEX.search(line).group('peer_value')
if peer_value:
value.append(peer_value)
return value
def get_existing(module, args):
existing = {}
netcfg = get_config(module)
custom = [
'assoc_vrf',
'peer_list'
]
interface_exist = check_interface(module, netcfg)
if interface_exist:
parents = ['interface {0}'.format(interface_exist)]
temp_config = netcfg.get_section(parents)
if 'member vni {0} associate-vrf'.format(module.params['vni']) in temp_config:
parents.append('member vni {0} associate-vrf'.format(
module.params['vni']))
config = netcfg.get_section(parents)
elif "member vni {0}".format(module.params['vni']) in temp_config:
parents.append('member vni {0}'.format(module.params['vni']))
config = netcfg.get_section(parents)
else:
config = {}
if config:
for arg in args:
if arg not in ['interface', 'vni']:
if arg in custom:
existing[arg] = get_custom_value(arg, config, module)
else:
existing[arg] = get_value(arg, config, module)
existing['interface'] = interface_exist
existing['vni'] = module.params['vni']
return existing, interface_exist
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = value
else:
new_dict[new_key] = value
return new_dict
def state_present(module, existing, proposed, candidate):
commands = list()
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if key == 'associate-vrf':
command = 'member vni {0} {1}'.format(module.params['vni'], key)
if value:
commands.append(command)
else:
commands.append('no {0}'.format(command))
elif key == 'peer-ip' and value != 'default':
for peer in value:
commands.append('{0} {1}'.format(key, peer))
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif value == 'default':
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if key == 'peer-ip':
for peer in existing_value:
commands.append('no {0} {1}'.format(key, peer))
else:
commands.append('no {0} {1}'.format(key, existing_value))
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
commands.append('no {0}'.format(key.lower()))
else:
command = '{0} {1}'.format(key, value.lower())
commands.append(command)
if commands:
vni_command = 'member vni {0}'.format(module.params['vni'])
ingress_replication_command = 'ingress-replication protocol static'
interface_command = 'interface {0}'.format(module.params['interface'])
if ingress_replication_command in commands:
static_level_cmds = [cmd for cmd in commands if 'peer' in cmd]
parents = [interface_command, vni_command, ingress_replication_command]
candidate.add(static_level_cmds, parents=parents)
commands = [cmd for cmd in commands if 'peer' not in cmd]
if vni_command in commands:
parents = [interface_command]
commands.remove(vni_command)
if module.params['assoc_vrf'] is None:
parents.append(vni_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, proposed, candidate):
if existing['assoc_vrf']:
commands = ['no member vni {0} associate-vrf'.format(
module.params['vni'])]
else:
commands = ['no member vni {0}'.format(module.params['vni'])]
parents = ['interface {0}'.format(module.params['interface'])]
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
interface=dict(required=True, type='str'),
vni=dict(required=True, type='str'),
assoc_vrf=dict(required=False, type='bool'),
multicast_group=dict(required=False, type='str'),
peer_list=dict(required=False, type='list'),
suppress_arp=dict(required=False, type='bool'),
ingress_replication=dict(required=False, type='str',
choices=['bgp', 'static', 'default']),
state=dict(choices=['present', 'absent'], default='present',
required=False),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
if module.params['assoc_vrf']:
mutually_exclusive_params = ['multicast_group',
'suppress_arp',
'ingress_replication']
for param in mutually_exclusive_params:
if module.params[param]:
module.fail_json(msg='assoc_vrf cannot be used with '
'{0} param'.format(param))
if module.params['peer_list']:
if module.params['ingress_replication'] != 'static':
module.fail_json(msg='ingress_replication=static is required '
'when using peer_list param')
else:
peer_list = module.params['peer_list']
if peer_list[0] == 'default':
module.params['peer_list'] = 'default'
else:
stripped_peer_list = map(str.strip, peer_list)
module.params['peer_list'] = stripped_peer_list
state = module.params['state']
args = [
'assoc_vrf',
'interface',
'vni',
'ingress_replication',
'multicast_group',
'peer_list',
'suppress_arp'
]
existing, interface_exist = invoke('get_existing', module, args)
end_state = existing
proposed_args = dict((k, v) for k, v in module.params.items()
if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key != 'interface':
if str(value).lower() == 'default':
value = PARAM_TO_DEFAULT_KEYMAP.get(key)
if value is None:
value = 'default'
if existing.get(key) or (not existing.get(key) and value):
proposed[key] = value
result = {}
if state == 'present' or (state == 'absent' and existing):
if not interface_exist:
WARNINGS.append("The proposed NVE interface does not exist. "
"Use nxos_interface to create it first.")
elif interface_exist != module.params['interface']:
module.fail_json(msg='Only 1 NVE interface is allowed on '
'the switch.')
elif (existing and state == 'absent' and
existing['vni'] != module.params['vni']):
module.fail_json(msg="ERROR: VNI delete failed: Could not find"
" vni node for {0}".format(
module.params['vni']),
existing_vni=existing['vni'])
else:
candidate = CustomNetworkConfig(indent=3)
invoke('state_%s' % state, module, existing, proposed, candidate)
try:
response = load_config(module, candidate)
result.update(response)
            except Exception as exc:
                module.fail_json(msg=str(exc))
else:
result['updates'] = []
if module._verbosity > 0:
end_state, interface_exist = invoke('get_existing', module, args)
result['end_state'] = end_state
result['existing'] = existing
result['proposed'] = proposed_args
if WARNINGS:
result['warnings'] = WARNINGS
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
veger/ansible | lib/ansible/parsing/quoting.py | 241 | 1141 | # (c) 2014 James Cammarata, <jcammarata@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
def is_quoted(data):
return len(data) > 1 and data[0] == data[-1] and data[0] in ('"', "'") and data[-2] != '\\'
def unquote(data):
''' removes first and last quotes from a string, if the string starts and ends with the same quotes '''
if is_quoted(data):
return data[1:-1]
return data
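# A minimal usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    assert is_quoted('"hello"')
    assert not is_quoted('hello')
    assert unquote("'hello'") == 'hello'
    # mismatched or backslash-escaped outer quotes are left untouched
    assert unquote('"hello\'') == '"hello\''
    assert unquote('"hello\\"') == '"hello\\"'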
| gpl-3.0 |
CMPUT410W15T02/CMPUT410W15-project | testenv/lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/site.py | 108 | 2362 | def __boot():
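    # High-level flow (descriptive comment): find the real 'site' module on
    # the stdlib portion of sys.path (everything after the PYTHONPATH
    # entries), load it in place of this shim, then re-add each PYTHONPATH
    # entry via addsitedir() so .pth files and eggs on those paths are
    # processed, finally re-ordering sys.path.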
import sys, imp, os, os.path
PYTHONPATH = os.environ.get('PYTHONPATH')
if PYTHONPATH is None or (sys.platform=='win32' and not PYTHONPATH):
PYTHONPATH = []
else:
PYTHONPATH = PYTHONPATH.split(os.pathsep)
pic = getattr(sys,'path_importer_cache',{})
stdpath = sys.path[len(PYTHONPATH):]
mydir = os.path.dirname(__file__)
#print "searching",stdpath,sys.path
for item in stdpath:
if item==mydir or not item:
continue # skip if current dir. on Windows, or my own directory
importer = pic.get(item)
if importer is not None:
loader = importer.find_module('site')
if loader is not None:
# This should actually reload the current module
loader.load_module('site')
break
else:
try:
stream, path, descr = imp.find_module('site',[item])
except ImportError:
continue
if stream is None:
continue
try:
# This should actually reload the current module
imp.load_module('site',stream,path,descr)
finally:
stream.close()
break
else:
raise ImportError("Couldn't find the real 'site' module")
#print "loaded", __file__
known_paths = dict([(makepath(item)[1],1) for item in sys.path]) # 2.2 comp
oldpos = getattr(sys,'__egginsert',0) # save old insertion position
sys.__egginsert = 0 # and reset the current one
for item in PYTHONPATH:
addsitedir(item)
sys.__egginsert += oldpos # restore effective old position
d,nd = makepath(stdpath[0])
insert_at = None
new_path = []
for item in sys.path:
p,np = makepath(item)
if np==nd and insert_at is None:
# We've hit the first 'system' path entry, so added entries go here
insert_at = len(new_path)
if np in known_paths or insert_at is None:
new_path.append(item)
else:
# new path after the insert point, back-insert it
new_path.insert(insert_at, item)
insert_at += 1
sys.path[:] = new_path
if __name__=='site':
__boot()
del __boot
| gpl-2.0 |
rishig/zulip | zerver/management/commands/generate_realm_creation_link.py | 1 | 1423 |
from typing import Any
from django.db import ProgrammingError
from confirmation.models import generate_realm_creation_url
from zerver.lib.management import ZulipBaseCommand, CommandError
from zerver.models import Realm
class Command(ZulipBaseCommand):
help = """
Outputs a randomly generated, one-time-use link for organization creation.
Whoever visits the link can create a new organization on this server, regardless of whether
settings.OPEN_REALM_CREATION is enabled. The link expires automatically after
settings.REALM_CREATION_LINK_VALIDITY_DAYS.
Usage: ./manage.py generate_realm_creation_link """
def handle(self, *args: Any, **options: Any) -> None:
try:
            # First check whether the DB has been initialized
Realm.objects.first()
except ProgrammingError:
raise CommandError("The Zulip database does not appear to exist. "
"Have you run initialize-database?")
url = generate_realm_creation_url(by_admin=True)
self.stdout.write(self.style.SUCCESS("Please visit the following "
"secure single-use link to register your "))
self.stdout.write(self.style.SUCCESS("new Zulip organization:\033[0m"))
self.stdout.write("")
self.stdout.write(self.style.SUCCESS(" \033[1;92m%s\033[0m" % (url,)))
self.stdout.write("")
| apache-2.0 |
mammadori/asteroids | game/physicalobject.py | 2 | 4916 | # -*- coding: utf-8 -*-
import pyglet
from . import util, resources
# update and collision helper functions
def process_sprite_group(group, dt):
"""
calls update for the whole group and removes after who returns True
"""
for item in set(group):
try:
if item.update(dt):
# remove expired items
group.remove(item)
item.delete()
except AttributeError:
try:
group.remove(item)
except KeyError:
pass
continue
def group_collide(group, other_object):
"""
Check collision between a group and another object
returns how many object collided
removes the collided object in the group and calls
method "destroy" in them
"""
collided = set()
for item in set(group):
try:
if item.collide(other_object):
collided.add(item.destroy())
group.remove(item)
item.delete() # free batch
except AttributeError:
continue
# remove collide objects from group
return collided
def group_group_collide(group1, group2):
"""
    For each item in group1, run group_collide() against group2.
    Items in group1 that collide are removed from group1; the combined
    set of destroyed group2 objects is returned.
"""
collided = set()
for item in set(group1):
c = group_collide(group2, item)
if len(c) > 0:
            # collect destroyed group2 objects; the group1 item itself is
            # removed below without calling destroy()
collided.update(c)
try:
group1.remove(item)
item.delete() # free batch
except AttributeError:
continue
return collided
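# Usage sketch for the helpers above (illustrative; 'missiles', 'rocks',
# 'ship' and 'explosions' are hypothetical objects owned by a game loop):
#
#   process_sprite_group(missiles, dt)                 # age/move, drop expired
#   explosions |= group_collide(rocks, ship)           # ship vs. every rock
#   explosions |= group_group_collide(missiles, rocks)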
class MovingSprite(pyglet.sprite.Sprite):
"""A sprite with physical properties such as velocity, and angle velocity"""
def __init__(self, rotation=0, vel=(0,0), rotation_speed=0, screensize=(800, 600), *args, **kwargs):
super().__init__(*args, **kwargs)
self.screensize = screensize
# Velocity
self.vel = list(vel)
# Angle (pyglet uses negative degrees)
self.rotation = rotation
self.rotation_speed = rotation_speed
self.should_delete = False
def update(self, dt):
if self.should_delete:
self.delete()
return True
# rotate object
self.rotation += self.rotation_speed * dt
# Update position according to velocity and time
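        # The modulo wraps the sprite around the screen; the width/height
        # padding lets it drift fully off one edge before re-entering.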
self.x = (self.x + self.vel[0] * dt) % (self.screensize[0] + self.width)
self.y = (self.y + self.vel[1] * dt) % (self.screensize[1] + self.height)
# update methods could be checked for expiring
return False
class ScaledMovingSprite(MovingSprite):
"""A Fullscreen Moving sprite"""
def __init__(self, radius=None, lifespan=float("inf"), *args, **kwargs):
"""
Interesting super() params: rotation=0, vel=(0,0), rotation_speed=0, screensize=(800, 600)
"""
super().__init__(*args, **kwargs)
self.scale = self.screensize[0] / self.image.width
class PhysicalObject(MovingSprite):
"""A Moving sprite with collision and expiring"""
def __init__(self, radius=None, lifespan="inf", *args, **kwargs):
"""
Interesting super() params: rotation=0, vel=(0,0), rotation_speed=0, screensize=(800, 600)
"""
super().__init__(*args, **kwargs)
# collision radius
if radius:
self.radius = radius
else:
self.radius = (max(self.width, self.height) / 2) * self.scale
# track how much it should last before disappearing
self.lifespan = float(lifespan)
self.age = float(0)
def update(self, dt):
self.age += dt
# age the object
return super().update(dt) or (self.age > self.lifespan) # update could be checked for expiring
def collide(self, other_object):
"""Determine if this object collides with another"""
# Calculate distance between object centers that would be a collision,
# assuming circular images
collision_distance = self.radius * self.scale + other_object.radius * other_object.scale
# Get distance using position tuples
actual_distance = util.distance(self.position, other_object.position)
return (actual_distance <= collision_distance)
def destroy(self):
pos = list(self.position)
vel = (self.vel[0]/2, self.vel[1] / 2)
explosion = MovingSprite(img=resources.explosion_animation,
vel=vel, screensize=self.screensize, x=pos[0], y=pos[1],
batch=self.batch, group=self.group)
# monkey patching done well
@explosion.event
def on_animation_end():
explosion.visible = False
explosion.should_delete = True
resources.explosion_sound.play()
return explosion
| bsd-3-clause |
andreaso/ansible | lib/ansible/modules/cloud/amazon/ec2_ami.py | 51 | 21956 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: ec2_ami
version_added: "1.3"
short_description: create or destroy an image in ec2
description:
- Creates or deletes ec2 images.
options:
instance_id:
description:
- Instance ID to create the AMI from.
required: false
default: null
name:
description:
- The name of the new AMI.
required: false
default: null
architecture:
version_added: "2.3"
description:
- The target architecture of the image to register
required: false
default: null
kernel_id:
version_added: "2.3"
description:
- The target kernel id of the image to register
required: false
default: null
virtualization_type:
version_added: "2.3"
description:
- The virtualization type of the image to register
required: false
default: null
root_device_name:
version_added: "2.3"
description:
- The root device name of the image to register
required: false
default: null
wait:
description:
- Wait for the AMI to be in state 'available' before returning.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- How long before wait gives up, in seconds.
default: 300
state:
description:
- Create or deregister/delete AMI.
required: false
default: 'present'
choices: [ "absent", "present" ]
description:
description:
- Human-readable string describing the contents and purpose of the AMI.
required: false
default: null
no_reboot:
description:
      - Flag indicating that the bundling process should not attempt to shut down the instance before bundling. If this flag is True, the
        responsibility of maintaining file system integrity is left to the owner of the instance.
required: false
default: no
choices: [ "yes", "no" ]
image_id:
description:
- Image ID to be deregistered.
required: false
default: null
device_mapping:
version_added: "2.0"
description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- >
Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean),
snapshot_id, iops (for io1 volume_type)
required: false
default: null
delete_snapshot:
description:
- Delete snapshots when deregistering the AMI.
required: false
default: "no"
choices: [ "yes", "no" ]
tags:
description:
- A dictionary of tags to add to the new image; '{"key":"value"}' and '{"key":"value","key":"value"}'
required: false
default: null
version_added: "2.0"
launch_permissions:
description:
- Users and groups that should be able to launch the AMI. Expects
dictionary with a key of user_ids and/or group_names. user_ids should
be a list of account ids. group_name should be a list of groups, "all"
is the only acceptable value currently.
required: false
default: null
version_added: "2.0"
author:
- "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
- "Constantin Bugneac (@Constantin07) <constantin.bugneac@endava.com>"
- "Ross Williams (@gunzy83) <gunzy83au@gmail.com>"
extends_documentation_fragment:
- aws
- ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
EXAMPLES = '''
# Basic AMI Creation
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
wait: yes
name: newtest
tags:
Name: newtest
Service: TestService
register: image
# Basic AMI Creation, without waiting
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
instance_id: i-xxxxxx
wait: no
name: newtest
register: image
# AMI Registration from EBS Snapshot
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
name: newtest
state: present
architecture: x86_64
virtualization_type: hvm
root_device_name: /dev/xvda
device_mapping:
- device_name: /dev/xvda
size: 8
snapshot_id: snap-xxxxxxxx
delete_on_termination: true
volume_type: gp2
register: image
# AMI Creation, with a custom root-device size and another EBS attached
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
size: YYY
delete_on_termination: false
volume_type: gp2
register: image
# AMI Creation, excluding a volume attached at /dev/sdb
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
instance_id: i-xxxxxx
name: newtest
device_mapping:
- device_name: /dev/sda1
size: XXX
delete_on_termination: true
volume_type: gp2
- device_name: /dev/sdb
no_device: yes
register: image
# Deregister/Delete AMI (keep associated snapshots)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: False
state: absent
# Deregister AMI (delete associated snapshots too)
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
delete_snapshot: True
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''
RETURN = '''
architecture:
description: architecture of image
returned: when AMI is created or already exists
type: string
sample: "x86_64"
block_device_mapping:
description: block device mapping associated with image
returned: when AMI is created or already exists
type: dict
sample: {
"/dev/sda1": {
"delete_on_termination": true,
"encrypted": false,
"size": 10,
"snapshot_id": "snap-1a03b80e7",
"volume_type": "standard"
}
}
creationDate:
description: creation date of image
returned: when AMI is created or already exists
type: string
sample: "2015-10-15T22:43:44.000Z"
description:
description: description of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
hypervisor:
description: type of hypervisor
returned: when AMI is created or already exists
type: string
sample: "xen"
image_id:
description: id of the image
returned: when AMI is created or already exists
type: string
sample: "ami-1234abcd"
is_public:
description: whether image is public
returned: when AMI is created or already exists
type: bool
sample: false
location:
description: location of image
returned: when AMI is created or already exists
type: string
sample: "315210894379/nat-server"
name:
description: ami name of image
returned: when AMI is created or already exists
type: string
sample: "nat-server"
ownerId:
description: owner of image
returned: when AMI is created or already exists
type: string
sample: "435210894375"
platform:
description: platform of image
returned: when AMI is created or already exists
type: string
sample: null
root_device_name:
description: root device name of image
returned: when AMI is created or already exists
type: string
sample: "/dev/sda1"
root_device_type:
description: root device type of image
returned: when AMI is created or already exists
type: string
sample: "ebs"
state:
description: state of image
returned: when AMI is created or already exists
type: string
sample: "available"
tags:
description: a dictionary of tags assigned to image
returned: when AMI is created or already exists
type: dict
sample: {
"Env": "devel",
"Name": "nat-server"
}
virtualization_type:
description: image virtualization type
returned: when AMI is created or already exists
type: string
sample: "hvm"
snapshots_deleted:
description: a list of snapshot ids deleted after deregistering image
returned: after AMI is deregistered, if 'delete_snapshot' is set to 'yes'
type: list
sample: [
"snap-fbcccb8f",
"snap-cfe7cdb4"
]
'''
import sys
import time
try:
import boto
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_block_device_mapping(image):
"""
Retrieves block device mapping from AMI
"""
bdm_dict = dict()
if image is not None and hasattr(image, 'block_device_mapping'):
        bdm = image.block_device_mapping
for device_name in bdm.keys():
bdm_dict[device_name] = {
'size': bdm[device_name].size,
'snapshot_id': bdm[device_name].snapshot_id,
'volume_type': bdm[device_name].volume_type,
'encrypted': bdm[device_name].encrypted,
'delete_on_termination': bdm[device_name].delete_on_termination
}
return bdm_dict
def get_ami_info(image):
return dict(
image_id=image.id,
state=image.state,
architecture=image.architecture,
block_device_mapping=get_block_device_mapping(image),
creationDate=image.creationDate,
description=image.description,
hypervisor=image.hypervisor,
is_public=image.is_public,
location=image.location,
ownerId=image.ownerId,
root_device_name=image.root_device_name,
root_device_type=image.root_device_type,
tags=image.tags,
        virtualization_type=image.virtualization_type
)
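# Illustrative result (hypothetical values): get_ami_info() flattens a boto
# Image into the RETURN structure documented above, e.g.
#   {'image_id': 'ami-1234abcd', 'state': 'available',
#    'block_device_mapping': {'/dev/sda1': {'size': 8, ...}}, ...}
# The camelCase keys ('creationDate', 'ownerId') mirror boto attribute names.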
def create_image(module, ec2):
"""
Creates new AMI
module : AnsibleModule object
ec2: authenticated ec2 connection object
"""
instance_id = module.params.get('instance_id')
name = module.params.get('name')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
description = module.params.get('description')
architecture = module.params.get('architecture')
kernel_id = module.params.get('kernel_id')
root_device_name = module.params.get('root_device_name')
virtualization_type = module.params.get('virtualization_type')
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')
try:
params = {'name': name,
'description': description}
images = ec2.get_all_images(filters={'name': name})
if images and images[0]:
# ensure that launch_permissions are up to date
update_image(module, ec2, images[0].id)
bdm = None
if device_mapping:
bdm = BlockDeviceMapping()
for device in device_mapping:
if 'device_name' not in device:
                module.fail_json(msg='Device name must be set for volume')
device_name = device['device_name']
del device['device_name']
bd = BlockDeviceType(**device)
bdm[device_name] = bd
if instance_id:
params['instance_id'] = instance_id
params['no_reboot'] = no_reboot
if bdm:
params['block_device_mapping'] = bdm
image_id = ec2.create_image(**params)
else:
params['architecture'] = architecture
params['virtualization_type'] = virtualization_type
if kernel_id:
params['kernel_id'] = kernel_id
if root_device_name:
params['root_device_name'] = root_device_name
if bdm:
params['block_device_map'] = bdm
image_id = ec2.register_image(**params)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    # Wait until the image is recognized. The EC2 API is eventually
    # consistent, so a successful CreateImage call doesn't guarantee that a
    # subsequent DescribeImages call using the new image id will succeed.
for i in range(wait_timeout):
try:
img = ec2.get_image(image_id)
if img.state == 'available':
break
elif img.state == 'failed':
module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
except boto.exception.EC2ResponseError as e:
if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer "
"wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
finally:
time.sleep(1)
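    # Note: this poll loop runs roughly once per second for up to
    # wait_timeout iterations regardless of the 'wait' flag, since a
    # DescribeImages miss right after CreateImage is expected; if every
    # lookup raises, 'img' may still be unset when the loop ends.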
if img.state != 'available':
module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help.")
if tags:
try:
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError as e:
            module.fail_json(msg="Image tagging failed => %s: %s" % (e.error_code, e.error_message))
if launch_permissions:
try:
img = ec2.get_image(image_id)
img.set_launch_permissions(**launch_permissions)
except boto.exception.BotoServerError as e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)
module.exit_json(msg="AMI creation operation complete", changed=True, **get_ami_info(img))
def deregister_image(module, ec2):
"""
Deregisters AMI
"""
image_id = module.params.get('image_id')
delete_snapshot = module.params.get('delete_snapshot')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
img = ec2.get_image(image_id)
if img is None:
        module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
    # Get all associated snapshot ids before deregistering the image, otherwise this information becomes unavailable
snapshots = []
if hasattr(img, 'block_device_mapping'):
for key in img.block_device_mapping:
snapshots.append(img.block_device_mapping[key].snapshot_id)
    # Re-deleting an already deleted image doesn't raise an exception;
    # it just returns an object without image attributes
if hasattr(img, 'id'):
try:
params = {'image_id': image_id,
'delete_snapshot': delete_snapshot}
res = ec2.deregister_image(**params)
except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
else:
        module.exit_json(msg="Image %s has already been deleted" % image_id, changed=False)
# wait here until the image is gone
img = ec2.get_image(image_id)
wait_timeout = time.time() + wait_timeout
while wait and wait_timeout > time.time() and img is not None:
img = ec2.get_image(image_id)
time.sleep(3)
if wait and wait_timeout <= time.time():
# waiting took too long
        module.fail_json(msg="timed out waiting for image to be deregistered/deleted")
    # The boto library hardcodes deletion of the snapshot for the root volume mounted as '/dev/sda1' only.
    # Make it possible to delete all snapshots which belong to the image, including a root block device mapped as '/dev/xvda'
if delete_snapshot:
try:
for snapshot_id in snapshots:
ec2.delete_snapshot(snapshot_id)
except boto.exception.BotoServerError as e:
if e.error_code == 'InvalidSnapshot.NotFound':
# Don't error out if root volume snapshot was already deleted as part of deregister_image
pass
module.exit_json(msg="AMI deregister/delete operation complete", changed=True, snapshots_deleted=snapshots)
else:
module.exit_json(msg="AMI deregister/delete operation complete", changed=True)
def update_image(module, ec2, image_id):
"""
Updates AMI
"""
    launch_permissions = module.params.get('launch_permissions') or {}  # a dict, to match the key lookups below
if 'user_ids' in launch_permissions:
launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]
img = ec2.get_image(image_id)
if img is None:
        module.fail_json(msg="Image %s does not exist" % image_id, changed=False)
try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
if (('user_ids' in launch_permissions and launch_permissions['user_ids']) or
('group_names' in launch_permissions and launch_permissions['group_names'])):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
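# Hypothetical launch_permissions payloads handled by update_image():
#   {'user_ids': ['123456789012']}  -> share the AMI with one account
#   {'group_names': ['all']}        -> make the AMI public
# When nothing is requested but permissions are currently set, they are
# stripped via remove_launch_permissions().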
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
instance_id = dict(),
image_id = dict(),
architecture = dict(default="x86_64"),
kernel_id = dict(),
virtualization_type = dict(default="hvm"),
root_device_name = dict(),
delete_snapshot = dict(default=False, type='bool'),
name = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=900),
description = dict(default=""),
no_reboot = dict(default=False, type='bool'),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict'),
launch_permissions = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
ec2 = ec2_connect(module)
except Exception as e:
module.fail_json(msg="Error while connecting to aws: %s" % str(e))
if module.params.get('state') == 'absent':
if not module.params.get('image_id'):
            module.fail_json(msg='image_id is required to deregister/delete an AMI')
deregister_image(module, ec2)
elif module.params.get('state') == 'present':
if module.params.get('image_id') and module.params.get('launch_permissions'):
# Update image's launch permissions
            update_image(module, ec2, module.params.get('image_id'))
# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id') and not module.params.get('device_mapping'):
            module.fail_json(msg='Either instance_id or device_mapping (register from EBS snapshot) is required to create a new image')
if not module.params.get('name'):
module.fail_json(msg='name parameter is required for new image')
create_image(module, ec2)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
| gpl-3.0 |
albertz/music-player | mac/pyobjc-framework-Cocoa/PyObjCTest/test_nsuserdefaults.py | 3 | 2840 | from Foundation import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
class PythonListAsValue (TestCase):
def testSettingPythonList(self):
defaults = NSUserDefaults.standardUserDefaults()
defaults.setObject_forKey_([b'a'.decode('ascii'), b'b'.decode('ascii'), b'c'.decode('ascii')], b'randomKey'.decode('ascii'))
self.assertEqual(defaults.arrayForKey_(b'randomKey'.decode('ascii')), [b'a'.decode('ascii'), b'b'.decode('ascii'), b'c'.decode('ascii')])
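    # The round trip above leans on the PyObjC bridge: the Python list is
    # converted to an NSArray when stored, and arrayForKey_ hands back a
    # bridged array that still compares equal to the original Python list.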
def testMethods(self):
self.assertResultIsBOOL(NSUserDefaults.boolForKey_)
self.assertArgIsBOOL(NSUserDefaults.setBool_forKey_, 0)
self.assertResultIsBOOL(NSUserDefaults.synchronize)
self.assertResultIsBOOL(NSUserDefaults.objectIsForcedForKey_)
self.assertResultIsBOOL(NSUserDefaults.objectIsForcedForKey_inDomain_)
def testConstants(self):
self.assertIsInstance(NSGlobalDomain, unicode)
self.assertIsInstance(NSArgumentDomain, unicode)
self.assertIsInstance(NSRegistrationDomain, unicode)
self.assertIsInstance(NSUserDefaultsDidChangeNotification, unicode)
self.assertIsInstance(NSWeekDayNameArray, unicode)
self.assertIsInstance(NSShortWeekDayNameArray, unicode)
self.assertIsInstance(NSMonthNameArray, unicode)
self.assertIsInstance(NSShortMonthNameArray, unicode)
self.assertIsInstance(NSTimeFormatString, unicode)
self.assertIsInstance(NSDateFormatString, unicode)
self.assertIsInstance(NSTimeDateFormatString, unicode)
self.assertIsInstance(NSShortTimeDateFormatString, unicode)
self.assertIsInstance(NSCurrencySymbol, unicode)
self.assertIsInstance(NSDecimalSeparator, unicode)
self.assertIsInstance(NSThousandsSeparator, unicode)
self.assertIsInstance(NSDecimalDigits, unicode)
self.assertIsInstance(NSAMPMDesignation, unicode)
self.assertIsInstance(NSHourNameDesignations, unicode)
self.assertIsInstance(NSYearMonthWeekDesignations, unicode)
self.assertIsInstance(NSEarlierTimeDesignations, unicode)
self.assertIsInstance(NSLaterTimeDesignations, unicode)
self.assertIsInstance(NSThisDayDesignations, unicode)
self.assertIsInstance(NSNextDayDesignations, unicode)
self.assertIsInstance(NSNextNextDayDesignations, unicode)
self.assertIsInstance(NSPriorDayDesignations, unicode)
self.assertIsInstance(NSDateTimeOrdering, unicode)
self.assertIsInstance(NSInternationalCurrencyString, unicode)
self.assertIsInstance(NSShortDateFormatString, unicode)
self.assertIsInstance(NSPositiveCurrencyFormatString, unicode)
self.assertIsInstance(NSNegativeCurrencyFormatString, unicode)
if __name__ == "__main__":
main()
| bsd-2-clause |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/numpy/fft/info.py | 34 | 7236 | """
Discrete Fourier Transform (:mod:`numpy.fft`)
=============================================
.. currentmodule:: numpy.fft
Standard FFTs
-------------
.. autosummary::
:toctree: generated/
fft Discrete Fourier transform.
ifft Inverse discrete Fourier transform.
fft2 Discrete Fourier transform in two dimensions.
ifft2 Inverse discrete Fourier transform in two dimensions.
fftn Discrete Fourier transform in N-dimensions.
ifftn Inverse discrete Fourier transform in N dimensions.
Real FFTs
---------
.. autosummary::
:toctree: generated/
rfft Real discrete Fourier transform.
irfft Inverse real discrete Fourier transform.
rfft2 Real discrete Fourier transform in two dimensions.
irfft2 Inverse real discrete Fourier transform in two dimensions.
rfftn Real discrete Fourier transform in N dimensions.
irfftn Inverse real discrete Fourier transform in N dimensions.
Hermitian FFTs
--------------
.. autosummary::
:toctree: generated/
hfft Hermitian discrete Fourier transform.
ihfft Inverse Hermitian discrete Fourier transform.
Helper routines
---------------
.. autosummary::
:toctree: generated/
fftfreq Discrete Fourier Transform sample frequencies.
rfftfreq DFT sample frequencies (for usage with rfft, irfft).
fftshift Shift zero-frequency component to center of spectrum.
ifftshift Inverse of fftshift.
Background information
----------------------
Fourier analysis is fundamentally a method for expressing a function as a
sum of periodic components, and for recovering the function from those
components. When both the function and its Fourier transform are
replaced with discretized counterparts, it is called the discrete Fourier
transform (DFT). The DFT has become a mainstay of numerical computing in
part because of a very fast algorithm for computing it, called the Fast
Fourier Transform (FFT), which was known to Gauss (1805) and was brought
to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
provide an accessible introduction to Fourier analysis and its
applications.
Because the discrete Fourier transform separates its input into
components that contribute at discrete frequencies, it has a great number
of applications in digital signal processing, e.g., for filtering, and in
this context the discretized input to the transform is customarily
referred to as a *signal*, which exists in the *time domain*. The output
is called a *spectrum* or *transform* and exists in the *frequency
domain*.
Implementation details
----------------------
There are many ways to define the DFT, varying in the sign of the
exponent, normalization, etc. In this implementation, the DFT is defined
as
.. math::
A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
\\qquad k = 0,\\ldots,n-1.
The DFT is in general defined for complex inputs and outputs, and a
single-frequency component at linear frequency :math:`f` is
represented by a complex exponential
:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
is the sampling interval.
The values in the result follow so-called "standard" order: If ``A =
fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the mean of
the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
negative-frequency terms, in order of decreasingly negative frequency.
For an even number of input points, ``A[n/2]`` represents both positive and
negative Nyquist frequency, and is also purely real for real input. For
an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
of corresponding elements in the output. The routine
``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
that shift.
When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
The phase spectrum is obtained by ``np.angle(A)``.
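For example, a single complex tone at bin 3 of an 8-point input lands in a
single output coefficient (an illustrative sketch; output shown rounded)::
    >>> a = np.exp(2j * np.pi * 3 * np.arange(8) / 8)
    >>> np.abs(np.fft.fft(a)).round()
    array([ 0.,  0.,  0.,  8.,  0.,  0.,  0.,  0.])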
The inverse DFT is defined as
.. math::
a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
\\qquad m = 0,\\ldots,n-1.
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
Normalization
-------------
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
`None`) so that both direct and inverse transforms will be scaled by
:math:`1/\\sqrt{n}`.
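Under either convention the round trip is the identity, e.g. (sketch)::
    >>> a = np.arange(4.0)
    >>> np.allclose(np.fft.ifft(np.fft.fft(a)), a)
    True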
Real and Hermitian transforms
-----------------------------
When the input is purely real, its transform is Hermitian, i.e., the
component at frequency :math:`f_k` is the complex conjugate of the
component at frequency :math:`-f_k`, which means that for real
inputs there is no information in the negative frequency components that
is not already available from the positive frequency components.
The family of `rfft` functions is
designed to operate on real inputs, and exploits this symmetry by
computing only the positive frequency components, up to and including the
Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
output points. The inverses of this family assume the same symmetry of
their input, and for an output of ``n`` points use ``n/2+1`` input points.
Correspondingly, when the spectrum is purely real, the signal is
Hermitian. The `hfft` family of functions exploits this symmetry by
using ``n/2+1`` complex points in the input (time) domain for ``n`` real
points in the frequency domain.
In higher dimensions, FFTs are used, e.g., for image analysis and
filtering. The computational efficiency of the FFT means that it can
also be a faster way to compute large convolutions, using the property
that a convolution in the time domain is equivalent to a point-by-point
multiplication in the frequency domain.
Higher dimensions
-----------------
In two dimensions, the DFT is defined as
.. math::
A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
\\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
which extends in the obvious way to higher dimensions, and the inverses
in higher dimensions also extend in the same way.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
12-13. Cambridge Univ. Press, Cambridge, UK.
Examples
--------
For examples, see the various functions.
"""
from __future__ import division, absolute_import, print_function
depends = ['core']
| artistic-2.0 |
unix-beard/gloria | service/decorator.py | 1 | 3222 | import inspect
import logging
from gloria.service.runnable import Task, Service
on_task_wrapped = None
on_service_wrapped = None
class _WrapperHelper(object):
def __call__(self, klass, base, _enabled, _autostart, _respawn):
return type(klass.__name__, (base,), dict(klass.__dict__, enabled=_enabled, autostart=_autostart, respawn=_respawn))
def task(enabled=True, autostart=False, respawn=False):
"""Mark class as a task"""
def _task(klass):
class Wrapper(object):
wrapped_task = _WrapperHelper()(klass, Task, enabled, autostart, respawn)
# Notify whoever is interested in the task being wrapped
if on_task_wrapped is not None:
on_task_wrapped(Wrapper.wrapped_task)
return Wrapper
return _task
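# Hypothetical usage sketch (names invented for illustration):
#
#   @task(autostart=True, respawn=True)
#   class Heartbeat(object):
#       pass
#
# 'Heartbeat' then refers to the Wrapper class; the runnable type sits on
# Heartbeat.wrapped_task, rebuilt on top of Task with the flags baked in as
# class attributes, and on_task_wrapped (if set) is notified once.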
class _ServiceWrapperHelper:
def __call__(self, klass, service_dir=''):
if klass == Service:
return type(service_dir, (object,), dict(klass.__dict__))
return type(klass.__name__, (Service,), dict(klass.__dict__))
def service(tasks=[]):
"""Mark class as a service"""
#####################################
# TODO: add autostart=False parameter
#####################################
def _service(klass, service_dir=''):
class Wrapper:
wrapped_class = _ServiceWrapperHelper()(klass, service_dir)
wrapped_tasks = tasks
# Notify whoever is interested in the class being wrapped
if on_service_wrapped is not None:
on_service_wrapped(Wrapper)
return Wrapper
return _service
class Property(object):
"""Expose task's property to the outside world"""
def __init__(self, fget=None, fset=None, doc=None):
self.fget = fget
self.fset = fset
if doc is None and fget is not None:
doc = fget.__doc__
self.__doc__ = doc
def __get__(self, obj, objtype=None):
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute')
return self.fget(obj)
def __set__(self, obj, value):
if self.fset is None:
raise AttributeError('cannot set attribute')
self.fset(obj, value)
def __str__(self):
return '{0}:{1}'.format(self.fget.__name__, self.__doc__)
def getter(self, fget):
return type(self)(fget, self.fset, self.__doc__)
def setter(self, fset):
return type(self)(self.fget, fset, self.__doc__)
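# Hypothetical usage sketch: Property mirrors the builtin property() but
# keeps fget/fset reachable so the framework can enumerate what a task
# exposes, e.g.
#
#   class Clock(Task):
#       @Property
#       def now(self):
#           """Current time"""
#           return self._now
#       @now.setter
#       def now(self, value):
#           self._now = value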
class Command(object):
"""Expose task's command to the outside world"""
def __init__(self, func=None):
self.func = func
# Number of arguments this function takes ('self' is counted as well)
self.argc = len(inspect.getargspec(func)[0])
logging.debug('Command __init__: func={0} argc={1}'.format(func.__name__, self.argc))
def __call__(self, obj, args=''):
if obj is None:
return self
if self.func is None:
raise AttributeError('uncallable method')
if self.argc == 1:
return self.func(obj)
return self.func(obj, args)
def __str__(self):
return '{0}:{1}'.format(self.func.__name__, self.func.__doc__)
| mit |
jmanday/Master | TFM/library/boost_1_63_0/libs/geometry/doc/make_qbk.py | 4 | 6751 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# ===========================================================================
# Copyright (c) 2007-2012 Barend Gehrels, Amsterdam, the Netherlands.
# Copyright (c) 2008-2012 Bruno Lalande, Paris, France.
# Copyright (c) 2009-2012 Mateusz Loskot (mateusz@loskot.net), London, UK
#
# Use, modification and distribution is subject to the Boost Software License,
# Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# ============================================================================
import os, sys
script_dir = os.path.dirname(__file__)
os.chdir(os.path.abspath(script_dir))
print("Boost.Geometry is making .qbk files in %s" % os.getcwd())
if 'DOXYGEN' in os.environ:
doxygen_cmd = os.environ['DOXYGEN']
else:
doxygen_cmd = 'doxygen'
if 'DOXYGEN_XML2QBK' in os.environ:
doxygen_xml2qbk_cmd = os.environ['DOXYGEN_XML2QBK']
elif '--doxygen-xml2qbk' in sys.argv:
doxygen_xml2qbk_cmd = sys.argv[sys.argv.index('--doxygen-xml2qbk')+1]
else:
doxygen_xml2qbk_cmd = 'doxygen_xml2qbk'
os.environ['PATH'] = os.environ['PATH']+os.pathsep+os.path.dirname(doxygen_xml2qbk_cmd)
doxygen_xml2qbk_cmd = os.path.basename(doxygen_xml2qbk_cmd)
cmd = doxygen_xml2qbk_cmd
cmd = cmd + " --xml doxy/doxygen_output/xml/%s.xml"
cmd = cmd + " --start_include boost/geometry/"
cmd = cmd + " --convenience_header_path ../../../boost/geometry/"
cmd = cmd + " --convenience_headers geometry.hpp,geometries/geometries.hpp"
cmd = cmd + " --skip_namespace boost::geometry::"
cmd = cmd + " --copyright src/copyright_block.qbk"
cmd = cmd + " --output_member_variables false"
cmd = cmd + " > generated/%s.qbk"
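# Illustrative expansion (paths abridged): for the 'area' group the template
# above becomes roughly
#   doxygen_xml2qbk --xml doxy/doxygen_output/xml/group__area.xml
#       --start_include boost/geometry/ ... > generated/area.qbk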
def run_command(command):
if os.system(command) != 0:
raise Exception("Error running %s" % command)
def remove_all_files(dir):
if os.path.exists(dir):
for f in os.listdir(dir):
os.remove(dir+f)
def call_doxygen():
os.chdir("doxy")
remove_all_files("doxygen_output/xml/")
run_command(doxygen_cmd)
os.chdir("..")
def group_to_quickbook(section):
run_command(cmd % ("group__" + section.replace("_", "__"), section))
def model_to_quickbook(section):
run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + section.replace("_", "__"), section))
def model_to_quickbook2(classname, section):
run_command(cmd % ("classboost_1_1geometry_1_1model_1_1" + classname, section))
def struct_to_quickbook(section):
run_command(cmd % ("structboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def class_to_quickbook(section):
run_command(cmd % ("classboost_1_1geometry_1_1" + section.replace("_", "__"), section))
def class_to_quickbook2(classname, section):
run_command(cmd % ("classboost_1_1geometry_1_1" + classname, section))
def strategy_to_quickbook(section):
p = section.find("::")
ns = section[:p]
strategy = section[p+2:]
run_command(cmd % ("classboost_1_1geometry_1_1strategy_1_1"
+ ns.replace("_", "__") + "_1_1" + strategy.replace("_", "__"),
ns + "_" + strategy))
def cs_to_quickbook(section):
run_command(cmd % ("structboost_1_1geometry_1_1cs_1_1" + section.replace("_", "__"), section))
call_doxygen()
algorithms = ["append", "assign", "make", "clear"
, "area", "buffer", "centroid", "convert", "correct", "covered_by"
, "convex_hull", "crosses", "difference", "disjoint", "distance"
, "envelope", "equals", "expand", "for_each", "is_empty"
, "is_simple", "is_valid", "intersection", "intersects", "length"
, "num_geometries", "num_interior_rings", "num_points"
, "num_segments", "overlaps", "perimeter", "relate", "relation"
, "reverse", "simplify", "sym_difference", "touches"
, "transform", "union", "unique", "within"]
access_functions = ["get", "set", "exterior_ring", "interior_rings"
, "num_points", "num_interior_rings", "num_geometries"]
coordinate_systems = ["cartesian", "geographic", "polar", "spherical", "spherical_equatorial"]
core = ["closure", "coordinate_system", "coordinate_type", "cs_tag"
, "dimension", "exception", "interior_type"
, "degree", "radian"
, "is_radian", "point_order"
, "point_type", "ring_type", "tag", "tag_cast" ]
exceptions = ["exception", "centroid_exception"]
iterators = ["circular_iterator", "closing_iterator"
, "ever_circling_iterator"]
models = ["point", "linestring", "box"
, "polygon", "segment", "ring"
, "multi_linestring", "multi_point", "multi_polygon", "referring_segment"]
strategies = ["distance::pythagoras", "distance::pythagoras_box_box"
, "distance::pythagoras_point_box", "distance::haversine"
, "distance::cross_track", "distance::cross_track_point_box"
, "distance::projected_point"
, "within::winding", "within::franklin", "within::crossings_multiply"
, "area::surveyor", "area::huiller"
, "buffer::point_circle", "buffer::point_square"
, "buffer::join_round", "buffer::join_miter"
, "buffer::end_round", "buffer::end_flat"
, "buffer::distance_symmetric", "buffer::distance_asymmetric"
, "buffer::side_straight"
, "centroid::bashein_detmer", "centroid::average"
, "convex_hull::graham_andrew"
, "simplify::douglas_peucker"
, "side::side_by_triangle", "side::side_by_cross_track", "side::spherical_side_formula"
, "transform::inverse_transformer", "transform::map_transformer"
, "transform::rotate_transformer", "transform::scale_transformer"
, "transform::translate_transformer", "transform::ublas_transformer"
]
views = ["box_view", "segment_view"
, "closeable_view", "reversible_view", "identity_view"]
for i in algorithms:
group_to_quickbook(i)
for i in access_functions:
group_to_quickbook(i)
for i in coordinate_systems:
cs_to_quickbook(i)
for i in core:
struct_to_quickbook(i)
for i in exceptions:
class_to_quickbook(i)
for i in iterators:
struct_to_quickbook(i)
for i in models:
model_to_quickbook(i)
for i in strategies:
strategy_to_quickbook(i)
for i in views:
struct_to_quickbook(i)
model_to_quickbook2("d2_1_1point__xy", "point_xy")
group_to_quickbook("arithmetic")
group_to_quickbook("enum")
group_to_quickbook("register")
group_to_quickbook("svg")
class_to_quickbook("svg_mapper")
group_to_quickbook("wkt")
class_to_quickbook2("de9im_1_1matrix", "de9im_matrix")
class_to_quickbook2("de9im_1_1mask", "de9im_mask")
class_to_quickbook2("de9im_1_1static__mask", "de9im_static_mask")
os.chdir("index")
execfile("make_qbk.py")
os.chdir("..")
# Use either bjam or b2 or ../../../b2 (the last should be done on Release branch)
if "--release-build" not in sys.argv:
run_command("b2")
| apache-2.0 |
npiganeau/odoo | addons/hr_timesheet_invoice/report/hr_timesheet_invoice_report.py | 23 | 9496 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.sql import drop_view_if_exists
class report_timesheet_line(osv.osv):
_name = "report.timesheet.line"
_description = "Timesheet Line"
_auto = False
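    # The reporting views in this file share one pattern: aggregate
    # account_analytic_line rows by period (to_char over a date column) plus
    # a few dimensions, exposing min(id) as a stable primary key for the ORM.
    # _auto = False keeps the ORM from creating a table, since init() builds
    # a SQL view instead.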
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id': fields.many2one('res.users', 'User', readonly=True),
'date': fields.date('Date', readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'product_id': fields.many2one('product.product', 'Product',readonly=True),
'account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'general_account_id': fields.many2one('account.account', 'Financial Account', readonly=True),
'invoice_id': fields.many2one('account.invoice', 'Invoiced', readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_line')
cr.execute("""
create or replace view report_timesheet_line as (
select
min(l.id) as id,
l.date as date,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
to_char(l.date, 'YYYY-MM-DD') as day,
l.invoice_id,
l.product_id,
l.account_id,
l.general_account_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
l.user_id is not null
group by
l.date,
l.user_id,
l.product_id,
l.account_id,
l.general_account_id,
l.invoice_id
)
""")
class report_timesheet_user(osv.osv):
_name = "report_timesheet.user"
_description = "Timesheet per day"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'quantity': fields.float('Time', readonly=True),
'cost': fields.float('Cost', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_user')
cr.execute("""
create or replace view report_timesheet_user as (
select
min(l.id) as id,
to_char(l.date,'YYYY') as name,
to_char(l.date,'MM') as month,
l.user_id,
sum(l.unit_amount) as quantity,
sum(l.amount) as cost
from
account_analytic_line l
where
user_id is not null
group by l.date, to_char(l.date,'YYYY'),to_char(l.date,'MM'), l.user_id
)
""")
class report_timesheet_account(osv.osv):
_name = "report_timesheet.account"
_description = "Timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account')
cr.execute("""
create or replace view report_timesheet_account as (
select
min(id) as id,
to_char(create_date, 'YYYY') as name,
to_char(create_date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(create_date, 'YYYY'),to_char(create_date, 'MM'), user_id, account_id
)
""")
class report_timesheet_account_date(osv.osv):
_name = "report_timesheet.account.date"
_description = "Daily timesheet per account"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'quantity': fields.float('Time', readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
}
_order = 'name desc,account_id desc,user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_account_date')
cr.execute("""
create or replace view report_timesheet_account_date as (
select
min(id) as id,
to_char(date,'YYYY') as name,
to_char(date,'MM') as month,
user_id,
account_id,
sum(unit_amount) as quantity
from
account_analytic_line
group by
to_char(date,'YYYY'),to_char(date,'MM'), user_id, account_id
)
""")
class report_timesheet_invoice(osv.osv):
_name = "report_timesheet.invoice"
_description = "Costs to invoice"
_auto = False
_columns = {
'user_id':fields.many2one('res.users', 'User', readonly=True),
'account_id':fields.many2one('account.analytic.account', 'Project', readonly=True),
'manager_id':fields.many2one('res.users', 'Manager', readonly=True),
'quantity': fields.float('Time', readonly=True),
'amount_invoice': fields.float('To invoice', readonly=True)
}
_rec_name = 'user_id'
_order = 'user_id desc'
def init(self, cr):
drop_view_if_exists(cr, 'report_timesheet_invoice')
cr.execute("""
create or replace view report_timesheet_invoice as (
select
min(l.id) as id,
l.user_id as user_id,
l.account_id as account_id,
a.user_id as manager_id,
sum(l.unit_amount) as quantity,
sum(l.unit_amount * t.list_price) as amount_invoice
from account_analytic_line l
left join hr_timesheet_invoice_factor f on (l.to_invoice=f.id)
left join account_analytic_account a on (l.account_id=a.id)
left join product_product p on (l.to_invoice=f.id)
left join product_template t on (l.to_invoice=f.id)
where
l.to_invoice is not null and
l.invoice_id is null
group by
l.user_id,
l.account_id,
a.user_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/mail/scripts/mailmail.py | 40 | 9934 | # -*- test-case-name: twisted.mail.test.test_mailmail -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation module for the I{mailmail} command.
"""
import os
import sys
import rfc822
import getpass
from ConfigParser import ConfigParser
try:
import cStringIO as StringIO
except:
import StringIO
from twisted.copyright import version
from twisted.internet import reactor
from twisted.mail import smtp
GLOBAL_CFG = "/etc/mailmail"
LOCAL_CFG = os.path.expanduser("~/.twisted/mailmail")
SMARTHOST = '127.0.0.1'
ERROR_FMT = """\
Subject: Failed Message Delivery
Message delivery failed. The following occurred:
%s
--
The Twisted sendmail application.
"""
def log(message, *args):
sys.stderr.write(str(message) % args + '\n')
class Options:
"""
@type to: C{list} of C{str}
@ivar to: The addresses to which to deliver this message.
@type sender: C{str}
@ivar sender: The address from which this message is being sent.
@type body: C{file}
@ivar body: The object from which the message is to be read.
"""
def getlogin():
try:
return os.getlogin()
except:
return getpass.getuser()
_unsupportedOption = SystemExit("Unsupported option.")
def parseOptions(argv):
o = Options()
o.to = [e for e in argv if not e.startswith('-')]
o.sender = getlogin()
# Just be very stupid
# Skip -bm -- it is the default
# Add a non-standard option for querying the version of this tool.
if '--version' in argv:
print 'mailmail version:', version
raise SystemExit()
# -bp lists queue information. Screw that.
if '-bp' in argv:
raise _unsupportedOption
# -bs makes sendmail use stdin/stdout as its transport. Screw that.
if '-bs' in argv:
raise _unsupportedOption
# -F sets who the mail is from, but is overridable by the From header
if '-F' in argv:
o.sender = argv[argv.index('-F') + 1]
o.to.remove(o.sender)
# -i and -oi makes us ignore lone "."
if ('-i' in argv) or ('-oi' in argv):
raise _unsupportedOption
# -odb is background delivery
if '-odb' in argv:
o.background = True
else:
o.background = False
# -odf is foreground delivery
if '-odf' in argv:
o.background = False
else:
o.background = True
# -oem and -em cause errors to be mailed back to the sender.
# It is also the default.
# -oep and -ep cause errors to be printed to stderr
if ('-oep' in argv) or ('-ep' in argv):
o.printErrors = True
else:
o.printErrors = False
# -om causes a copy of the message to be sent to the sender if the sender
# appears in an alias expansion. We do not support aliases.
if '-om' in argv:
raise _unsupportedOption
# -t causes us to pick the recipients of the message from the To, Cc, and Bcc
# headers, and to remove the Bcc header if present.
if '-t' in argv:
o.recipientsFromHeaders = True
o.excludeAddresses = o.to
o.to = []
else:
o.recipientsFromHeaders = False
        o.excludeAddresses = []
requiredHeaders = {
'from': [],
'to': [],
'cc': [],
'bcc': [],
'date': [],
}
headers = []
buffer = StringIO.StringIO()
while 1:
write = 1
line = sys.stdin.readline()
if not line.strip():
break
hdrs = line.split(': ', 1)
hdr = hdrs[0].lower()
if o.recipientsFromHeaders and hdr in ('to', 'cc', 'bcc'):
o.to.extend([
a[1] for a in rfc822.AddressList(hdrs[1]).addresslist
])
if hdr == 'bcc':
write = 0
elif hdr == 'from':
o.sender = rfc822.parseaddr(hdrs[1])[1]
if hdr in requiredHeaders:
requiredHeaders[hdr].append(hdrs[1])
if write:
buffer.write(line)
if not requiredHeaders['from']:
buffer.write('From: %s\r\n' % (o.sender,))
if not requiredHeaders['to']:
if not o.to:
raise SystemExit("No recipients specified.")
buffer.write('To: %s\r\n' % (', '.join(o.to),))
if not requiredHeaders['date']:
buffer.write('Date: %s\r\n' % (smtp.rfc822date(),))
buffer.write(line)
if o.recipientsFromHeaders:
for a in o.excludeAddresses:
try:
o.to.remove(a)
except:
pass
buffer.seek(0, 0)
o.body = StringIO.StringIO(buffer.getvalue() + sys.stdin.read())
return o
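# Illustrative invocation (hypothetical message): given
#   $ mailmail alice@example.com < message.txt
# parseOptions() reads the headers from stdin, fills in any missing
# From/To/Date headers, and leaves the complete message in Options.body.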
class Configuration:
"""
@ivar allowUIDs: A list of UIDs which are allowed to send mail.
@ivar allowGIDs: A list of GIDs which are allowed to send mail.
@ivar denyUIDs: A list of UIDs which are not allowed to send mail.
@ivar denyGIDs: A list of GIDs which are not allowed to send mail.
@type defaultAccess: C{bool}
@ivar defaultAccess: C{True} if access will be allowed when no other access
control rule matches or C{False} if it will be denied in that case.
@ivar useraccess: Either C{'allow'} to check C{allowUID} first
or C{'deny'} to check C{denyUID} first.
@ivar groupaccess: Either C{'allow'} to check C{allowGID} first or
C{'deny'} to check C{denyGID} first.
@ivar identities: A C{dict} mapping hostnames to credentials to use when
sending mail to that host.
@ivar smarthost: C{None} or a hostname through which all outgoing mail will
be sent.
@ivar domain: C{None} or the hostname with which to identify ourselves when
connecting to an MTA.
"""
def __init__(self):
self.allowUIDs = []
self.denyUIDs = []
self.allowGIDs = []
self.denyGIDs = []
self.useraccess = 'deny'
self.groupaccess= 'deny'
self.identities = {}
self.smarthost = None
self.domain = None
self.defaultAccess = True
def loadConfig(path):
# [useraccess]
# allow=uid1,uid2,...
# deny=uid1,uid2,...
# order=allow,deny
# [groupaccess]
# allow=gid1,gid2,...
# deny=gid1,gid2,...
# order=deny,allow
# [identity]
# host1=username:password
# host2=username:password
# [addresses]
# smarthost=a.b.c.d
# default_domain=x.y.z
c = Configuration()
if not os.access(path, os.R_OK):
return c
p = ConfigParser()
p.read(path)
au = c.allowUIDs
du = c.denyUIDs
ag = c.allowGIDs
dg = c.denyGIDs
for (section, a, d) in (('useraccess', au, du), ('groupaccess', ag, dg)):
if p.has_section(section):
for (mode, L) in (('allow', a), ('deny', d)):
if p.has_option(section, mode) and p.get(section, mode):
for id in p.get(section, mode).split(','):
try:
id = int(id)
except ValueError:
log("Illegal %sID in [%s] section: %s", section[0].upper(), section, id)
else:
L.append(id)
order = p.get(section, 'order')
            order = map(str.strip, map(str.lower, order.split(',')))  # strip, not split: str.split would yield lists and break the comparison below
if order[0] == 'allow':
setattr(c, section, 'allow')
else:
setattr(c, section, 'deny')
if p.has_section('identity'):
for (host, up) in p.items('identity'):
parts = up.split(':', 1)
if len(parts) != 2:
log("Illegal entry in [identity] section: %s", up)
continue
            c.identities[host] = parts  # the Configuration object, not the parser
if p.has_section('addresses'):
if p.has_option('addresses', 'smarthost'):
c.smarthost = p.get('addresses', 'smarthost')
if p.has_option('addresses', 'default_domain'):
c.domain = p.get('addresses', 'default_domain')
return c
def success(result):
reactor.stop()
failed = None
def failure(f):
global failed
reactor.stop()
failed = f
def sendmail(host, options, ident):
d = smtp.sendmail(host, options.sender, options.to, options.body)
d.addCallbacks(success, failure)
reactor.run()
def senderror(failure, options):
recipient = [options.sender]
sender = '"Internally Generated Message (%s)"<postmaster@%s>' % (sys.argv[0], smtp.DNSNAME)
error = StringIO.StringIO()
failure.printTraceback(file=error)
body = StringIO.StringIO(ERROR_FMT % error.getvalue())
d = smtp.sendmail('localhost', sender, recipient, body)
d.addBoth(lambda _: reactor.stop())
def deny(conf):
uid = os.getuid()
gid = os.getgid()
if conf.useraccess == 'deny':
if uid in conf.denyUIDs:
return True
if uid in conf.allowUIDs:
return False
else:
if uid in conf.allowUIDs:
return False
if uid in conf.denyUIDs:
return True
if conf.groupaccess == 'deny':
if gid in conf.denyGIDs:
return True
if gid in conf.allowGIDs:
return False
else:
if gid in conf.allowGIDs:
return False
if gid in conf.denyGIDs:
return True
return not conf.defaultAccess
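# Access resolution sketch: with useraccess='deny' a UID present on both
# lists is denied (the deny list is consulted first); with 'allow' it is
# allowed. The group lists are consulted only when the UID matched neither
# list, and defaultAccess settles the case where nothing matched at all.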
def run():
o = parseOptions(sys.argv[1:])
gConf = loadConfig(GLOBAL_CFG)
lConf = loadConfig(LOCAL_CFG)
if deny(gConf) or deny(lConf):
log("Permission denied")
return
host = lConf.smarthost or gConf.smarthost or SMARTHOST
ident = gConf.identities.copy()
ident.update(lConf.identities)
if lConf.domain:
smtp.DNSNAME = lConf.domain
elif gConf.domain:
smtp.DNSNAME = gConf.domain
sendmail(host, o, ident)
if failed:
if o.printErrors:
failed.printTraceback(file=sys.stderr)
raise SystemExit(1)
else:
senderror(failed, o)
| gpl-3.0 |
jblackburne/scikit-learn | sklearn/neural_network/rbm.py | 46 | 12291 | """Restricted Boltzmann Machine
"""
# Authors: Yann N. Dauphin <dauphiya@iro.umontreal.ca>
# Vlad Niculae
# Gabriel Synnaeve
# Lars Buitinck
# License: BSD 3 clause
import time
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator
from ..base import TransformerMixin
from ..externals.six.moves import xrange
from ..utils import check_array
from ..utils import check_random_state
from ..utils import gen_even_slices
from ..utils import issparse
from ..utils.extmath import safe_sparse_dot
from ..utils.extmath import log_logistic
from ..utils.fixes import expit # logistic function
from ..utils.validation import check_is_fitted
class BernoulliRBM(BaseEstimator, TransformerMixin):
"""Bernoulli Restricted Boltzmann Machine (RBM).
A Restricted Boltzmann Machine with binary visible units and
binary hidden units. Parameters are estimated using Stochastic Maximum
Likelihood (SML), also known as Persistent Contrastive Divergence (PCD)
[2].
The time complexity of this implementation is ``O(d ** 2)`` assuming
d ~ n_features ~ n_components.
Read more in the :ref:`User Guide <rbm>`.
Parameters
----------
n_components : int, optional
Number of binary hidden units.
learning_rate : float, optional
The learning rate for weight updates. It is *highly* recommended
to tune this hyper-parameter. Reasonable values are in the
10**[0., -3.] range.
batch_size : int, optional
Number of examples per minibatch.
n_iter : int, optional
Number of iterations/sweeps over the training dataset to perform
during training.
verbose : int, optional
The verbosity level. The default, zero, means silent mode.
random_state : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
intercept_hidden_ : array-like, shape (n_components,)
Biases of the hidden units.
intercept_visible_ : array-like, shape (n_features,)
Biases of the visible units.
components_ : array-like, shape (n_components, n_features)
        Weight matrix, where n_features is the number of
        visible units and n_components is the number of hidden units.
Examples
--------
>>> import numpy as np
>>> from sklearn.neural_network import BernoulliRBM
>>> X = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
>>> model = BernoulliRBM(n_components=2)
>>> model.fit(X)
BernoulliRBM(batch_size=10, learning_rate=0.1, n_components=2, n_iter=10,
random_state=None, verbose=0)
References
----------
[1] Hinton, G. E., Osindero, S. and Teh, Y. A fast learning algorithm for
deep belief nets. Neural Computation 18, pp 1527-1554.
http://www.cs.toronto.edu/~hinton/absps/fastnc.pdf
[2] Tieleman, T. Training Restricted Boltzmann Machines using
Approximations to the Likelihood Gradient. International Conference
on Machine Learning (ICML) 2008
"""
def __init__(self, n_components=256, learning_rate=0.1, batch_size=10,
n_iter=10, verbose=0, random_state=None):
self.n_components = n_components
self.learning_rate = learning_rate
self.batch_size = batch_size
self.n_iter = n_iter
self.verbose = verbose
self.random_state = random_state
def transform(self, X):
"""Compute the hidden layer activation probabilities, P(h=1|v=X).
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
The data to be transformed.
Returns
-------
h : array, shape (n_samples, n_components)
Latent representations of the data.
"""
check_is_fitted(self, "components_")
X = check_array(X, accept_sparse='csr', dtype=np.float64)
return self._mean_hiddens(X)
def _mean_hiddens(self, v):
"""Computes the probabilities P(h=1|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
h : array-like, shape (n_samples, n_components)
Corresponding mean field values for the hidden layer.
"""
p = safe_sparse_dot(v, self.components_.T)
p += self.intercept_hidden_
return expit(p, out=p)
def _sample_hiddens(self, v, rng):
"""Sample from the distribution P(h|v).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer.
"""
p = self._mean_hiddens(v)
return (rng.random_sample(size=p.shape) < p)
def _sample_visibles(self, h, rng):
"""Sample from the distribution P(v|h).
Parameters
----------
h : array-like, shape (n_samples, n_components)
Values of the hidden layer to sample from.
rng : RandomState
Random number generator to use.
Returns
-------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
"""
p = np.dot(h, self.components_)
p += self.intercept_visible_
expit(p, out=p)
return (rng.random_sample(size=p.shape) < p)
def _free_energy(self, v):
"""Computes the free energy F(v) = - log sum_h exp(-E(v,h)).
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer.
Returns
-------
free_energy : array-like, shape (n_samples,)
The value of the free energy.
"""
return (- safe_sparse_dot(v, self.intercept_visible_)
- np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
+ self.intercept_hidden_).sum(axis=1))
def gibbs(self, v):
"""Perform one Gibbs sampling step.
Parameters
----------
v : array-like, shape (n_samples, n_features)
Values of the visible layer to start from.
Returns
-------
v_new : array-like, shape (n_samples, n_features)
Values of the visible layer after one Gibbs step.
"""
check_is_fitted(self, "components_")
if not hasattr(self, "random_state_"):
self.random_state_ = check_random_state(self.random_state)
h_ = self._sample_hiddens(v, self.random_state_)
v_ = self._sample_visibles(h_, self.random_state_)
return v_
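    # Hypothetical sampling sketch (after fit): chaining gibbs() runs the
    # alternating block-Gibbs chain v -> h -> v, e.g.
    #   v = rng.randint(2, size=(1, model.components_.shape[1]))
    #   for _ in range(1000):
    #       v = model.gibbs(v)
    # which, in the limit, draws v from the learned distribution.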
def partial_fit(self, X, y=None):
"""Fit the model to the data X which should contain a partial
segment of the data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
if not hasattr(self, 'components_'):
self.components_ = np.asarray(
self.random_state_.normal(
0,
0.01,
(self.n_components, X.shape[1])
),
order='F')
if not hasattr(self, 'intercept_hidden_'):
self.intercept_hidden_ = np.zeros(self.n_components, )
if not hasattr(self, 'intercept_visible_'):
self.intercept_visible_ = np.zeros(X.shape[1], )
if not hasattr(self, 'h_samples_'):
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
self._fit(X, self.random_state_)
def _fit(self, v_pos, rng):
"""Inner fit for one mini-batch.
Adjust the parameters to maximize the likelihood of v using
Stochastic Maximum Likelihood (SML).
Parameters
----------
v_pos : array-like, shape (n_samples, n_features)
The data to use for training.
rng : RandomState
Random number generator to use for sampling.
"""
h_pos = self._mean_hiddens(v_pos)
v_neg = self._sample_visibles(self.h_samples_, rng)
h_neg = self._mean_hiddens(v_neg)
lr = float(self.learning_rate) / v_pos.shape[0]
update = safe_sparse_dot(v_pos.T, h_pos, dense_output=True).T
update -= np.dot(h_neg.T, v_neg)
self.components_ += lr * update
self.intercept_hidden_ += lr * (h_pos.sum(axis=0) - h_neg.sum(axis=0))
self.intercept_visible_ += lr * (np.asarray(
v_pos.sum(axis=0)).squeeze() -
v_neg.sum(axis=0))
h_neg[rng.uniform(size=h_neg.shape) < h_neg] = 1.0 # sample binomial
self.h_samples_ = np.floor(h_neg, h_neg)
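    # PCD update sketch: each minibatch moves the weights along
    #   dW ~ <v h>_data - <v h>_model,
    # where the model-side statistics come from the persistent fantasy
    # particles kept in self.h_samples_, not from a chain restarted at the
    # data (which would be plain contrastive divergence).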
def score_samples(self, X):
"""Compute the pseudo-likelihood of X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Values of the visible layer. Must be all-boolean (not checked).
Returns
-------
pseudo_likelihood : array-like, shape (n_samples,)
Value of the pseudo-likelihood (proxy for likelihood).
Notes
-----
This method is not deterministic: it computes a quantity called the
free energy on X, then on a randomly corrupted version of X, and
returns the log of the logistic function of the difference.
"""
check_is_fitted(self, "components_")
v = check_array(X, accept_sparse='csr')
rng = check_random_state(self.random_state)
# Randomly corrupt one feature in each sample in v.
ind = (np.arange(v.shape[0]),
rng.randint(0, v.shape[1], v.shape[0]))
if issparse(v):
data = -2 * v[ind] + 1
v_ = v + sp.csr_matrix((data.A.ravel(), ind), shape=v.shape)
else:
v_ = v.copy()
v_[ind] = 1 - v_[ind]
fe = self._free_energy(v)
fe_ = self._free_energy(v_)
return v.shape[1] * log_logistic(fe_ - fe)
def fit(self, X, y=None):
"""Fit the model to the data X.
Parameters
----------
X : {array-like, sparse matrix} shape (n_samples, n_features)
Training data.
Returns
-------
self : BernoulliRBM
The fitted model.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
n_samples = X.shape[0]
rng = check_random_state(self.random_state)
self.components_ = np.asarray(
rng.normal(0, 0.01, (self.n_components, X.shape[1])),
order='F')
self.intercept_hidden_ = np.zeros(self.n_components, )
self.intercept_visible_ = np.zeros(X.shape[1], )
self.h_samples_ = np.zeros((self.batch_size, self.n_components))
n_batches = int(np.ceil(float(n_samples) / self.batch_size))
batch_slices = list(gen_even_slices(n_batches * self.batch_size,
n_batches, n_samples))
verbose = self.verbose
begin = time.time()
for iteration in xrange(1, self.n_iter + 1):
for batch_slice in batch_slices:
self._fit(X[batch_slice], rng)
if verbose:
end = time.time()
print("[%s] Iteration %d, pseudo-likelihood = %.2f,"
" time = %.2fs"
% (type(self).__name__, iteration,
self.score_samples(X).mean(), end - begin))
begin = end
return self
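# Editor's sketch: a minimal end-to-end run of the estimator above on random
# binary data; the hyperparameter values are illustrative, not recommended.
if __name__ == '__main__':
    demo_rng = np.random.RandomState(0)
    X_demo = (demo_rng.uniform(size=(64, 16)) > 0.5).astype(np.float64)
    rbm = BernoulliRBM(n_components=8, learning_rate=0.05, batch_size=8,
                       n_iter=5, random_state=0)
    rbm.fit(X_demo)                            # SML/PCD training
    print(rbm.score_samples(X_demo).mean())    # stochastic pseudo-likelihood
    v_next = rbm.gibbs(X_demo[:4])             # one block-Gibbs step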
| bsd-3-clause |
tschmorleiz/amcat | api/rest/viewsets/task.py | 2 | 5446 | ###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
import functools
from rest_framework import serializers
from amcat.models.task import IN_PROGRESS
from amcat.models import Task
from amcat.tools import amcattest
from api.rest.serializer import AmCATModelSerializer
__all__ = ("TaskSerializer", "TaskResultSerializer")
class TaskSerializer(AmCATModelSerializer):
"""Represents a Task object defined in amcat.models.task.Task. Adds two
fields to the model: status and ready."""
status = serializers.SerializerMethodField('get_status')
ready = serializers.SerializerMethodField('get_ready')
progress = serializers.SerializerMethodField('get_progress')
def __init__(self, *args, **kwargs):
super(TaskSerializer, self).__init__(*args, **kwargs)
self._tasks = {}
def set_status_ready(self, task):
async = task.get_async_result()
self._tasks[task] = (async.ready(), async.result, async.status)
def get_status_ready(self, task):
"""Returns tuple with (status, ready) => (str, bool)"""
if task not in self._tasks:
self.set_status_ready(task)
return self._tasks[task]
def get_status(self, task):
_, _, status = self.get_status_ready(task)
return status
def get_ready(self, task):
ready, _, _ = self.get_status_ready(task)
return ready
def get_progress(self, task):
_, result, status = self.get_status_ready(task)
if status == IN_PROGRESS and isinstance(result, dict):
return result
class Meta:
model = Task
class TaskResultSerializer(AmCATModelSerializer):
result = serializers.SerializerMethodField('get_result')
ready = serializers.SerializerMethodField('get_ready')
def get_ready(self, task):
return task.get_async_result().ready()
def get_result(self, task):
if not self.get_ready(task):
return None
return task.get_result()
class Meta:
model = Task
fields = ("uuid", "ready", "result")
class TestTaskSerializer(amcattest.AmCATTestCase):
def test_order(self):
class MockTask:
def __init__(self, ready=False, status="PENDING", result=None, callback=None):
self._ready = ready
self._status = status
self._result = result
self.callback = callback
def ready(self):
if self.callback: self.callback("_ready")
return self._ready
@property
def status(self, **kwargs):
if self.callback: self.callback("_status")
return self._status
@property
def result(self):
if self.callback: self.callback("_result")
return self._result
def get_async_result(self):
return self
ts = TaskSerializer()
mt = MockTask()
mt2 = MockTask(ready=True, status="SUCCESS")
mt3 = MockTask()
mt4 = MockTask()
# Test simple getting / caching
self.assertEqual("PENDING", ts.get_status(mt))
self.assertEqual(False, ts.get_ready(mt))
self.assertEqual("SUCCESS", ts.get_status(mt2))
self.assertEqual(True, ts.get_ready(mt2))
# Test order of ready/status/result
def _change(task, set_prop, set_value, prop, callprop):
if prop == callprop:
setattr(task, set_prop, set_value)
# Set ready to True when _result is fetched
change = functools.partial(_change, mt3, "_ready", True, "_result")
mt3.callback = change
self.assertEqual("PENDING", ts.get_status(mt3))
self.assertEqual(False, ts.get_ready(mt3))
self.assertEqual(True, mt3._ready)
# Set ready to True when _status is fetched
change = functools.partial(_change, mt4, "_ready", True, "_status")
mt4.callback = change
self.assertEqual("PENDING", ts.get_status(mt4))
self.assertEqual(False, ts.get_ready(mt4))
self.assertEqual(True, mt4._ready)
| agpl-3.0 |
dentaku65/plugin.video.sod | channels/piratestreaming.py | 1 | 15107 | # -*- coding: utf-8 -*-
# ------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canal para piratestreaming
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
# ------------------------------------------------------------
import re
import urlparse
from core import config
from core import logger
from core import scrapertools
from core.item import Item
from core.tmdb import infoSod
from servers import servertools
__channel__ = "piratestreaming"
__category__ = "F,S,A"
__type__ = "generic"
__title__ = "piratestreaming"
__language__ = "IT"
DEBUG = config.get_setting("debug")
host = "http://www.piratestreaming.news"
def isGeneric():
return True
def mainlist(item):
logger.info("[piratestreaming.py] mainlist")
itemlist = [Item(channel=__channel__,
title="[COLOR azure]Aggiornamenti[/COLOR]",
action="peliculas",
url="%s/film-aggiornamenti.php" % host,
thumbnail="http://orig03.deviantart.net/6889/f/2014/079/7/b/movies_and_popcorn_folder_icon_by_matheusgrilo-d7ay4tw.png"),
Item(channel=__channel__,
title="[COLOR azure]Contenuti per Genere[/COLOR]",
action="categorias",
url=host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca...[/COLOR]",
action="search",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"),
Item(channel=__channel__,
title="[COLOR azure]Archivio Serie TV[/COLOR]",
action="categoryarchive",
url="%s/archivio-serietv.php" % host,
thumbnail="http://repository-butchabay.googlecode.com/svn/branches/eden/skin.cirrus.extended.v2/extras/moviegenres/TV%20Series.png"),
Item(channel=__channel__,
title="[COLOR azure]Serie TV[/COLOR]",
extra="serie",
action="peliculas_tv",
url="%s/serietv-aggiornamenti.php" % host,
thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/New%20TV%20Shows.png"),
Item(channel=__channel__,
title="[COLOR yellow]Cerca Serie TV...[/COLOR]",
action="search",
extra="serie",
thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search")]
return itemlist
def peliculas(item):
logger.info("streamondemand.piratestreaming peliculas")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
    # Extract the entries (folders)
patron = '<div class="featuredItem">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<a href=[^>]*>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
try:
daa = scrapertools.cache_page(scrapedurl)
da = daa.split('justify;">')
da = da[1].split('</p>')
scrapedplot = scrapertools.htmlclean(da[0]).strip()
except:
scrapedplot = "Trama non disponibile"
if DEBUG: logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodios" if item.extra == "serie" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
    # Extract the paginator
patronvideos = '<td align="center">[^<]+</td>[^<]+<td align="center">\s*<a href="([^"]+)">[^<]+</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="HomePage",
title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def peliculas_tv(item):
logger.info("streamondemand.piratestreaming peliculas")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
    # Extract the entries (folders)
patron = '<div class="featuredItem">.*?<a href="([^"]+)".*?<img src="([^"]+)".*?<a href=[^>]*>(.*?)</a>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).strip()
try:
daa = scrapertools.cache_page(scrapedurl)
da = daa.split('justify;">')
da = da[1].split('</p>')
scrapedplot = scrapertools.htmlclean(da[0]).strip()
except:
scrapedplot = "Trama non disponibile"
if DEBUG: logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodios" if item.extra == "serie" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title=scrapedtitle,
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='tv'))
    # Extract the paginator
patronvideos = '<td align="center">[^<]+</td>[^<]+<td align="center">\s*<a href="([^"]+)">[^<]+</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="HomePage",
title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
itemlist.append(
Item(channel=__channel__,
action="peliculas_tv",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def HomePage(item):
import xbmc
xbmc.executebuiltin("ReplaceWindow(10024,plugin://plugin.video.streamondemand)")
def categorias(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<a href="#">Film</a>[^<]+<ul>(.*?)</ul>'
data = scrapertools.find_single_match(data, patron)
patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if DEBUG: logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="peliculas",
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True))
return itemlist
def categoryarchive(item):
itemlist = []
data = scrapertools.cache_page(item.url)
patron = '<b>0-9</b><hr />(.*?)<div class="clear"></div>'
data = scrapertools.find_single_match(data, patron)
patron = '<a href=([^>]+)>([^<]+)</a><br />'
matches = re.compile(patron, re.DOTALL).findall(data)
for scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if DEBUG: logger.info(
"title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
itemlist.append(
Item(channel=__channel__,
action="episodios",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail="http://repository-butchabay.googlecode.com/svn/branches/eden/skin.cirrus.extended.v2/extras/moviegenres/TV%20Series.png",
plot=scrapedplot,
folder=True))
return itemlist
def search(item, texto):
logger.info("[piratestreaming.py] search " + texto)
item.url = host + "/cerca.php?all=" + texto
try:
return cerca(item)
    # Catch the exception so that a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error("%s" % line)
return []
def cerca(item):
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
if item.extra == "serie":
data = data.split('Serie TV Complete')[1]
patron = '<!-- Featured Item -->(.*?)<!-- End of Content -->'
bloque = scrapertools.find_single_match(data, patron)
    # Extract the entries (folders)
patron = '<img src=(.*?) alt="featured item" style="width:\s+80.8px; height: 109.6px;" /></a>\s*<div class="featuredText">\s*'
patron += '<b><a href=([^>]+)>(.*?)</b>'
matches = re.compile(patron).findall(bloque)
for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
scrapedplot = ""
scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.replace("</a>", ""))
if DEBUG: logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
itemlist.append(infoSod(
Item(channel=__channel__,
action="episodios" if item.extra == "serie" else "findvideos",
fulltitle=scrapedtitle,
show=scrapedtitle,
title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
url=scrapedurl,
thumbnail=scrapedthumbnail,
plot=scrapedplot,
folder=True), tipo='movie'))
    # Extract the paginator
patronvideos = '<td align="center">[^<]+</td>[^<]+<td align="center">\s*<a href="([^"]+)">[^<]+</a>'
matches = re.compile(patronvideos, re.DOTALL).findall(data)
if len(matches) > 0:
scrapedurl = urlparse.urljoin(item.url, matches[0])
itemlist.append(
Item(channel=__channel__,
action="HomePage",
title="[COLOR yellow]Torna Home[/COLOR]",
                 folder=True))
itemlist.append(
Item(channel=__channel__,
action="cerca",
title="[COLOR orange]Successivo >>[/COLOR]",
url=scrapedurl,
thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
folder=True))
return itemlist
def episodios(item):
def load_episodios(html, item, itemlist, lang_title):
for data in scrapertools.decodeHtmlentities(html).splitlines():
            # Extract the entries
end = data.find('<a ')
if end > 0:
scrapedtitle = re.sub(r'<[^>]*>', '', data[:end]).strip()
else:
scrapedtitle = ''
if scrapedtitle == '':
patron = '<a\s*rel="nofollow"\s*target="_blank"\s*href="[^"]+">([^<]+)</a>'
scrapedtitle = scrapertools.find_single_match(data, patron).strip()
title = scrapertools.find_single_match(scrapedtitle, '\d+[^\d]+\d+')
if title == '':
title = scrapedtitle
if title != '':
itemlist.append(
Item(channel=__channel__,
action="findvid_serie",
title=title + " (" + lang_title + ")",
url=item.url,
thumbnail=item.thumbnail,
extra=data,
fulltitle=item.fulltitle,
show=item.show))
logger.info("[piratestreaming.py] episodios")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
start = data.find('<!--googleoff: all-->')
end = data.find('<!--googleon: all-->', start)
data = data[start:end]
lang_titles = []
starts = []
patron = r"(?:STAGIONE|MINISERIE|WEBSERIE|SERIE).*?ITA"
matches = re.compile(patron, re.IGNORECASE).finditer(data)
for match in matches:
season_title = match.group()
if season_title != '':
lang_titles.append('SUB ITA' if 'SUB' in season_title.upper() else 'ITA')
starts.append(match.end())
i = 1
len_lang_titles = len(lang_titles)
while i <= len_lang_titles:
inizio = starts[i - 1]
fine = starts[i] if i < len_lang_titles else -1
html = data[inizio:fine]
lang_title = lang_titles[i - 1]
load_episodios(html, item, itemlist, lang_title)
i += 1
if len(itemlist) == 0:
load_episodios(data, item, itemlist, 'ITA')
if config.get_library_support() and len(itemlist) != 0:
itemlist.append(
Item(channel=__channel__,
title=item.title,
url=item.url,
action="add_serie_to_library",
extra="episodios",
show=item.show))
itemlist.append(
Item(channel=item.channel,
title="Scarica tutti gli episodi della serie",
url=item.url,
action="download_all_episodes",
extra="episodios",
show=item.show))
return itemlist
def findvid_serie(item):
logger.info("[piratestreaming.py] findvideos")
    # The page data was already downloaded and stored in item.extra
data = item.extra
itemlist = servertools.find_video_items(data=data)
for videoitem in itemlist:
videoitem.title = item.title + videoitem.title
videoitem.fulltitle = item.fulltitle
videoitem.thumbnail = item.thumbnail
videoitem.show = item.show
videoitem.plot = item.plot
videoitem.channel = __channel__
return itemlist
| gpl-3.0 |
LREN-CHUV/data-factory-airflow-dags | reorganisation_steps/cleanup_all_local.py | 2 | 1473 | """
Reorganisation step: cleanup all local data.
Cleanup the local data (for the whole data-set) created during copy_to_local step.
Configuration variables used:
* :reorganisation:copy_to_local section
* OUTPUT_FOLDER: destination folder for the local copy
"""
from datetime import timedelta
from textwrap import dedent
from airflow import configuration
from airflow.operators.bash_operator import BashOperator
from common_steps import Step
def cleanup_all_local_cfg(dag, upstream_step, step_section=None):
cleanup_folder = configuration.get(step_section, "OUTPUT_FOLDER")
return cleanup_all_local_step(dag, upstream_step, cleanup_folder)
def cleanup_all_local_step(dag, upstream_step, cleanup_folder):
cleanup_local_cmd = dedent("""
rm -rf {{ params["cleanup_folder"] }}/*
""")
cleanup_all_local = BashOperator(
task_id='cleanup_all_local',
bash_command=cleanup_local_cmd,
params={'cleanup_folder': cleanup_folder},
priority_weight=upstream_step.priority_weight,
execution_timeout=timedelta(hours=1),
dag=dag
)
if upstream_step.task:
cleanup_all_local.set_upstream(upstream_step.task)
cleanup_all_local.doc_md = dedent("""\
# Cleanup all local files
Remove locally stored files as they have been already reorganised.
""")
return Step(cleanup_all_local, cleanup_all_local.task_id, upstream_step.priority_weight + 10)
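# Editor's sketch: chaining this step after a copy step inside a DAG; the DAG
# object, the upstream copy step and the section name are illustrative.
# dag = DAG('reorganisation', default_args=default_args, schedule_interval=None)
# copy_step = copy_to_local_cfg(dag, initial_step, 'reorganisation:copy_to_local')
# cleanup_step = cleanup_all_local_cfg(dag, copy_step,
#                                      step_section='reorganisation:copy_to_local')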
| apache-2.0 |
danielvdao/facebookMacBot | venv/lib/python2.7/site-packages/setuptools/tests/test_find_packages.py | 109 | 5619 | """Tests for setuptools.find_packages()."""
import os
import sys
import shutil
import tempfile
import unittest
import platform
import setuptools
from setuptools import find_packages
from setuptools.tests.py26compat import skipIf
find_420_packages = setuptools.PEP420PackageFinder.find
def has_symlink():
bad_symlink = (
# Windows symlink directory detection is broken on Python 3.2
platform.system() == 'Windows' and sys.version_info[:2] == (3,2)
)
return hasattr(os, 'symlink') and not bad_symlink
class TestFindPackages(unittest.TestCase):
def setUp(self):
self.dist_dir = tempfile.mkdtemp()
self._make_pkg_structure()
def tearDown(self):
shutil.rmtree(self.dist_dir)
def _make_pkg_structure(self):
"""Make basic package structure.
dist/
docs/
conf.py
pkg/
__pycache__/
nspkg/
mod.py
subpkg/
assets/
asset
__init__.py
setup.py
"""
self.docs_dir = self._mkdir('docs', self.dist_dir)
self._touch('conf.py', self.docs_dir)
self.pkg_dir = self._mkdir('pkg', self.dist_dir)
self._mkdir('__pycache__', self.pkg_dir)
self.ns_pkg_dir = self._mkdir('nspkg', self.pkg_dir)
self._touch('mod.py', self.ns_pkg_dir)
self.sub_pkg_dir = self._mkdir('subpkg', self.pkg_dir)
self.asset_dir = self._mkdir('assets', self.sub_pkg_dir)
self._touch('asset', self.asset_dir)
self._touch('__init__.py', self.sub_pkg_dir)
self._touch('setup.py', self.dist_dir)
def _mkdir(self, path, parent_dir=None):
if parent_dir:
path = os.path.join(parent_dir, path)
os.mkdir(path)
return path
def _touch(self, path, dir_=None):
if dir_:
path = os.path.join(dir_, path)
fp = open(path, 'w')
fp.close()
return path
def test_regular_package(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir)
self.assertEqual(packages, ['pkg', 'pkg.subpkg'])
def test_exclude(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_packages(self.dist_dir, exclude=('pkg.*',))
assert packages == ['pkg']
def test_include_excludes_other(self):
"""
If include is specified, other packages should be excluded.
"""
self._touch('__init__.py', self.pkg_dir)
alt_dir = self._mkdir('other_pkg', self.dist_dir)
self._touch('__init__.py', alt_dir)
packages = find_packages(self.dist_dir, include=['other_pkg'])
self.assertEqual(packages, ['other_pkg'])
def test_dir_with_dot_is_skipped(self):
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
data_dir = self._mkdir('some.data', self.pkg_dir)
self._touch('__init__.py', data_dir)
self._touch('file.dat', data_dir)
packages = find_packages(self.dist_dir)
self.assertTrue('pkg.some.data' not in packages)
def test_dir_with_packages_in_subdir_is_excluded(self):
"""
Ensure that a package in a non-package such as build/pkg/__init__.py
is excluded.
"""
build_dir = self._mkdir('build', self.dist_dir)
build_pkg_dir = self._mkdir('pkg', build_dir)
self._touch('__init__.py', build_pkg_dir)
packages = find_packages(self.dist_dir)
self.assertTrue('build.pkg' not in packages)
@skipIf(not has_symlink(), 'Symlink support required')
def test_symlinked_packages_are_included(self):
"""
A symbolically-linked directory should be treated like any other
directory when matched as a package.
Create a link from lpkg -> pkg.
"""
self._touch('__init__.py', self.pkg_dir)
linked_pkg = os.path.join(self.dist_dir, 'lpkg')
os.symlink('pkg', linked_pkg)
assert os.path.isdir(linked_pkg)
packages = find_packages(self.dist_dir)
self.assertTrue('lpkg' in packages)
def _assert_packages(self, actual, expected):
self.assertEqual(set(actual), set(expected))
def test_pep420_ns_package(self):
packages = find_420_packages(
self.dist_dir, include=['pkg*'], exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes(self):
packages = find_420_packages(
self.dist_dir, exclude=['pkg.subpkg.assets'])
self._assert_packages(packages, ['docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_includes_or_excludes(self):
packages = find_420_packages(self.dist_dir)
expected = [
'docs', 'pkg', 'pkg.nspkg', 'pkg.subpkg', 'pkg.subpkg.assets']
self._assert_packages(packages, expected)
def test_regular_package_with_nested_pep420_ns_packages(self):
self._touch('__init__.py', self.pkg_dir)
packages = find_420_packages(
self.dist_dir, exclude=['docs', 'pkg.subpkg.assets'])
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
def test_pep420_ns_package_no_non_package_dirs(self):
shutil.rmtree(self.docs_dir)
shutil.rmtree(os.path.join(self.dist_dir, 'pkg/subpkg/assets'))
packages = find_420_packages(self.dist_dir)
self._assert_packages(packages, ['pkg', 'pkg.nspkg', 'pkg.subpkg'])
| mit |
ozturkemre/programming-challanges | 02-temperature_converter/temperature_converter.py | 1 | 1147 | print("""Enter 'C' or 'c' for Celsius,
'K' or 'k' for Kelvin,
'F' or 'f' for Fahrenheit\n\n""")
converted=0
fr=input("I want converter from: \n")
value1=input("Enter value: \n")
to=input("to: \n")
try:
value1=float(value1)
if(fr=='C' or fr=='c'):
if(to=='F' or to=='f'):
            converted=value1*1.8+32
elif(to=='K' or to=='k'):
converted = value1 + 273.15
else:
print("you enter different value\n")
exit()
elif(fr=='K' or fr=='k'):
if(to=='C' or to=='c'):
converted=value1-273.15
elif(to=='F' or to=='f'):
converted = (value1 - 273.15) * 1.8 + 32
else:
print("you enter different value\n")
exit()
elif(fr=='F' or fr=='f'):
if(to=='C' or to=='c'):
converted=(value1-32)/1.8
elif(to=='K' or to=='k'):
            converted = ((value1 - 32) / 1.8) + 273.15
else:
print("you enter different value\n")
exit()
except ValueError:
print("That was no valid number.")
print("result = {}".format(converted))
| mit |
Empeeric/dirometer | django/views/generic/edit.py | 159 | 7457 | from django.forms import models as model_forms
from django.core.exceptions import ImproperlyConfigured
from django.http import HttpResponseRedirect
from django.views.generic.base import TemplateResponseMixin, View
from django.views.generic.detail import (SingleObjectMixin,
SingleObjectTemplateResponseMixin, BaseDetailView)
class FormMixin(object):
"""
A mixin that provides a way to show and handle a form in a request.
"""
initial = {}
form_class = None
success_url = None
def get_initial(self):
"""
Returns the initial data to use for forms on this view.
"""
return self.initial
def get_form_class(self):
"""
Returns the form class to use in this view
"""
return self.form_class
def get_form(self, form_class):
"""
Returns an instance of the form to be used in this view.
"""
return form_class(**self.get_form_kwargs())
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = {'initial': self.get_initial()}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
'files': self.request.FILES,
})
return kwargs
def get_context_data(self, **kwargs):
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
return url
def form_valid(self, form):
return HttpResponseRedirect(self.get_success_url())
def form_invalid(self, form):
return self.render_to_response(self.get_context_data(form=form))
class ModelFormMixin(FormMixin, SingleObjectMixin):
"""
A mixin that provides a way to show and handle a modelform in a request.
"""
def get_form_class(self):
"""
Returns the form class to use in this view
"""
if self.form_class:
return self.form_class
else:
if self.model is not None:
# If a model has been explicitly provided, use it
model = self.model
elif hasattr(self, 'object') and self.object is not None:
# If this view is operating on a single object, use
# the class of that object
model = self.object.__class__
else:
# Try to get a queryset and extract the model class
# from that
model = self.get_queryset().model
return model_forms.modelform_factory(model)
def get_form_kwargs(self):
"""
        Returns the keyword arguments for instantiating the form.
"""
kwargs = super(ModelFormMixin, self).get_form_kwargs()
kwargs.update({'instance': self.object})
return kwargs
def get_success_url(self):
if self.success_url:
url = self.success_url % self.object.__dict__
else:
try:
url = self.object.get_absolute_url()
except AttributeError:
raise ImproperlyConfigured(
"No URL to redirect to. Either provide a url or define"
" a get_absolute_url method on the Model.")
return url
def form_valid(self, form):
self.object = form.save()
return super(ModelFormMixin, self).form_valid(form)
def get_context_data(self, **kwargs):
context = kwargs
if self.object:
context['object'] = self.object
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class ProcessFormView(View):
"""
A mixin that processes a form on POST.
"""
def get(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = self.get_form(form_class)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
# PUT is a valid HTTP verb for creating (with a known URL) or editing an
# object, note that browsers only support POST for now.
def put(self, *args, **kwargs):
return self.post(*args, **kwargs)
class BaseFormView(FormMixin, ProcessFormView):
"""
A base view for displaying a form
"""
class FormView(TemplateResponseMixin, BaseFormView):
"""
A view for displaying a form, and rendering a template response.
"""
class BaseCreateView(ModelFormMixin, ProcessFormView):
"""
    Base view for creating a new object instance.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = None
return super(BaseCreateView, self).post(request, *args, **kwargs)
class CreateView(SingleObjectTemplateResponseMixin, BaseCreateView):
"""
    View for creating a new object instance,
with a response rendered by template.
"""
template_name_suffix = '_form'
class BaseUpdateView(ModelFormMixin, ProcessFormView):
"""
Base view for updating an existing object.
Using this base class requires subclassing to provide a response mixin.
"""
def get(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).get(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
self.object = self.get_object()
return super(BaseUpdateView, self).post(request, *args, **kwargs)
class UpdateView(SingleObjectTemplateResponseMixin, BaseUpdateView):
"""
View for updating an object,
    with a response rendered by template.
"""
template_name_suffix = '_form'
class DeletionMixin(object):
"""
A mixin providing the ability to delete objects
"""
success_url = None
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
self.object.delete()
return HttpResponseRedirect(self.get_success_url())
# Add support for browsers which only accept GET and POST for now.
def post(self, *args, **kwargs):
return self.delete(*args, **kwargs)
def get_success_url(self):
if self.success_url:
return self.success_url
else:
raise ImproperlyConfigured(
"No URL to redirect to. Provide a success_url.")
class BaseDeleteView(DeletionMixin, BaseDetailView):
"""
Base view for deleting an object.
Using this base class requires subclassing to provide a response mixin.
"""
class DeleteView(SingleObjectTemplateResponseMixin, BaseDeleteView):
"""
View for deleting an object retrieved with `self.get_object()`,
with a response rendered by template.
"""
template_name_suffix = '_confirm_delete'
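# Editor's sketch: minimal concrete subclasses of the generic views above;
# the ContactForm and Author names are illustrative assumptions.
# class ContactView(FormView):
#     template_name = 'contact.html'
#     form_class = ContactForm
#     success_url = '/thanks/'
#     def form_valid(self, form):
#         form.send_email()  # extra processing before the redirect
#         return super(ContactView, self).form_valid(form)
# class AuthorDelete(DeleteView):
#     model = Author
#     success_url = '/authors/'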
| mit |
ipylypiv/grpc | src/python/grpcio/grpc/_plugin_wrapping.py | 19 | 4602 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import threading
import grpc
from grpc import _common
from grpc._cython import cygrpc
class AuthMetadataContext(
collections.namedtuple('AuthMetadataContext', (
'service_url', 'method_name',)), grpc.AuthMetadataContext):
pass
class AuthMetadataPluginCallback(grpc.AuthMetadataPluginCallback):
def __init__(self, callback):
self._callback = callback
def __call__(self, metadata, error):
self._callback(metadata, error)
class _WrappedCygrpcCallback(object):
def __init__(self, cygrpc_callback):
self.is_called = False
self.error = None
self.is_called_lock = threading.Lock()
self.cygrpc_callback = cygrpc_callback
def _invoke_failure(self, error):
# TODO(atash) translate different Exception superclasses into different
# status codes.
self.cygrpc_callback(_common.EMPTY_METADATA, cygrpc.StatusCode.internal,
_common.encode(str(error)))
def _invoke_success(self, metadata):
try:
cygrpc_metadata = _common.to_cygrpc_metadata(metadata)
except Exception as exception: # pylint: disable=broad-except
self._invoke_failure(exception)
return
self.cygrpc_callback(cygrpc_metadata, cygrpc.StatusCode.ok, b'')
def __call__(self, metadata, error):
with self.is_called_lock:
if self.is_called:
raise RuntimeError('callback should only ever be invoked once')
if self.error:
self._invoke_failure(self.error)
return
self.is_called = True
if error is None:
self._invoke_success(metadata)
else:
self._invoke_failure(error)
def notify_failure(self, error):
with self.is_called_lock:
if not self.is_called:
self.error = error
class _WrappedPlugin(object):
def __init__(self, plugin):
self.plugin = plugin
def __call__(self, context, cygrpc_callback):
wrapped_cygrpc_callback = _WrappedCygrpcCallback(cygrpc_callback)
wrapped_context = AuthMetadataContext(
_common.decode(context.service_url),
_common.decode(context.method_name))
try:
self.plugin(wrapped_context,
AuthMetadataPluginCallback(wrapped_cygrpc_callback))
except Exception as error:
wrapped_cygrpc_callback.notify_failure(error)
raise
def call_credentials_metadata_plugin(plugin, name):
"""
Args:
plugin: A callable accepting a grpc.AuthMetadataContext
object and a callback (itself accepting a list of metadata key/value
2-tuples and a None-able exception value). The callback must be eventually
called, but need not be called in plugin's invocation.
plugin's invocation must be non-blocking.
"""
return cygrpc.call_credentials_metadata_plugin(
cygrpc.CredentialsMetadataPlugin(
_WrappedPlugin(plugin), _common.encode(name)))
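# Editor's sketch: a plugin satisfying the contract documented above; the
# header name and token value are illustrative assumptions.
# def _token_plugin(context, callback):
#     # context.service_url / context.method_name identify the outgoing RPC.
#     callback((('authorization', 'Bearer example-token'),), None)
# plugin_call_credentials = call_credentials_metadata_plugin(_token_plugin, 'token')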
| bsd-3-clause |
bobcyw/django | django/core/management/commands/check.py | 316 | 1892 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps
from django.core import checks
from django.core.checks.registry import registry
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = "Checks the entire Django project for potential problems."
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label', nargs='*')
parser.add_argument('--tag', '-t', action='append', dest='tags',
help='Run only checks labeled with given tag.')
parser.add_argument('--list-tags', action='store_true', dest='list_tags',
help='List available tags.')
parser.add_argument('--deploy', action='store_true', dest='deploy',
help='Check deployment settings.')
def handle(self, *app_labels, **options):
include_deployment_checks = options['deploy']
if options.get('list_tags'):
self.stdout.write('\n'.join(sorted(registry.tags_available(include_deployment_checks))))
return
if app_labels:
app_configs = [apps.get_app_config(app_label) for app_label in app_labels]
else:
app_configs = None
tags = options.get('tags')
if tags:
try:
invalid_tag = next(
tag for tag in tags if not checks.tag_exists(tag, include_deployment_checks)
)
except StopIteration:
# no invalid tags
pass
else:
raise CommandError('There is no system check with the "%s" tag.' % invalid_tag)
self.check(
app_configs=app_configs,
tags=tags,
display_num_errors=True,
include_deployment_checks=include_deployment_checks,
)
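# Editor's note: typical invocations of this command (illustrative):
#   ./manage.py check
#   ./manage.py check --deploy
#   ./manage.py check --list-tags
#   ./manage.py check auth admin --tag models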
| bsd-3-clause |
ldbc/ldbc_snb_datagen | tools/get-sizes.py | 1 | 1059 | #!/usr/bin/env python3
import argparse
import os
import sys
import boto3
import json
def get_entity_sizes(bucket, prefix):
s3 = boto3.client("s3")
prefix = f"{prefix}social_network/csv/raw/composite-merged-fk/dynamic/"
more = True
token = None
sizes = {}
while more:
resp = s3.list_objects_v2(Bucket=bucket, Prefix=prefix, **({'ContinuationToken': token} if token else {}))
for obj in resp["Contents"]:
splits = obj["Key"][len(prefix):].split("/", 1)
if len(splits) > 1:
entity, rest = splits
if rest.endswith(".csv"):
total, c, m = sizes.get(entity, [0, 0, 0])
sizes[entity] = [total + obj["Size"], c + 1, max(m, obj["Size"])]
more = False
if 'NextContinuationToken' in resp.keys():
token = resp['NextContinuationToken']
more = True
with open(f"sizes.json", "w") as f:
json.dump(sizes, f)
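    # Editor's note: the resulting sizes.json maps entity name ->
    # [total_bytes, file_count, max_file_bytes], e.g.
    # {"person": [123456, 4, 50000]} (values illustrative).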
get_entity_sizes("ldbc-datagen-sf10k-debug", "sf1000/runs/20210412_091530/")
| gpl-3.0 |
energicryptocurrency/energi | qa/rpc-tests/bipdersig-p2p.py | 1 | 7055 | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Energi Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import CScript
from io import BytesIO
# A canonical signature consists of:
# <30> <total len> <02> <len R> <R> <02> <len S> <S> <hashtype>
def unDERify(tx):
'''
Make the signature in vin 0 of a tx non-DER-compliant,
by adding padding after the S-value.
'''
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if (len(newscript) == 0):
newscript.append(i[0:-1] + b'\0' + i[-1:])
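            # Editor's note: e.g. <sig || hashtype> becomes <sig || 0x00 || hashtype>;
            # the zero byte padded in after the S value violates strict DER
            # encoding while leaving the script structure intact.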
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
'''
This test is meant to exercise BIP66 (DER SIG).
Connect to a single node.
Mine 2 (version 2) blocks (save the coinbases for later).
Generate 298 more version 2 blocks, verify the node accepts.
Mine 749 version 3 blocks, verify the node accepts.
Check that the new DERSIG rules are not enforced on the 750th version 3 block.
Mine 199 new version blocks.
Mine 1 old-version block.
Mine 1 new version block.
Check that the new DERSIG rules are enforced on the 951st version 3 block.
Mine 1 old version block, see that the node rejects.
'''
class BIP66Test(ComparisonTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
def setup_network(self):
# Must set the blockversion for this test
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=2']],
binary=[self.options.testbinary])
def run_test(self):
test = TestManager(self, self.options.tmpdir)
test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
test.run()
def create_transaction(self, node, coinbase, to_address, amount):
from_txid = node.getblock(coinbase)['tx'][0]
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
tx = CTransaction()
f = BytesIO(hex_str_to_bytes(signresult['hex']))
tx.deserialize(f)
return tx
def get_tests(self):
self.coinbase_blocks = self.nodes[0].generate(2)
height = 3 # height of the next block to build
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
self.last_block_time = get_mocktime() + 1
''' 298 more version 2 blocks '''
test_blocks = []
for i in range(298):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 749 version 3 blocks '''
test_blocks = []
for i in range(749):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
'''
Check that the new DERSIG rules are not enforced in the 750th
version 3 block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[0], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 199 new version blocks on last valid tip '''
test_blocks = []
for i in range(199):
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
test_blocks.append([block, True])
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance(test_blocks, sync_every_block=False)
''' Mine 1 old version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
''' Mine 1 new version block '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.rehash()
block.solve()
self.last_block_time += 1
self.tip = block.sha256
height += 1
yield TestInstance([[block, True]])
'''
Check that the new DERSIG rules are enforced in the 951st version 3
block.
'''
spendtx = self.create_transaction(self.nodes[0],
self.coinbase_blocks[1], self.nodeaddress, 1.0)
unDERify(spendtx)
spendtx.rehash()
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 3
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
''' Mine 1 old version block, should be invalid '''
block = create_block(self.tip, create_coinbase(height), self.last_block_time + 1)
block.nVersion = 2
block.rehash()
block.solve()
self.last_block_time += 1
yield TestInstance([[block, False]])
if __name__ == '__main__':
BIP66Test().main()
| mit |
fidomason/kbengine | kbe/src/lib/python/Lib/test/test_cmd_line.py | 72 | 19246 | # Tests invocation of the interpreter with various command line arguments
# Most tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.support, unittest
import os
import shutil
import sys
import subprocess
import tempfile
from test.script_helper import (spawn_python, kill_python, assert_python_ok,
assert_python_failure)
# XXX (ncoghlan): Move to script_helper and make consistent with run_python
def _kill_python_and_exit_code(p):
data = kill_python(p)
returncode = p.wait()
return data, returncode
class CmdLineTest(unittest.TestCase):
def test_directories(self):
assert_python_failure('.')
assert_python_failure('< .')
def verify_valid_flag(self, cmd_line):
rc, out, err = assert_python_ok(*cmd_line)
self.assertTrue(out == b'' or out.endswith(b'\n'))
self.assertNotIn(b'Traceback', out)
self.assertNotIn(b'Traceback', err)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
rc, out, err = assert_python_ok('-h')
self.assertIn(b'usage', out)
def test_version(self):
version = ('Python %d.%d' % sys.version_info[:2]).encode("ascii")
for switch in '-V', '--version':
rc, out, err = assert_python_ok(switch)
self.assertFalse(err.startswith(version))
self.assertTrue(out.startswith(version))
def test_verbose(self):
# -v causes imports to write to stderr. If the write to
# stderr itself causes an import to happen (for the output
# codec), a recursion loop can occur.
rc, out, err = assert_python_ok('-v')
self.assertNotIn(b'stack overflow', err)
rc, out, err = assert_python_ok('-vv')
self.assertNotIn(b'stack overflow', err)
def test_xoptions(self):
def get_xoptions(*args):
# use subprocess module directly because test.script_helper adds
# "-X faulthandler" to the command line
args = (sys.executable, '-E') + args
args += ('-c', 'import sys; print(sys._xoptions)')
out = subprocess.check_output(args)
opts = eval(out.splitlines()[0])
return opts
opts = get_xoptions()
self.assertEqual(opts, {})
opts = get_xoptions('-Xa', '-Xb=c,d=e')
self.assertEqual(opts, {'a': True, 'b': 'c,d=e'})
def test_showrefcount(self):
def run_python(*args):
# this is similar to assert_python_ok but doesn't strip
# the refcount from stderr. It can be replaced once
# assert_python_ok stops doing that.
cmd = [sys.executable]
cmd.extend(args)
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, stdout=PIPE, stderr=PIPE)
out, err = p.communicate()
p.stdout.close()
p.stderr.close()
rc = p.returncode
self.assertEqual(rc, 0)
return rc, out, err
code = 'import sys; print(sys._xoptions)'
# normally the refcount is hidden
rc, out, err = run_python('-c', code)
self.assertEqual(out.rstrip(), b'{}')
self.assertEqual(err, b'')
# "-X showrefcount" shows the refcount, but only in debug builds
rc, out, err = run_python('-X', 'showrefcount', '-c', code)
self.assertEqual(out.rstrip(), b"{'showrefcount': True}")
if hasattr(sys, 'gettotalrefcount'): # debug build
self.assertRegex(err, br'^\[\d+ refs, \d+ blocks\]')
else:
self.assertEqual(err, b'')
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
assert_python_failure('-m')
# Check we get an error for a nonexistent module
assert_python_failure('-m', 'fnord43520xyz')
# Check the runpy module also gives an error for
# a nonexistent module
assert_python_failure('-m', 'runpy', 'fnord43520xyz')
# All good if module is located and run successfully
assert_python_ok('-m', 'timeit', '-n', '1')
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write(b'Timer\n')
p.stdin.write(b'exit()\n')
data = kill_python(p)
self.assertTrue(data.find(b'1 loop') != -1)
self.assertTrue(data.find(b'__main__.Timer') != -1)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
assert_python_failure('-c')
# Check we get an error for an uncaught exception
assert_python_failure('-c', 'raise Exception')
# All good if execution is successful
assert_python_ok('-c', 'pass')
@unittest.skipUnless(test.support.FS_NONASCII, 'need support.FS_NONASCII')
def test_non_ascii(self):
# Test handling of non-ascii data
command = ("assert(ord(%r) == %s)"
% (test.support.FS_NONASCII, ord(test.support.FS_NONASCII)))
assert_python_ok('-c', command)
# On Windows, pass bytes to subprocess doesn't test how Python decodes the
# command line, but how subprocess does decode bytes to unicode. Python
# doesn't decode the command line because Windows provides directly the
# arguments as unicode (using wmain() instead of main()).
@unittest.skipIf(sys.platform == 'win32',
'Windows has a native unicode API')
def test_undecodable_code(self):
undecodable = b"\xff"
env = os.environ.copy()
# Use C locale to get ascii for the locale encoding
env['LC_ALL'] = 'C'
code = (
b'import locale; '
b'print(ascii("' + undecodable + b'"), '
b'locale.getpreferredencoding())')
p = subprocess.Popen(
[sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
env=env)
stdout, stderr = p.communicate()
if p.returncode == 1:
# _Py_char2wchar() decoded b'\xff' as '\udcff' (b'\xff' is not
# decodable from ASCII) and run_command() failed on
# PyUnicode_AsUTF8String(). This is the expected behaviour on
# Linux.
pattern = b"Unable to decode the command from the command line:"
elif p.returncode == 0:
# _Py_char2wchar() decoded b'\xff' as '\xff' even if the locale is
# C and the locale encoding is ASCII. It occurs on FreeBSD, Solaris
# and Mac OS X.
pattern = b"'\\xff' "
# The output is followed by the encoding name, an alias to ASCII.
# Examples: "US-ASCII" or "646" (ISO 646, on Solaris).
else:
raise AssertionError("Unknown exit code: %s, output=%a" % (p.returncode, stdout))
if not stdout.startswith(pattern):
raise AssertionError("%a doesn't start with %a" % (stdout, pattern))
@unittest.skipUnless(sys.platform == 'darwin', 'test specific to Mac OS X')
def test_osx_utf8(self):
def check_output(text):
decoded = text.decode('utf-8', 'surrogateescape')
expected = ascii(decoded).encode('ascii') + b'\n'
env = os.environ.copy()
# C locale gives ASCII locale encoding, but Python uses UTF-8
# to parse the command line arguments on Mac OS X
env['LC_ALL'] = 'C'
p = subprocess.Popen(
(sys.executable, "-c", "import sys; print(ascii(sys.argv[1]))", text),
stdout=subprocess.PIPE,
env=env)
stdout, stderr = p.communicate()
self.assertEqual(stdout, expected)
self.assertEqual(p.returncode, 0)
# test valid utf-8
text = 'e:\xe9, euro:\u20ac, non-bmp:\U0010ffff'.encode('utf-8')
check_output(text)
# test invalid utf-8
text = (
b'\xff' # invalid byte
b'\xc3\xa9' # valid utf-8 character
b'\xc3\xff' # invalid byte sequence
b'\xed\xa0\x80' # lone surrogate character (invalid)
)
check_output(text)
def test_unbuffered_output(self):
# Test expected operation of the '-u' switch
for stream in ('stdout', 'stderr'):
# Binary is unbuffered
code = ("import os, sys; sys.%s.buffer.write(b'x'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data, b'x', "binary %s not unbuffered" % stream)
# Text is line-buffered
code = ("import os, sys; sys.%s.write('x\\n'); os._exit(0)"
% stream)
rc, out, err = assert_python_ok('-u', '-c', code)
data = err if stream == 'stderr' else out
self.assertEqual(data.strip(), b'x',
"text %s not line-buffered" % stream)
def test_unbuffered_input(self):
# sys.stdin still works with '-u'
code = ("import sys; sys.stdout.write(sys.stdin.read(1))")
p = spawn_python('-u', '-c', code)
p.stdin.write(b'x')
p.stdin.flush()
data, rc = _kill_python_and_exit_code(p)
self.assertEqual(rc, 0)
self.assertTrue(data.startswith(b'x'), data)
def test_large_PYTHONPATH(self):
path1 = "ABCDE" * 100
path2 = "FGHIJ" * 100
path = path1 + os.pathsep + path2
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc, out, err = assert_python_ok('-S', '-c', code,
PYTHONPATH=path)
self.assertIn(path1.encode('ascii'), out)
self.assertIn(path2.encode('ascii'), out)
def test_empty_PYTHONPATH_issue16309(self):
# On Posix, it is documented that setting PATH to the
# empty string is equivalent to not setting PATH at all,
# which is an exception to the rule that in a string like
# "/bin::/usr/bin" the empty string in the middle gets
# interpreted as '.'
code = """if 1:
import sys
path = ":".join(sys.path)
path = path.encode("ascii", "backslashreplace")
sys.stdout.buffer.write(path)"""
rc1, out1, err1 = assert_python_ok('-c', code, PYTHONPATH="")
rc2, out2, err2 = assert_python_ok('-c', code, __isolated=False)
# regarding to Posix specification, outputs should be equal
# for empty and unset PYTHONPATH
self.assertEqual(out1, out2)
def test_displayhook_unencodable(self):
for encoding in ('ascii', 'latin-1', 'utf-8'):
env = os.environ.copy()
env['PYTHONIOENCODING'] = encoding
p = subprocess.Popen(
[sys.executable, '-i'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env)
# non-ascii, surrogate, non-BMP printable, non-BMP unprintable
text = "a=\xe9 b=\uDC80 c=\U00010000 d=\U0010FFFF"
p.stdin.write(ascii(text).encode('ascii') + b"\n")
p.stdin.write(b'exit()\n')
data = kill_python(p)
escaped = repr(text).encode(encoding, 'backslashreplace')
self.assertIn(escaped, data)
def check_input(self, code, expected):
with tempfile.NamedTemporaryFile("wb+") as stdin:
sep = os.linesep.encode('ASCII')
stdin.write(sep.join((b'abc', b'def')))
stdin.flush()
stdin.seek(0)
with subprocess.Popen(
(sys.executable, "-c", code),
stdin=stdin, stdout=subprocess.PIPE) as proc:
stdout, stderr = proc.communicate()
self.assertEqual(stdout.rstrip(), expected)
def test_stdin_readline(self):
# Issue #11272: check that sys.stdin.readline() replaces '\r\n' by '\n'
# on Windows (sys.stdin is opened in binary mode)
self.check_input(
"import sys; print(repr(sys.stdin.readline()))",
b"'abc\\n'")
def test_builtin_input(self):
# Issue #11272: check that input() strips newlines ('\n' or '\r\n')
self.check_input(
"print(repr(input()))",
b"'abc'")
def test_output_newline(self):
# Issue 13119 Newline for print() should be \r\n on Windows.
code = """if 1:
import sys
print(1)
print(2)
print(3, file=sys.stderr)
print(4, file=sys.stderr)"""
rc, out, err = assert_python_ok('-c', code)
if sys.platform == 'win32':
self.assertEqual(b'1\r\n2\r\n', out)
self.assertEqual(b'3\r\n4', err)
else:
self.assertEqual(b'1\n2\n', out)
self.assertEqual(b'3\n4', err)
    def test_unmatched_quote(self):
# Issue #10206: python program starting with unmatched quote
# spewed spaces to stdout
rc, out, err = assert_python_failure('-c', "'")
self.assertRegex(err.decode('ascii', 'ignore'), 'SyntaxError')
self.assertEqual(b'', out)
def test_stdout_flush_at_shutdown(self):
# Issue #5319: if stdout.flush() fails at shutdown, an error should
# be printed out.
code = """if 1:
import os, sys
sys.stdout.write('x')
os.close(sys.stdout.fileno())"""
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', out)
self.assertRegex(err.decode('ascii', 'ignore'),
'Exception ignored in.*\nOSError: .*')
def test_closed_stdout(self):
# Issue #13444: if stdout has been explicitly closed, we should
# not attempt to flush it at shutdown.
code = "import sys; sys.stdout.close()"
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(b'', err)
# Issue #7111: Python should work without standard streams
@unittest.skipIf(os.name != 'posix', "test needs POSIX semantics")
def _test_no_stdio(self, streams):
code = """if 1:
import os, sys
for i, s in enumerate({streams}):
if getattr(sys, s) is not None:
os._exit(i + 1)
os._exit(42)""".format(streams=streams)
def preexec():
if 'stdin' in streams:
os.close(0)
if 'stdout' in streams:
os.close(1)
if 'stderr' in streams:
os.close(2)
p = subprocess.Popen(
[sys.executable, "-E", "-c", code],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
preexec_fn=preexec)
out, err = p.communicate()
self.assertEqual(test.support.strip_python_stderr(err), b'')
self.assertEqual(p.returncode, 42)
def test_no_stdin(self):
self._test_no_stdio(['stdin'])
def test_no_stdout(self):
self._test_no_stdio(['stdout'])
def test_no_stderr(self):
self._test_no_stdio(['stderr'])
def test_no_std_streams(self):
self._test_no_stdio(['stdin', 'stdout', 'stderr'])
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
hashes.append(out)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print("random is", sys.flags.hash_randomization)'
rc, out, err = assert_python_ok('-c', code)
self.assertEqual(rc, 0)
self.assertIn(b'random is 1', out)
def test_del___main__(self):
# Issue #15001: PyRun_SimpleFileExFlags() did crash because it kept a
# borrowed reference to the dict of __main__ module and later modify
# the dict whereas the module was destroyed
filename = test.support.TESTFN
self.addCleanup(test.support.unlink, filename)
with open(filename, "w") as script:
print("import sys", file=script)
print("del sys.modules['__main__']", file=script)
assert_python_ok(filename)
def test_unknown_options(self):
rc, out, err = assert_python_failure('-E', '-z')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
# Add "without='-E'" to prevent _assert_python to append -E
# to env_vars and change the output of stderr
rc, out, err = assert_python_failure('-z', without='-E')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
rc, out, err = assert_python_failure('-a', '-z', without='-E')
self.assertIn(b'Unknown option: -a', err)
# only the first unknown option is reported
self.assertNotIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
self.assertEqual(b'', out)
def test_isolatedmode(self):
self.verify_valid_flag('-I')
self.verify_valid_flag('-IEs')
rc, out, err = assert_python_ok('-I', '-c',
'from sys import flags as f; '
'print(f.no_user_site, f.ignore_environment, f.isolated)',
            # dummyvar to prevent an extraneous -E
dummyvar="")
self.assertEqual(out.strip(), b'1 1 1')
with test.support.temp_cwd() as tmpdir:
fake = os.path.join(tmpdir, "uuid.py")
main = os.path.join(tmpdir, "main.py")
with open(fake, "w") as f:
f.write("raise RuntimeError('isolated mode test')\n")
with open(main, "w") as f:
f.write("import uuid\n")
f.write("print('ok')\n")
self.assertRaises(subprocess.CalledProcessError,
subprocess.check_output,
[sys.executable, main], cwd=tmpdir,
stderr=subprocess.DEVNULL)
out = subprocess.check_output([sys.executable, "-I", main],
cwd=tmpdir)
self.assertEqual(out.strip(), b"ok")
def test_main():
test.support.run_unittest(CmdLineTest)
test.support.reap_children()
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
CalvinHsu1223/LinuxCNC-HAL-EtherCAT-Driver-with-ILC | configs/gladevcp/probe/probe.py | 10 | 7401 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# This is a component of EMC
# probe.py Copyright 2010 Michael Haberler
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
gladevcp probe demo example
Michael Haberler 11/2010
'''
import os,sys
from gladevcp.persistence import IniFile,widget_defaults,set_debug,select_widgets
import hal
import hal_glib
import gtk
import glib
import linuxcnc
debug = 0
class EmcInterface(object):
def __init__(self):
try:
self.s = linuxcnc.stat();
self.c = linuxcnc.command()
except Exception, msg:
print "cant initialize EmcInterface: %s - EMC not running?" %(msg)
def running(self,do_poll=True):
if do_poll: self.s.poll()
return self.s.task_mode == linuxcnc.MODE_AUTO and self.s.interp_state != linuxcnc.INTERP_IDLE
def manual_ok(self,do_poll=True):
if do_poll: self.s.poll()
if self.s.task_state != linuxcnc.STATE_ON: return False
return self.s.interp_state == linuxcnc.INTERP_IDLE
def ensure_mode(self,m, *p):
'''
If emc is not already in one of the modes given, switch it to the first mode
example:
ensure_mode(linuxcnc.MODE_MDI)
ensure_mode(linuxcnc.MODE_AUTO, linuxcnc.MODE_MDI)
'''
self.s.poll()
if self.s.task_mode == m or self.s.task_mode in p: return True
if self.running(do_poll=False): return False
self.c.mode(m)
self.c.wait_complete()
return True
def active_codes(self):
self.s.poll()
return self.s.gcodes
def get_current_system(self):
for i in self.active_codes():
if i >= 540 and i <= 590:
return i/10 - 53
elif i >= 590 and i <= 593:
return i - 584
return 1
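        # Worked example of the mapping above (LinuxCNC reports active
        # G-codes multiplied by ten): with G55 active the list contains 550,
        # and 550 / 10 - 53 == 2, i.e. coordinate system 2 (G54 -> 1 ...
        # G59 -> 6), while G59.1..G59.3 appear as 591..593 and map to
        # systems 7..9 via i - 584.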
def mdi_command(self,command, wait=True):
#ensure_mode(emself.c.MODE_MDI)
self.c.mdi(command)
if wait: self.c.wait_complete()
def emc_status(self):
'''
return tuple (task mode, task state, exec state, interp state) as strings
'''
self.s.poll()
task_mode = ['invalid', 'MANUAL', 'AUTO', 'MDI'][self.s.task_mode]
task_state = ['invalid', 'ESTOP', 'ESTOP_RESET', 'OFF', 'ON'][self.s.task_state]
exec_state = ['invalid', 'ERROR', 'DONE',
'WAITING_FOR_MOTION',
'WAITING_FOR_MOTION_QUEUE',
'WAITING_FOR_IO',
'WAITING_FOR_PAUSE',
'WAITING_FOR_MOTION_AND_IO',
'WAITING_FOR_DELAY',
'WAITING_FOR_SYSTEM_CMD' ][self.s.exec_state]
interp_state = ['invalid', 'IDLE', 'READING', 'PAUSED', 'WAITING'][self.s.interp_state]
return (task_mode, task_state, exec_state, interp_state)
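# A minimal usage sketch for EmcInterface (illustrative only; it assumes a
# LinuxCNC session is running so linuxcnc.stat()/command() can connect):
#
#   e = EmcInterface()
#   print e.emc_status()          # e.g. ('MANUAL', 'ON', 'DONE', 'IDLE')
#   if e.ensure_mode(linuxcnc.MODE_MDI):
#       e.mdi_command('G0 X0 Y0')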
class HandlerClass:
def on_manual_mode(self,widget,data=None):
if self.e.ensure_mode(linuxcnc.MODE_MANUAL):
print "switched to manual mode"
else:
print "cant switch to manual in this state"
def on_mdi_mode(self,widget,data=None):
if self.e.ensure_mode(linuxcnc.MODE_MDI):
print "switched to MDI mode"
else:
print "cant switch to MDI in this state"
def _query_emc_status(self,data=None):
(task_mode, task_state, exec_state, interp_state) = self.e.emc_status()
self.builder.get_object('task_mode').set_label("Task mode: " + task_mode)
self.builder.get_object('task_state').set_label("Task state: " + task_state)
self.builder.get_object('exec_state').set_label("Exec state: " + exec_state)
self.builder.get_object('interp_state').set_label("Interp state: " + interp_state)
return True
def on_probe(self,widget,data=None):
label = widget.get_label()
axis = ord(label[0].lower()) - ord('x')
direction = 1.0
if label[1] == '-':
direction = -1.0
self.e.s.poll()
self.start_feed = self.e.s.settings[1]
# determine system we are touching off - 1...g54 etc
self.current_system = self.e.get_current_system()
# remember current abs or rel mode - g91
self.start_relative = (910 in self.e.active_codes())
self.previous_mode = self.e.s.task_mode
if self.e.s.task_state != linuxcnc.STATE_ON:
print "machine not turned on"
return
if not self.e.s.homed[axis]:
print "%s axis not homed" %(chr(axis + ord('X')))
return
if self.e.running(do_poll=False):
print "cant do that now - intepreter running"
return
self.e.ensure_mode(linuxcnc.MODE_MDI)
self.e.mdi_command("#<_Probe_System> = %d " % (self.current_system ),wait=False)
self.e.mdi_command("#<_Probe_Axis> = %d " % (axis),wait=False)
self.e.mdi_command("#<_Probe_Speed> = %s " % (self.builder.get_object('probe_feed').get_value()),wait=False)
self.e.mdi_command("#<_Probe_Diameter> = %s " % (self.builder.get_object('probe_diameter').get_value() ),wait=False)
self.e.mdi_command("#<_Probe_Distance> = %s " % (self.builder.get_object('probe_travel').get_value() * direction),wait=False)
self.e.mdi_command("#<_Probe_Retract> = %s " % (self.builder.get_object('retract').get_value() * direction * -1.0),wait=False)
self.e.mdi_command("O<probe> call",wait=False)
self.e.mdi_command('F%f' % (self.start_feed),wait=False)
self.e.mdi_command('G91' if self.start_relative else 'G90',wait=False)
# self.e.ensure_mode(self.previous_mode)
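        # Rough sketch of the MDI stream a press on the "X-" button emits,
        # assuming illustrative UI values probe_feed=10, probe_diameter=3,
        # probe_travel=30 and retract=2 (axis 'x' -> 0, direction -1.0):
        #   #<_Probe_System> = 1
        #   #<_Probe_Axis> = 0
        #   #<_Probe_Speed> = 10.0
        #   #<_Probe_Diameter> = 3.0
        #   #<_Probe_Distance> = -30.0
        #   #<_Probe_Retract> = 2.0
        #   O<probe> call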
def on_destroy(self,obj,data=None):
self.ini.save_state(self)
def on_restore_defaults(self,button,data=None):
'''
example callback for 'Reset to defaults' button
currently unused
'''
self.ini.create_default_ini()
self.ini.restore_state(self)
def __init__(self, halcomp,builder,useropts):
self.halcomp = halcomp
self.builder = builder
self.ini_filename = __name__ + '.ini'
self.defaults = { IniFile.vars: dict(),
IniFile.widgets : widget_defaults(select_widgets(self.builder.get_objects(), hal_only=False,output_only = True))
}
self.ini = IniFile(self.ini_filename,self.defaults,self.builder)
self.ini.restore_state(self)
self.e = EmcInterface()
glib.timeout_add_seconds(1, self._query_emc_status)
def get_handlers(halcomp,builder,useropts):
global debug
for cmd in useropts:
exec cmd in globals()
set_debug(debug)
return [HandlerClass(halcomp,builder,useropts)]
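# Usage note (illustrative, assuming gladevcp's -U option forwards user
# statements to get_handlers()): launching with
#   gladevcp -U debug=1 -u probe.py probe.ui
# would exec "debug=1" in this module's globals and raise the log verbosity.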
| gpl-2.0 |
brianlsharp/MissionPlanner | Lib/site-packages/numpy/lib/type_check.py | 53 | 17548 | ## Automatically adapted for numpy Sep 19, 2005 by convertcode.py
__all__ = ['iscomplexobj','isrealobj','imag','iscomplex',
'isreal','nan_to_num','real','real_if_close',
'typename','asfarray','mintypecode','asscalar',
'common_type', 'datetime_data']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, array, isnan, \
obj2sctype, zeros
from ufunclike import isneginf, isposinf
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
def mintypecode(typechars,typeset='GDFgdf',default='d'):
"""
Return the character for the minimum-size type to which given types can
be safely cast.
The returned type character must represent the smallest size dtype such
that an array of the returned type can handle the data from an array of
all types in `typechars` (or if `typechars` is an array, then its
dtype.char).
Parameters
----------
typechars : list of str or array_like
If a list of strings, each string should represent a dtype.
If array_like, the character representation of the array dtype is used.
typeset : str or list of str, optional
The set of characters that the returned character is chosen from.
The default set is 'GDFgdf'.
default : str, optional
The default character, this is returned if none of the characters in
`typechars` matches a character in `typeset`.
Returns
-------
typechar : str
The character representing the minimum-size type that was found.
See Also
--------
dtype, sctype2char, maximum_sctype
Examples
--------
>>> np.mintypecode(['d', 'f', 'S'])
'd'
>>> x = np.array([1.1, 2-3.j])
>>> np.mintypecode(x)
'D'
>>> np.mintypecode('abceh', default='G')
'G'
"""
typecodes = [(type(t) is type('') and t) or asarray(t).dtype.char\
for t in typechars]
intersection = [t for t in typecodes if t in typeset]
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
l = []
for t in intersection:
i = _typecodes_by_elsize.index(t)
l.append((i,t))
l.sort()
return l[0][1]
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to a float type.
Parameters
----------
a : array_like
The input array.
dtype : str or dtype object, optional
Float type code to coerce input array `a`. If `dtype` is one of the
'int' dtypes, it is replaced with float64.
Returns
-------
out : ndarray
The input `a` as a float ndarray.
Examples
--------
>>> np.asfarray([2, 3])
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='float')
array([ 2., 3.])
>>> np.asfarray([2, 3], dtype='int8')
array([ 2., 3.])
"""
dtype = _nx.obj2sctype(dtype)
if not issubclass(dtype, _nx.inexact):
dtype = _nx.float_
return asarray(a,dtype=dtype)
def real(val):
"""
Return the real part of the elements of the array.
Parameters
----------
val : array_like
Input array.
Returns
-------
out : ndarray
Output array. If `val` is real, the type of `val` is used for the
output. If `val` has complex elements, the returned type is float.
See Also
--------
real_if_close, imag, angle
Examples
--------
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.real
array([ 1., 3., 5.])
>>> a.real = 9
>>> a
array([ 9.+2.j, 9.+4.j, 9.+6.j])
>>> a.real = np.array([9, 8, 7])
>>> a
array([ 9.+2.j, 8.+4.j, 7.+6.j])
"""
return asanyarray(val).real
def imag(val):
"""
Return the imaginary part of the elements of the array.
Parameters
----------
val : array_like
Input array.
Returns
-------
out : ndarray
Output array. If `val` is real, the type of `val` is used for the
output. If `val` has complex elements, the returned type is float.
See Also
--------
real, angle, real_if_close
Examples
--------
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.imag
array([ 2., 4., 6.])
>>> a.imag = np.array([8, 10, 12])
>>> a
array([ 1. +8.j, 3.+10.j, 5.+12.j])
"""
return asanyarray(val).imag
def iscomplex(x):
"""
Returns a bool array, where True if input element is complex.
What is tested is whether the input has a non-zero imaginary part, not if
the input type is complex.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray of bools
Output array.
See Also
--------
isreal
iscomplexobj : Return True if x is a complex type or an array of complex
numbers.
Examples
--------
>>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
array([ True, False, False, False, False, True], dtype=bool)
"""
ax = asanyarray(x)
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
    return +res  # convert to array-scalar if needed
def isreal(x):
"""
Returns a bool array, where True if input element is real.
    If an element has a complex type with zero imaginary part, the return
    value for that element is True.
Parameters
----------
x : array_like
Input array.
Returns
-------
out : ndarray, bool
Boolean array of same shape as `x`.
See Also
--------
iscomplex
isrealobj : Return True if x is not a complex type.
Examples
--------
>>> np.isreal([1+1j, 1+0j, 4.5, 3, 2, 2j])
array([False, True, True, True, True, False], dtype=bool)
"""
return imag(x) == 0
def iscomplexobj(x):
"""
Return True if x is a complex type or an array of complex numbers.
The type of the input is checked, not the value. So even if the input
has an imaginary part equal to zero, `iscomplexobj` evaluates to True
if the data type is complex.
Parameters
----------
x : any
The input can be of any type and shape.
Returns
-------
y : bool
The return value, True if `x` is of a complex type.
See Also
--------
isrealobj, iscomplex
Examples
--------
>>> np.iscomplexobj(1)
False
>>> np.iscomplexobj(1+0j)
True
>>> np.iscomplexobj([3, 1+0j, True])
True
"""
return issubclass( asarray(x).dtype.type, _nx.complexfloating)
def isrealobj(x):
"""
    Return True if x is not a complex type nor an array of complex numbers.
The type of the input is checked, not the value. So even if the input
has an imaginary part equal to zero, `isrealobj` evaluates to False
if the data type is complex.
Parameters
----------
x : any
The input can be of any type and shape.
Returns
-------
y : bool
The return value, False if `x` is of a complex type.
See Also
--------
iscomplexobj, isreal
Examples
--------
>>> np.isrealobj(1)
True
>>> np.isrealobj(1+0j)
False
>>> np.isrealobj([3, 1+0j, True])
False
"""
return not issubclass( asarray(x).dtype.type, _nx.complexfloating)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
from numpy.core import getlimits
f = getlimits.finfo(t)
return f.max, f.min
def nan_to_num(x):
"""
Replace nan with zero and inf with finite numbers.
Returns an array or scalar replacing Not a Number (NaN) with zero,
(positive) infinity with a very large number and negative infinity
with a very small (or negative) number.
Parameters
----------
x : array_like
Input data.
Returns
-------
out : ndarray, float
Array with the same shape as `x` and dtype of the element in `x` with
the greatest precision. NaN is replaced by zero, and infinity
(-infinity) is replaced by the largest (smallest or most negative)
floating point value that fits in the output dtype. All finite numbers
are upcast to the output dtype (default float64).
See Also
--------
    isinf : Shows which elements are positive or negative infinity.
isneginf : Shows which elements are negative infinity.
isposinf : Shows which elements are positive infinity.
isnan : Shows which elements are Not a Number (NaN).
isfinite : Shows which elements are finite (not NaN, not infinity)
Notes
-----
Numpy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.set_printoptions(precision=8)
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
-1.28000000e+002, 1.28000000e+002])
"""
try:
t = x.dtype.type
except AttributeError:
t = obj2sctype(type(x))
if issubclass(t, _nx.complexfloating):
return nan_to_num(x.real) + 1j * nan_to_num(x.imag)
else:
try:
y = x.copy()
except AttributeError:
y = array(x)
if not issubclass(t, _nx.integer):
if not y.shape:
y = array([x])
scalar = True
else:
scalar = False
are_inf = isposinf(y)
are_neg_inf = isneginf(y)
are_nan = isnan(y)
maxf, minf = _getmaxmin(y.dtype.type)
y[are_nan] = 0
y[are_inf] = maxf
y[are_neg_inf] = minf
if scalar:
y = y[0]
return y
#-----------------------------------------------------------------------------
def real_if_close(a,tol=100):
"""
    If the input is complex with all imaginary parts close to zero, return real parts.
"Close to zero" is defined as `tol` * (machine epsilon of the type for
`a`).
Parameters
----------
a : array_like
Input array.
tol : float
Tolerance in machine epsilons for the complex part of the elements
in the array.
Returns
-------
out : ndarray
If `a` is real, the type of `a` is used for the output. If `a`
has complex elements, the returned type is float.
See Also
--------
real, imag, angle
Notes
-----
Machine epsilon varies from machine to machine and between data types
but Python floats on most platforms have a machine epsilon equal to
2.2204460492503131e-16. You can use 'np.finfo(np.float).eps' to print
out the machine epsilon for floats.
Examples
--------
>>> np.finfo(np.float).eps
2.2204460492503131e-16
>>> np.real_if_close([2.1 + 4e-14j], tol=1000)
array([ 2.1])
>>> np.real_if_close([2.1 + 4e-13j], tol=1000)
array([ 2.1 +4.00000000e-13j])
"""
a = asanyarray(a)
if not issubclass(a.dtype.type, _nx.complexfloating):
return a
if tol > 1:
from numpy.core import getlimits
f = getlimits.finfo(a.dtype.type)
tol = f.eps * tol
if _nx.allclose(a.imag, 0, atol=tol):
a = a.real
return a
def asscalar(a):
"""
Convert an array of size 1 to its scalar equivalent.
Parameters
----------
a : ndarray
Input array of size 1.
Returns
-------
out : scalar
Scalar representation of `a`. The input data type is preserved.
Examples
--------
>>> np.asscalar(np.array([24]))
24
"""
return a.item()
#-----------------------------------------------------------------------------
_namefromtype = {'S1' : 'character',
'?' : 'bool',
'b' : 'signed char',
'B' : 'unsigned char',
'h' : 'short',
'H' : 'unsigned short',
'i' : 'integer',
'I' : 'unsigned integer',
'l' : 'long integer',
'L' : 'unsigned long integer',
'q' : 'long long integer',
'Q' : 'unsigned long long integer',
'f' : 'single precision',
'd' : 'double precision',
'g' : 'long precision',
'F' : 'complex single precision',
'D' : 'complex double precision',
'G' : 'complex long double precision',
'S' : 'string',
'U' : 'unicode',
'V' : 'void',
'O' : 'object'
}
def typename(char):
"""
Return a description for the given data type code.
Parameters
----------
char : str
Data type code.
Returns
-------
out : str
Description of the input data type code.
See Also
--------
dtype, typecodes
Examples
--------
>>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
>>> for typechar in typechars:
... print typechar, ' : ', np.typename(typechar)
...
S1 : character
? : bool
B : unsigned char
D : complex double precision
G : complex long double precision
F : complex single precision
I : unsigned integer
H : unsigned short
L : unsigned long integer
O : object
Q : unsigned long long integer
S : string
U : unicode
V : void
b : signed char
d : double precision
g : long precision
f : single precision
i : integer
h : short
l : long integer
q : long long integer
"""
return _namefromtype[char]
#-----------------------------------------------------------------------------
#determine the "minimum common type" for a group of arrays.
array_type = [[_nx.single, _nx.double, _nx.longdouble],
[_nx.csingle, _nx.cdouble, _nx.clongdouble]]
array_precision = {_nx.single : 0,
_nx.double : 1,
_nx.longdouble : 2,
_nx.csingle : 0,
_nx.cdouble : 1,
_nx.clongdouble : 2}
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
The return type will always be an inexact (i.e. floating point) scalar
type, even if all the arrays are integer arrays. If one of the inputs is
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
All input arrays can be safely cast to the returned dtype without loss
of information.
Parameters
----------
array1, array2, ... : ndarrays
Input arrays.
Returns
-------
out : data type code
Data type code.
See Also
--------
dtype, mintypecode
Examples
--------
>>> np.common_type(np.arange(2, dtype=np.float32))
<type 'numpy.float32'>
>>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
<type 'numpy.float64'>
>>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
<type 'numpy.complex128'>
"""
is_complex = False
precision = 0
for a in arrays:
t = a.dtype.type
if iscomplexobj(a):
is_complex = True
if issubclass(t, _nx.integer):
p = 1
else:
p = array_precision.get(t, None)
if p is None:
raise TypeError("can't get common type for non-numeric array")
precision = max(precision, p)
if is_complex:
return array_type[1][precision]
else:
return array_type[0][precision]
def datetime_data(dtype):
"""Return (unit, numerator, denominator, events) from a datetime dtype
"""
try:
import ctypes
except ImportError:
raise RuntimeError, "Cannot access date-time internals without ctypes installed"
if dtype.kind not in ['m','M']:
raise ValueError, "Not a date-time dtype"
# TODO: This used to have
# obj = dtype.metadata[METADATA_DTSTR]
# now we get an error because obj is not set.
class DATETIMEMETA(ctypes.Structure):
_fields_ = [('base', ctypes.c_int),
('num', ctypes.c_int),
('den', ctypes.c_int),
('events', ctypes.c_int)]
import sys
if sys.version_info[:2] >= (3, 0):
func = ctypes.pythonapi.PyCapsule_GetPointer
func.argtypes = [ctypes.py_object, ctypes.c_char_p]
func.restype = ctypes.c_void_p
result = func(ctypes.py_object(obj), ctypes.c_char_p(None))
else:
func = ctypes.pythonapi.PyCObject_AsVoidPtr
func.argtypes = [ctypes.py_object]
func.restype = ctypes.c_void_p
result = func(ctypes.py_object(obj))
result = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(DATETIMEMETA))
struct = result[0]
base = struct.base
# FIXME: This needs to be kept consistent with enum in ndarrayobject.h
from numpy.core.multiarray import DATETIMEUNITS
obj = ctypes.py_object(DATETIMEUNITS)
if sys.version_info[:2] >= (2,7):
result = func(obj, ctypes.c_char_p(None))
else:
result = func(obj)
_unitnum2name = ctypes.cast(ctypes.c_void_p(result), ctypes.POINTER(ctypes.c_char_p))
return (_unitnum2name[base], struct.num, struct.den, struct.events)
| gpl-3.0 |
trolldbois/python-haystack | test/haystack/mappings/test_base.py | 1 | 13863 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests haystack.utils ."""
from __future__ import print_function
import logging
import mmap
import os
import struct
import unittest
from haystack import listmodel
from haystack import target
from haystack.mappings.base import AMemoryMapping
from haystack.mappings.process import make_local_memory_handler
from haystack.mappings import folder
from test.haystack import SrcTests
log = logging.getLogger('test_memory_mapping')
class TestMmapHack(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_mmap_hack64(self):
my_target = target.TargetPlatform.make_target_linux_64()
my_ctypes = my_target.get_target_ctypes()
my_utils = my_target.get_target_ctypes_utils()
real_ctypes_long = my_ctypes.get_real_ctypes_member('c_ulong')
fname = os.path.normpath(os.path.abspath(__file__))
fin = open(fname, 'rb')
local_mmap_bytebuffer = mmap.mmap(fin.fileno(), 1024, access=mmap.ACCESS_READ)
        # yeap, that's right, I'm stealing the pointer value. DEAL WITH IT.
heapmap = struct.unpack('L', real_ctypes_long.from_address(id(local_mmap_bytebuffer) +
2 * (my_ctypes.sizeof(real_ctypes_long))))[0]
log.debug('MMAP HACK: heapmap: 0x%0.8x' % heapmap)
handler = make_local_memory_handler(force=True)
ret = [m for m in handler.get_mappings() if heapmap in m]
if len(ret) == 0:
for m in handler.get_mappings():
print(m)
# heapmap is a pointer value in local memory
self.assertEqual(len(ret), 1)
# heapmap is a pointer value to this executable?
self.assertEqual(ret[0].pathname, fname)
self.assertIn('CTypesProxy-8:8:16', str(my_ctypes))
fin.close()
fin = None
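        # The pointer theft above leans on a CPython layout detail: the mmap
        # object's backing buffer address is assumed to sit two machine words
        # past the object header, hence the read at
        # id(obj) + 2 * sizeof(c_ulong); if that layout changes, the unpack
        # silently yields garbage rather than raising.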
def test_mmap_hack32(self):
my_target = target.TargetPlatform.make_target_linux_32()
my_ctypes = my_target.get_target_ctypes()
my_utils = my_target.get_target_ctypes_utils()
real_ctypes_long = my_ctypes.get_real_ctypes_member('c_ulong')
fname = os.path.normpath(os.path.abspath(__file__))
fin = open(fname, 'rb')
local_mmap_bytebuffer = mmap.mmap(fin.fileno(), 1024, access=mmap.ACCESS_READ)
        # yeap, that's right, I'm stealing the pointer value. DEAL WITH IT.
heapmap = struct.unpack('L', real_ctypes_long.from_address(id(local_mmap_bytebuffer) +
2 * (my_ctypes.sizeof(real_ctypes_long))))[0]
log.debug('MMAP HACK: heapmap: 0x%0.8x', heapmap)
maps = make_local_memory_handler(force=True)
# print 'MMAP HACK: heapmap: 0x%0.8x' % heapmap
# for m in maps:
# print m
ret = [m for m in maps if heapmap in m]
# heapmap is a pointer value in local memory
self.assertEqual(len(ret), 1)
# heapmap is a pointer value to this executable?
self.assertEqual(ret[0].pathname, fname)
self.assertIn('CTypesProxy-4:4:12', str(my_ctypes))
fin.close()
fin = None
class TestMappingsLinux(SrcTests):
@classmethod
def setUpClass(cls):
cls.memory_handler = folder.load('test/dumps/ssh/ssh.1')
@classmethod
def tearDownClass(cls):
cls.memory_handler.reset_mappings()
cls.memory_handler = None
def test_get_mapping(self):
self.assertEqual(len(self.memory_handler._get_mapping('[heap]')), 1)
self.assertEqual(len(self.memory_handler._get_mapping('None')), 9)
def test_get_mapping_for_address(self):
finder = self.memory_handler.get_heap_finder()
walker = finder.list_heap_walkers()[0]
self.assertEqual(walker.get_heap_address(), self.memory_handler.get_mapping_for_address(0xb84e02d3).start)
def test_contains(self):
for m in self.memory_handler:
self.assertTrue(m.start in self.memory_handler)
self.assertTrue((m.end - 1) in self.memory_handler)
def test_len(self):
self.assertEqual(len(self.memory_handler), 70)
def test_getitem(self):
self.assertTrue(isinstance(self.memory_handler[0], AMemoryMapping))
self.assertTrue(
isinstance(self.memory_handler[len(self.memory_handler) - 1], AMemoryMapping))
with self.assertRaises(IndexError):
self.memory_handler[0x0005c000]
def test_iter(self):
mps = [m for m in self.memory_handler]
mps2 = [m for m in self.memory_handler.get_mappings()]
self.assertEqual(mps, mps2)
def test_setitem(self):
with self.assertRaises(NotImplementedError):
self.memory_handler[0x0005c000] = 1
def test_get_os_name(self):
x = self.memory_handler.get_target_platform().get_os_name()
self.assertEqual(x, 'linux')
def test_get_cpu_bits(self):
x = self.memory_handler.get_target_platform().get_cpu_bits()
self.assertEqual(x, 32)
class TestMappingsLinuxAddresses32(SrcTests):
@classmethod
def setUpClass(cls):
cls.memory_handler = folder.load('test/src/test-ctypes5.32.dump')
cls.my_target = cls.memory_handler.get_target_platform()
cls.my_ctypes = cls.my_target.get_target_ctypes()
cls.my_utils = cls.my_target.get_target_ctypes_utils()
cls.my_model = cls.memory_handler.get_model()
cls.ctypes5_gen32 = cls.my_model.import_module("test.src.ctypes5_gen32")
cls.validator = listmodel.ListModel(cls.memory_handler, None)
def setUp(self):
self._load_offsets_values('test/src/test-ctypes5.32.dump')
@classmethod
def tearDownClass(cls):
cls.memory_handler = None
cls.my_target = None
cls.my_ctypes = None
cls.my_utils = None
cls.my_model = None
cls.ctypes5_gen32 = None
pass
def test_is_valid_address(self):
offset = self.offsets['struct_d'][0]
m = self.memory_handler.get_mapping_for_address(offset)
d = m.read_struct(offset, self.ctypes5_gen32.struct_d)
ret = self.validator.load_members(d, 10)
self.assertTrue(self.memory_handler.is_valid_address(d.a))
self.assertTrue(self.memory_handler.is_valid_address(d.b))
self.assertTrue(self.memory_handler.is_valid_address(d.d))
self.assertTrue(self.memory_handler.is_valid_address(d.h))
pass
def test_is_valid_address_value(self):
offset = self.offsets['struct_d'][0]
m = self.memory_handler.get_mapping_for_address(offset)
d = m.read_struct(offset, self.ctypes5_gen32.struct_d)
ret = self.validator.load_members(d, 10)
self.assertTrue(self.memory_handler.is_valid_address(d.a.value))
self.assertTrue(self.memory_handler.is_valid_address(d.b.value))
self.assertTrue(self.memory_handler.is_valid_address(d.d.value))
self.assertTrue(self.memory_handler.is_valid_address(d.h.value))
pass
class TestMappingsWin32(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.memory_handler = folder.load('test/dumps/putty/putty.1.dump')
cls.my_target = cls.memory_handler.get_target_platform()
cls.my_ctypes = cls.my_target.get_target_ctypes()
cls.my_utils = cls.my_target.get_target_ctypes_utils()
@classmethod
def tearDownClass(cls):
cls.memory_handler.reset_mappings()
cls.memory_handler = None
cls.my_target = None
cls.my_ctypes = None
cls.my_utils = None
def test_get_mapping(self):
# FIXME: remove
with self.assertRaises(IndexError):
self.assertEqual(len(self.memory_handler._get_mapping('[heap]')), 1)
self.assertEqual(len(self.memory_handler._get_mapping('None')), 71)
def test_get_mapping_for_address(self):
m = self.memory_handler.get_mapping_for_address(0x005c0000)
        self.assertNotEqual(m, False)
self.assertEqual(m.start, 0x005c0000)
self.assertEqual(m.end, 0x00619000)
def test_contains(self):
for m in self.memory_handler:
self.assertTrue(m.start in self.memory_handler)
self.assertTrue((m.end - 1) in self.memory_handler)
def test_len(self):
self.assertEqual(len(self.memory_handler), 403)
def test_getitem(self):
self.assertTrue(isinstance(self.memory_handler[0], AMemoryMapping))
self.assertTrue(
isinstance(self.memory_handler[len(self.memory_handler) - 1], AMemoryMapping))
with self.assertRaises(IndexError):
self.memory_handler[0x0005c000]
def test_iter(self):
mps = [m for m in self.memory_handler]
mps2 = [m for m in self.memory_handler.get_mappings()]
self.assertEqual(mps, mps2)
def test_setitem(self):
with self.assertRaises(NotImplementedError):
self.memory_handler[0x0005c000] = 1
def test_get_os_name(self):
x = self.memory_handler.get_target_platform().get_os_name()
self.assertEqual(x, 'win7')
def test_get_cpu_bits(self):
x = self.memory_handler.get_target_platform().get_cpu_bits()
self.assertEqual(x, 32)
class TestReferenceBook(unittest.TestCase):
"""Test the reference book."""
def setUp(self):
self.memory_handler = folder.load('test/src/test-ctypes6.32.dump')
def tearDown(self):
self.memory_handler.reset_mappings()
self.memory_handler = None
def test_keepRef(self):
self.assertEqual(len(self.memory_handler.getRefByAddr(0xcafecafe)), 0)
self.assertEqual(len(self.memory_handler.getRefByAddr(0xdeadbeef)), 0)
# same address, same type
self.memory_handler.keepRef(1, int, 0xcafecafe)
self.memory_handler.keepRef(2, int, 0xcafecafe)
self.memory_handler.keepRef(3, int, 0xcafecafe)
me = self.memory_handler.getRefByAddr(0xcafecafe)
# only one ref ( the first)
self.assertEqual(len(me), 1)
# different type, same address
self.memory_handler.keepRef('4', str, 0xcafecafe)
me = self.memory_handler.getRefByAddr(0xcafecafe)
# multiple refs
self.assertEqual(len(me), 2)
return
def test_hasRef(self):
self.assertEqual(len(self.memory_handler.getRefByAddr(0xcafecafe)), 0)
self.assertEqual(len(self.memory_handler.getRefByAddr(0xdeadbeef)), 0)
# same address, different types
self.memory_handler.keepRef(1, int, 0xcafecafe)
self.memory_handler.keepRef(2, float, 0xcafecafe)
self.memory_handler.keepRef(3, str, 0xcafecafe)
self.assertTrue(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(str, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(int, 0xdeadbeef))
me = self.memory_handler.getRefByAddr(0xcafecafe)
# multiple refs
self.assertEqual(len(me), 3)
def test_getRef(self):
self.assertEqual(len(self.memory_handler.getRefByAddr(0xcafecafe)), 0)
self.assertEqual(len(self.memory_handler.getRefByAddr(0xdeadbeef)), 0)
self.memory_handler.keepRef(1, int, 0xcafecafe)
self.memory_handler.keepRef(2, float, 0xcafecafe)
self.assertEqual(self.memory_handler.getRef(int, 0xcafecafe), 1)
self.assertEqual(self.memory_handler.getRef(float, 0xcafecafe), 2)
self.assertIsNone(self.memory_handler.getRef(str, 0xcafecafe))
self.assertIsNone(self.memory_handler.getRef(str, 0xdeadbeef))
self.assertIsNone(self.memory_handler.getRef(int, 0xdeadbeef))
def test_delRef(self):
self.assertEqual(len(self.memory_handler.getRefByAddr(0xcafecafe)), 0)
self.assertEqual(len(self.memory_handler.getRefByAddr(0xdeadbeef)), 0)
self.memory_handler.keepRef(1, int, 0xcafecafe)
self.memory_handler.keepRef(2, float, 0xcafecafe)
self.memory_handler.keepRef(3, str, 0xcafecafe)
self.assertTrue(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(str, 0xcafecafe))
# del one type
self.memory_handler.delRef(str, 0xcafecafe)
self.assertTrue(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(str, 0xcafecafe))
# try harder, same type, same result
self.memory_handler.delRef(str, 0xcafecafe)
self.assertTrue(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(str, 0xcafecafe))
self.memory_handler.delRef(int, 0xcafecafe)
self.assertFalse(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertTrue(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(str, 0xcafecafe))
self.memory_handler.delRef(float, 0xcafecafe)
self.assertFalse(self.memory_handler.hasRef(int, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(float, 0xcafecafe))
self.assertFalse(self.memory_handler.hasRef(str, 0xcafecafe))
if __name__ == '__main__':
# logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO)
# logging.getLogger('memory_mapping').setLevel(logging.DEBUG)
# logging.getLogger('basicmodel').setLevel(logging.INFO)
# logging.getLogger('model').setLevel(logging.INFO)
# logging.getLogger('listmodel').setLevel(logging.INFO)
unittest.main(verbosity=2)
| gpl-3.0 |
coecms/ARCCSSive | tests/CMIP5/db_fixture.py | 1 | 7548 | #!/usr/bin/env python
"""
file: tests/CMIP5/db_fixture.py
author: Scott Wales <scott.wales@unimelb.edu.au>
Copyright 2015 ARC Centre of Excellence for Climate Systems Science
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from ARCCSSive import CMIP5
from ARCCSSive.CMIP5.Model import *
from sqlalchemy.orm.exc import NoResultFound
from datetime import date
def insert_unique(db, klass, **kwargs):
"""
Insert an item into the DB if it can't be found
"""
try:
value = db.query(klass).filter_by(**kwargs).one()
except NoResultFound:
value = klass(**kwargs)
db.add(value)
db.commit()
return value
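# Illustrative use (fixture values as below); calling it twice with the same
# keyword arguments returns the existing row instead of inserting a copy:
#
#   inst = insert_unique(db, Instance, variable='tas', mip='Amon',
#                        model='ACCESS1-3', experiment='rcp45',
#                        ensemble='r1i1p1', realm='realm')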
def retrieve_item(db, klass, **kwargs):
"""
    Retrieve an item from the DB if it can be found
"""
    try:
        value = db.query(klass).filter_by(**kwargs).one()
    except NoResultFound:
        print("Cannot find fixture with ", kwargs)
        return None
    return value
def add_instance_item(db, variable, mip, model, experiment, ensemble, realm):
"""
Add a new test instance item to the DB
"""
instance = insert_unique(db, Instance,
variable = variable,
mip = mip,
model = model,
experiment = experiment,
ensemble = ensemble,
realm = realm)
return instance.id
def add_version_item(db, instance_id, path, is_latest, checked_on, to_update, dataset_id, version):
#def add_version_item(db, **kwargs):
"""
Add a new test version item to the DB
"""
#version = insert_unique(db, Version,**kwargs)
version = insert_unique(db, Version,
instance_id = instance_id,
path = path,
is_latest = is_latest,
checked_on = checked_on,
to_update = to_update,
dataset_id = dataset_id,
version = version)
return version.id
def add_warning_item(db, version_id, warning, added_by, added_on):
"""
Add a new test warning item to the DB
"""
warning = insert_unique(db, VersionWarning,
version_id = version_id,
warning = warning,
added_on = added_on,
added_by = added_by)
def add_file_item(db, version_id, filename, md5, sha256):
"""
Add a new test file item to the DB
"""
afile = insert_unique(db, VersionFile,
version_id = version_id,
filename = filename,
md5 = md5,
sha256 = sha256)
@pytest.fixture(scope="module")
def session(request, tmpdir_factory):
session = CMIP5.connect('sqlite:///:memory:')
dira = tmpdir_factory.mktemp('a')
dirb = tmpdir_factory.mktemp('b')
# Create some example entries
db = session.session
added_on=date.today()
inst1_id = add_instance_item(db,
variable = 'a',
mip = '6hrLev',
model = 'c',
experiment = 'd',
ensemble = 'e',
realm = 'realm')
v11_id = add_version_item(db,
instance_id = inst1_id,
path = dira.strpath,
is_latest = False,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'v20111201')
v12_id = add_version_item(db,
instance_id = inst1_id,
path = dira.strpath,
is_latest = False,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'v20120101')
v13_id = add_version_item(db,
instance_id = inst1_id,
path = dira.strpath,
is_latest = False,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'NA')
inst2_id = add_instance_item(db,
variable = 'f',
mip = 'cfMon',
model = 'c',
experiment = 'd',
ensemble = 'e',
realm = 'realm')
v21_id = add_version_item(db,
instance_id = inst2_id,
path = dirb.strpath,
is_latest = True,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'v20111201')
v22_id = add_version_item(db,
instance_id = inst2_id,
path = dirb.strpath,
is_latest = False,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'v20120101')
add_warning_item(db,
version_id = v11_id,
warning = 'Test warning for inst1 v20111201',
added_by = 'someone@example.com',
added_on = added_on)
add_warning_item(db,
version_id = v12_id,
warning = 'Test warning for inst1 v20120101',
added_by = 'someone@example.com',
added_on = added_on)
add_file_item(db,
version_id = v22_id,
filename = 'Somefilename',
md5 = 'Somemd5',
sha256 = 'Somesha256')
add_file_item(db,
version_id = v22_id,
filename = 'Anotherfilename',
md5 = 'Anothermd5',
sha256 = 'Anothersha256')
add_warning_item(db,
version_id = v21_id,
warning = 'Test warning for inst2 v20111201',
added_by = 'anyone@example.com',
added_on = added_on)
inst = add_instance_item(db,
variable = 'tas',
mip = 'Amon',
model = 'ACCESS1-3',
experiment = 'rcp45',
ensemble = 'r1i1p1',
realm = 'realm')
vers = add_version_item(db,
instance_id = inst,
path = dirb.strpath,
is_latest = False,
checked_on = added_on,
to_update = False,
dataset_id = 'someid',
version = 'v20130507')
add_file_item(db,
version_id = vers,
filename = 'example.nc',
md5 = None,
sha256 = None)
# add more instances to test unique function
inst0 = add_instance_item(db,
variable = 'tas',
mip = 'Amon',
model = 'ACCESS1-3',
experiment = 'rcp26',
ensemble = 'r1i1p1',
realm = 'realm')
inst0 = add_instance_item(db,
variable = 'a',
mip = 'Amon',
model = 'MIROC5',
experiment = 'rcp26',
ensemble = 'r1i1p1',
realm = 'realm')
inst0 = add_instance_item(db,
variable = 'a',
mip = '6hrLev',
model = 'MIROC5',
experiment = 'rcp45',
ensemble = 'r2i1p1',
realm = 'realm')
inst0 = add_instance_item(db,
variable = 'tas',
mip = 'cfMon',
model = 'MIROC5',
experiment = 'rcp45',
ensemble = 'r2i1p1',
realm = 'realm')
db.commit()
# Close the session
def fin():
db.close()
request.addfinalizer(fin)
return session
| apache-2.0 |
jusdng/odoo | openerp/addons/base/module/wizard/base_import_language.py | 337 | 2644 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
from tempfile import TemporaryFile
from openerp import tools
from openerp.osv import osv, fields
class base_language_import(osv.osv_memory):
""" Language Import """
_name = "base.language.import"
_description = "Language Import"
_columns = {
'name': fields.char('Language Name', required=True),
'code': fields.char('ISO Code', size=5, help="ISO Language and Country code, e.g. en_US", required=True),
'data': fields.binary('File', required=True),
'overwrite': fields.boolean('Overwrite Existing Terms',
help="If you enable this option, existing translations (including custom ones) "
"will be overwritten and replaced by those in this file"),
}
def import_lang(self, cr, uid, ids, context=None):
if context is None:
context = {}
this = self.browse(cr, uid, ids[0])
if this.overwrite:
context = dict(context, overwrite=True)
fileobj = TemporaryFile('w+')
try:
fileobj.write(base64.decodestring(this.data))
# now we determine the file format
fileobj.seek(0)
first_line = fileobj.readline().strip().replace('"', '').replace(' ', '')
fileformat = first_line.endswith("type,name,res_id,src,value") and 'csv' or 'po'
fileobj.seek(0)
tools.trans_load_data(cr, fileobj, fileformat, this.code, lang_name=this.name, context=context)
finally:
fileobj.close()
return True
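        # The format sniffing above is heuristic: a first line ending in
        # type,name,res_id,src,value (quotes and spaces stripped) selects the
        # 'csv' loader, so an export beginning with
        #   "module","type","name","res_id","src","value"
        # is read as CSV; anything else falls back to gettext 'po'.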
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EnviroCentre/jython-upgrade | jython/lib/site-packages/pip/commands/uninstall.py | 3 | 2289 | from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
name = 'uninstall'
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
summary = 'Uninstall packages.'
def __init__(self, *args, **kw):
super(UninstallCommand, self).__init__(*args, **kw)
self.cmd_opts.add_option(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Uninstall all the packages listed in the given requirements '
'file. This option can be used multiple times.',
)
self.cmd_opts.add_option(
'-y', '--yes',
dest='yes',
action='store_true',
help="Don't ask for confirmation of uninstall deletions.")
self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
session = self._build_session(options)
requirement_set = RequirementSet(
build_dir=None,
src_dir=None,
download_dir=None,
session=session,
)
for name in args:
requirement_set.add_requirement(
InstallRequirement.from_line(name))
for filename in options.requirements:
for req in parse_requirements(
filename,
options=options,
session=session):
requirement_set.add_requirement(req)
if not requirement_set.has_requirements:
raise InstallationError(
'You must give at least one requirement to %(name)s (see "pip '
'help %(name)s")' % dict(name=self.name)
)
requirement_set.uninstall(auto_confirm=options.yes)
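# Typical command lines this run() method backs (illustrative):
#   pip uninstall simplejson
#   pip uninstall -y -r requirements.txt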
| mit |
scality/cinder | cinder/tests/unit/api/contrib/test_volume_replication.py | 5 | 11660 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for volume replication API code.
"""
import json
import mock
from oslo_config import cfg
import webob
from cinder import context
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import utils as tests_utils
CONF = cfg.CONF
def app():
# no auth, just let environ['cinder.context'] pass through
api = fakes.router.APIRouter()
mapper = fakes.urlmap.URLMap()
mapper['/v2'] = api
return mapper
class VolumeReplicationAPITestCase(test.TestCase):
"""Test Cases for replication API."""
def setUp(self):
super(VolumeReplicationAPITestCase, self).setUp()
self.ctxt = context.RequestContext('admin', 'fake', True)
self.volume_params = {
'host': CONF.host,
'size': 1}
def _get_resp(self, operation, volume_id, xml=False):
"""Helper for a replication action req for the specified volume_id."""
req = webob.Request.blank('/v2/fake/volumes/%s/action' % volume_id)
req.method = 'POST'
if xml:
body = '<os-%s-replica/>' % operation
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
req.body = body
else:
body = {'os-%s-replica' % operation: ''}
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
req.environ['cinder.context'] = context.RequestContext('admin',
'fake',
True)
res = req.get_response(app())
return req, res
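        # For example, _get_resp('promote', vol_id) posts the JSON body
        # {"os-promote-replica": ""} to /v2/fake/volumes/<vol_id>/action,
        # while the xml=True variant sends <os-promote-replica/> instead.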
def test_promote_bad_id(self):
(req, res) = self._get_resp('promote', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_bad_id_xml(self):
(req, res) = self._get_resp('promote', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_promote_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_promote_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_volume_status_xml(self,
_rpcapi_promote):
for status in ['error', 'in-use']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['available']:
volume = tests_utils.create_volume(self.ctxt,
status = status,
replication_status = 'active',
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['active', 'active-stopped']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.promote_replica')
def test_promote_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['error', 'copying', 'inactive']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['active', 'active-stopped']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('promote', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
def test_reenable_bad_id(self):
(req, res) = self._get_resp('reenable', 'fake')
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_bad_id_xml(self):
(req, res) = self._get_resp('reenable', 'fake', xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(404, res.status_int, msg)
def test_reenable_volume_not_replicated(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
def test_reenable_volume_not_replicated_xml(self):
volume = tests_utils.create_volume(
self.ctxt,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status(self,
_rpcapi_promote):
for status in ['active', 'copying']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['inactive', 'active-stopped', 'error']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'])
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
@mock.patch('cinder.volume.rpcapi.VolumeAPI.reenable_replication')
def test_reenable_replication_replication_status_xml(self,
_rpcapi_promote):
for status in ['active', 'copying']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(400, res.status_int, msg)
for status in ['inactive', 'active-stopped', 'error']:
volume = tests_utils.create_volume(self.ctxt,
status = 'available',
replication_status = status,
**self.volume_params)
(req, res) = self._get_resp('reenable', volume['id'], xml=True)
msg = ("request: %s\nresult: %s" % (req, res))
self.assertEqual(202, res.status_int, msg)
| apache-2.0 |
emonty/ansible | lib/ansible/modules/system/iptables.py | 20 | 28339 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
# Copyright: (c) 2017, Sébastien DA ROCHA <sebastien@da-rocha.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: iptables
short_description: Modify iptables rules
version_added: "2.0"
author:
- Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
- Sébastien DA ROCHA (@sebastiendarocha)
description:
- C(iptables) is used to set up, maintain, and inspect the tables of IP packet
filter rules in the Linux kernel.
- This module does not handle the saving and/or loading of rules, but rather
only manipulates the current rules that are present in memory. This is the
same as the behaviour of the C(iptables) and C(ip6tables) command which
this module uses internally.
notes:
  - This module just deals with individual rules. If you need advanced
chaining of rules the recommended way is to template the iptables restore
file.
options:
table:
description:
- This option specifies the packet matching table which the command should operate on.
- If the kernel is configured with automatic module loading, an attempt will be made
to load the appropriate module for that table if it is not already there.
type: str
choices: [ filter, nat, mangle, raw, security ]
default: filter
state:
description:
- Whether the rule should be absent or present.
type: str
choices: [ absent, present ]
default: present
action:
description:
- Whether the rule should be appended at the bottom or inserted at the top.
- If the rule already exists the chain will not be modified.
type: str
choices: [ append, insert ]
default: append
version_added: "2.2"
rule_num:
description:
- Insert the rule as the given rule number.
- This works only with C(action=insert).
type: str
version_added: "2.5"
ip_version:
description:
- Which version of the IP protocol this rule should apply to.
type: str
choices: [ ipv4, ipv6 ]
default: ipv4
chain:
description:
- Specify the iptables chain to modify.
- This could be a user-defined chain or one of the standard iptables chains, like
C(INPUT), C(FORWARD), C(OUTPUT), C(PREROUTING), C(POSTROUTING), C(SECMARK) or C(CONNSECMARK).
type: str
protocol:
description:
- The protocol of the rule or of the packet to check.
- The specified protocol can be one of C(tcp), C(udp), C(udplite), C(icmp), C(esp),
C(ah), C(sctp) or the special keyword C(all), or it can be a numeric value,
representing one of these protocols or a different one.
- A protocol name from I(/etc/protocols) is also allowed.
- A C(!) argument before the protocol inverts the test.
- The number zero is equivalent to all.
- C(all) will match with all protocols and is taken as default when this option is omitted.
type: str
source:
description:
- Source specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
destination:
description:
- Destination specification.
- Address can be either a network name, a hostname, a network IP address
(with /mask), or a plain IP address.
- Hostnames will be resolved once only, before the rule is submitted to
the kernel. Please note that specifying any name to be resolved with
a remote query such as DNS is a really bad idea.
- The mask can be either a network mask or a plain number, specifying
the number of 1's at the left side of the network mask. Thus, a mask
of 24 is equivalent to 255.255.255.0. A C(!) argument before the
address specification inverts the sense of the address.
type: str
tcp_flags:
description:
- TCP flags specification.
- C(tcp_flags) expects a dict with the two keys C(flags) and C(flags_set).
type: dict
default: {}
version_added: "2.4"
suboptions:
flags:
description:
- List of flags you want to examine.
type: list
flags_set:
description:
- Flags to be set.
type: list
match:
description:
- Specifies a match to use, that is, an extension module that tests for
a specific property.
      - The set of matches makes up the condition under which a target is invoked.
- Matches are evaluated first to last if specified as an array and work in short-circuit
fashion, i.e. if one extension yields false, evaluation will stop.
type: list
default: []
jump:
description:
- This specifies the target of the rule; i.e., what to do if the packet matches it.
- The target can be a user-defined chain (other than the one
this rule is in), one of the special builtin targets which decide the
fate of the packet immediately, or an extension (see EXTENSIONS
below).
- If this option is omitted in a rule (and the goto parameter
is not used), then matching the rule will have no effect on the
packet's fate, but the counters on the rule will be incremented.
type: str
gateway:
description:
      - This specifies the IP address of the host to send the cloned packets to.
- This option is only valid when C(jump) is set to C(TEE).
type: str
version_added: "2.8"
log_prefix:
description:
      - Specifies a log text for the rule. Only makes sense with a LOG jump.
type: str
version_added: "2.5"
log_level:
description:
- Logging level according to the syslogd-defined priorities.
      - The value can be a string or a number from C(0) to C(7).
- This parameter is only applicable if C(jump) is set to C(LOG).
type: str
version_added: "2.8"
choices: [ '0', '1', '2', '3', '4', '5', '6', '7', 'emerg', 'alert', 'crit', 'error', 'warning', 'notice', 'info', 'debug' ]
goto:
description:
      - This specifies that the processing should continue in a user-specified chain.
      - Unlike the jump argument, a RETURN will not continue processing in
        this chain but instead in the chain that called us via jump.
type: str
in_interface:
description:
- Name of an interface via which a packet was received (only for packets
entering the C(INPUT), C(FORWARD) and C(PREROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins with
this name will match.
- If this option is omitted, any interface name will match.
type: str
out_interface:
description:
- Name of an interface via which a packet is going to be sent (for
packets entering the C(FORWARD), C(OUTPUT) and C(POSTROUTING) chains).
- When the C(!) argument is used before the interface name, the sense is inverted.
- If the interface name ends in a C(+), then any interface which begins
with this name will match.
- If this option is omitted, any interface name will match.
type: str
fragment:
description:
- This means that the rule only refers to second and further fragments
of fragmented packets.
- Since there is no way to tell the source or destination ports of such
a packet (or ICMP type), such a packet will not match any rules which specify them.
- When the "!" argument precedes fragment argument, the rule will only match head fragments,
or unfragmented packets.
type: str
set_counters:
description:
- This enables the administrator to initialize the packet and byte
counters of a rule (during C(INSERT), C(APPEND), C(REPLACE) operations).
type: str
source_port:
description:
- Source port or port range specification.
- This can either be a service name or a port number.
- An inclusive range can also be specified, using the format C(first:last).
- If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
- If the first port is greater than the second one they will be swapped.
type: str
destination_port:
    description:
      - Destination port or port range specification.
      - This can either be a service name or a port number.
      - An inclusive range can also be specified, using the format C(first:last).
      - If the first port is omitted, C(0) is assumed; if the last is omitted, C(65535) is assumed.
      - If the first port is greater than the second one they will be swapped.
      - This is only valid if the rule also specifies one of the protocols
        C(tcp), C(udp), C(dccp) or C(sctp).
type: str
to_ports:
description:
      - This specifies a destination port or range of ports to use; without
        this, the destination port is never altered.
      - This is only valid if the rule also specifies one of the protocols
        C(tcp), C(udp), C(dccp) or C(sctp).
type: str
to_destination:
description:
- This specifies a destination address to use with C(DNAT).
- Without this, the destination address is never altered.
type: str
version_added: "2.1"
to_source:
description:
- This specifies a source address to use with C(SNAT).
- Without this, the source address is never altered.
type: str
version_added: "2.2"
syn:
description:
- This allows matching packets that have the SYN bit set and the ACK
and RST bits unset.
- When negated, this matches all packets with the RST or the ACK bits set.
type: str
choices: [ ignore, match, negate ]
default: ignore
version_added: "2.5"
set_dscp_mark:
description:
- This allows specifying a DSCP mark to be added to packets.
It takes either an integer or hex value.
- Mutually exclusive with C(set_dscp_mark_class).
type: str
version_added: "2.1"
set_dscp_mark_class:
description:
- This allows specifying a predefined DiffServ class which will be
translated to the corresponding DSCP mark.
- Mutually exclusive with C(set_dscp_mark).
type: str
version_added: "2.1"
comment:
description:
- This specifies a comment that will be added to the rule.
type: str
ctstate:
description:
- C(ctstate) is a list of the connection states to match in the conntrack module.
      - Possible states are C(INVALID), C(NEW), C(ESTABLISHED), C(RELATED), C(UNTRACKED), C(SNAT), C(DNAT).
type: list
default: []
src_range:
description:
- Specifies the source IP range to match in the iprange module.
type: str
version_added: "2.8"
dst_range:
description:
- Specifies the destination IP range to match in the iprange module.
type: str
version_added: "2.8"
limit:
description:
- Specifies the maximum average number of matches to allow per second.
      - The number can specify units explicitly, using C(/second), C(/minute),
        C(/hour) or C(/day), or parts of them (so C(5/second) is the same as
        C(5/s)).
type: str
limit_burst:
description:
- Specifies the maximum burst before the above limit kicks in.
type: str
version_added: "2.1"
uid_owner:
description:
- Specifies the UID or username to use in match by owner rule.
      - Since Ansible 2.6, when the C(!) argument is prepended, it inverts the
        rule to apply to all users except the one specified.
type: str
version_added: "2.1"
gid_owner:
description:
- Specifies the GID or group to use in match by owner rule.
type: str
version_added: "2.9"
reject_with:
description:
- 'Specifies the error packet type to return while rejecting. It implies
"jump: REJECT"'
type: str
version_added: "2.1"
icmp_type:
description:
      - This allows specification of the ICMP type, which can be a numeric
        ICMP type, a type/code pair, or one of the ICMP type names shown by the
        command C(iptables -p icmp -h).
type: str
version_added: "2.2"
flush:
description:
- Flushes the specified table and chain of all rules.
- If no chain is specified then the entire table is purged.
- Ignores all other parameters.
type: bool
version_added: "2.2"
policy:
description:
- Set the policy for the chain to the given target.
- Only built-in chains can have policies.
- This parameter requires the C(chain) parameter.
- Ignores all other parameters.
type: str
choices: [ ACCEPT, DROP, QUEUE, RETURN ]
version_added: "2.2"
wait:
description:
- Wait N seconds for the xtables lock to prevent multiple instances of
the program from running concurrently.
type: str
version_added: "2.10"
'''
EXAMPLES = r'''
- name: Block specific IP
iptables:
chain: INPUT
source: 8.8.8.8
jump: DROP
become: yes
- name: Forward port 80 to 8600
iptables:
table: nat
chain: PREROUTING
in_interface: eth0
protocol: tcp
match: tcp
destination_port: 80
jump: REDIRECT
to_ports: 8600
comment: Redirect web traffic to port 8600
become: yes
- name: Allow related and established connections
iptables:
chain: INPUT
ctstate: ESTABLISHED,RELATED
jump: ACCEPT
become: yes
- name: Allow new incoming SYN packets on TCP port 22 (SSH).
iptables:
chain: INPUT
protocol: tcp
destination_port: 22
ctstate: NEW
syn: match
jump: ACCEPT
comment: Accept new SSH connections.
- name: Match on IP ranges
iptables:
chain: FORWARD
src_range: 192.168.1.100-192.168.1.199
dst_range: 10.0.0.1-10.0.0.50
jump: ACCEPT
- name: Tag all outbound tcp packets with DSCP mark 8
iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark: 8
protocol: tcp
- name: Tag all outbound tcp packets with DSCP DiffServ class CS1
iptables:
chain: OUTPUT
jump: DSCP
table: mangle
set_dscp_mark_class: CS1
protocol: tcp
- name: Insert a rule on line 5
iptables:
chain: INPUT
protocol: tcp
destination_port: 8080
jump: ACCEPT
action: insert
rule_num: 5
- name: Set the policy for the INPUT chain to DROP
iptables:
chain: INPUT
policy: DROP
- name: Reject tcp with tcp-reset
iptables:
chain: INPUT
protocol: tcp
reject_with: tcp-reset
ip_version: ipv4
- name: Set tcp flags
iptables:
chain: OUTPUT
jump: DROP
protocol: tcp
tcp_flags:
flags: ALL
flags_set:
- ACK
- RST
- SYN
- FIN
- name: iptables flush filter
iptables:
chain: "{{ item }}"
flush: yes
with_items: [ 'INPUT', 'FORWARD', 'OUTPUT' ]
- name: iptables flush nat
iptables:
table: nat
chain: '{{ item }}'
flush: yes
with_items: [ 'INPUT', 'OUTPUT', 'PREROUTING', 'POSTROUTING' ]
- name: Log packets arriving in a user-defined chain
iptables:
chain: LOGGING
action: append
state: present
limit: 2/second
limit_burst: 20
log_prefix: "IPTABLES:INFO: "
log_level: info
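# Illustrative extra example (not from the original module docs; the gateway
# address is a placeholder). TEE clones matching packets to the given host
# and requires the gateway option, per the required_if constraint in main().
- name: Duplicate incoming SSH packets to a monitoring host
  iptables:
    table: mangle
    chain: PREROUTING
    protocol: tcp
    destination_port: 22
    jump: TEE
    gateway: 192.168.0.254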
'''
import re
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
IPTABLES_WAIT_SUPPORT_ADDED = '1.4.20'
IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED = '1.6.0'
BINS = dict(
ipv4='iptables',
ipv6='ip6tables',
)
ICMP_TYPE_OPTIONS = dict(
ipv4='--icmp-type',
ipv6='--icmpv6-type',
)
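# The helpers below translate module parameters into iptables CLI fragments.
# For example (illustrative values), append_param(rule, '!10.0.0.0/8', '-s',
# False) extends rule with ['!', '-s', '10.0.0.0/8'], mirroring iptables'
# negation syntax where '!' precedes the flag.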
def append_param(rule, param, flag, is_list):
if is_list:
for item in param:
append_param(rule, item, flag, False)
else:
if param is not None:
if param[0] == '!':
rule.extend(['!', flag, param[1:]])
else:
rule.extend([flag, param])
def append_tcp_flags(rule, param, flag):
if param:
if 'flags' in param and 'flags_set' in param:
rule.extend([flag, ','.join(param['flags']), ','.join(param['flags_set'])])
def append_match_flag(rule, param, flag, negatable):
if param == 'match':
rule.extend([flag])
elif negatable and param == 'negate':
rule.extend(['!', flag])
def append_csv(rule, param, flag):
if param:
rule.extend([flag, ','.join(param)])
def append_match(rule, param, match):
if param:
rule.extend(['-m', match])
def append_jump(rule, param, jump):
if param:
rule.extend(['-j', jump])
def append_wait(rule, param, flag):
if param:
rule.extend([flag, param])
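# construct_rule() walks the module parameters in a fixed order and returns
# the argument vector handed to iptables. Illustrative sketch: params with
# protocol='tcp', jump='ACCEPT' and destination_port='22' yield roughly
# ['-p', 'tcp', '-j', 'ACCEPT', '--destination-port', '22'].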
def construct_rule(params):
rule = []
append_wait(rule, params['wait'], '-w')
append_param(rule, params['protocol'], '-p', False)
append_param(rule, params['source'], '-s', False)
append_param(rule, params['destination'], '-d', False)
append_param(rule, params['match'], '-m', True)
append_tcp_flags(rule, params['tcp_flags'], '--tcp-flags')
append_param(rule, params['jump'], '-j', False)
if params.get('jump') and params['jump'].lower() == 'tee':
append_param(rule, params['gateway'], '--gateway', False)
append_param(rule, params['log_prefix'], '--log-prefix', False)
append_param(rule, params['log_level'], '--log-level', False)
append_param(rule, params['to_destination'], '--to-destination', False)
append_param(rule, params['to_source'], '--to-source', False)
append_param(rule, params['goto'], '-g', False)
append_param(rule, params['in_interface'], '-i', False)
append_param(rule, params['out_interface'], '-o', False)
append_param(rule, params['fragment'], '-f', False)
append_param(rule, params['set_counters'], '-c', False)
append_param(rule, params['source_port'], '--source-port', False)
append_param(rule, params['destination_port'], '--destination-port', False)
append_param(rule, params['to_ports'], '--to-ports', False)
append_param(rule, params['set_dscp_mark'], '--set-dscp', False)
append_param(
rule,
params['set_dscp_mark_class'],
'--set-dscp-class',
False)
append_match_flag(rule, params['syn'], '--syn', True)
append_match(rule, params['comment'], 'comment')
append_param(rule, params['comment'], '--comment', False)
if 'conntrack' in params['match']:
append_csv(rule, params['ctstate'], '--ctstate')
elif 'state' in params['match']:
append_csv(rule, params['ctstate'], '--state')
elif params['ctstate']:
append_match(rule, params['ctstate'], 'conntrack')
append_csv(rule, params['ctstate'], '--ctstate')
if 'iprange' in params['match']:
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
elif params['src_range'] or params['dst_range']:
append_match(rule, params['src_range'] or params['dst_range'], 'iprange')
append_param(rule, params['src_range'], '--src-range', False)
append_param(rule, params['dst_range'], '--dst-range', False)
append_match(rule, params['limit'] or params['limit_burst'], 'limit')
append_param(rule, params['limit'], '--limit', False)
append_param(rule, params['limit_burst'], '--limit-burst', False)
append_match(rule, params['uid_owner'], 'owner')
append_match_flag(rule, params['uid_owner'], '--uid-owner', True)
append_param(rule, params['uid_owner'], '--uid-owner', False)
append_match(rule, params['gid_owner'], 'owner')
append_match_flag(rule, params['gid_owner'], '--gid-owner', True)
append_param(rule, params['gid_owner'], '--gid-owner', False)
if params['jump'] is None:
append_jump(rule, params['reject_with'], 'REJECT')
append_param(rule, params['reject_with'], '--reject-with', False)
append_param(
rule,
params['icmp_type'],
ICMP_TYPE_OPTIONS[params['ip_version']],
False)
return rule
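# push_arguments() prefixes the rule with the binary, table, chain and the
# requested action flag, e.g. (illustrative path) ['/sbin/iptables', '-t',
# 'filter', '-C', 'INPUT', ...] when probing for an existing rule with '-C'.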
def push_arguments(iptables_path, action, params, make_rule=True):
cmd = [iptables_path]
cmd.extend(['-t', params['table']])
cmd.extend([action, params['chain']])
if action == '-I' and params['rule_num']:
cmd.extend([params['rule_num']])
if make_rule:
cmd.extend(construct_rule(params))
return cmd
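# Idempotence relies on 'iptables -C', which exits with status 0 when an
# identical rule already exists in the chain; check_present() turns that
# exit code into a boolean.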
def check_present(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-C', params)
rc, _, __ = module.run_command(cmd, check_rc=False)
return (rc == 0)
def append_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-A', params)
module.run_command(cmd, check_rc=True)
def insert_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-I', params)
module.run_command(cmd, check_rc=True)
def remove_rule(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-D', params)
module.run_command(cmd, check_rc=True)
def flush_table(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-F', params, make_rule=False)
module.run_command(cmd, check_rc=True)
def set_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-P', params, make_rule=False)
cmd.append(params['policy'])
module.run_command(cmd, check_rc=True)
def get_chain_policy(iptables_path, module, params):
cmd = push_arguments(iptables_path, '-L', params)
rc, out, _ = module.run_command(cmd, check_rc=True)
chain_header = out.split("\n")[0]
result = re.search(r'\(policy ([A-Z]+)\)', chain_header)
if result:
return result.group(1)
return None
def get_iptables_version(iptables_path, module):
cmd = [iptables_path, '--version']
rc, out, _ = module.run_command(cmd, check_rc=True)
return out.split('v')[1].rstrip('\n')
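# Version gating (see main() below): the xtables lock flag '-w' appeared in
# iptables 1.4.20, and '-w' only started accepting a seconds argument in
# 1.6.0, so the 'wait' value is blanked or dropped for older binaries.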
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
table=dict(type='str', default='filter', choices=['filter', 'nat', 'mangle', 'raw', 'security']),
state=dict(type='str', default='present', choices=['absent', 'present']),
action=dict(type='str', default='append', choices=['append', 'insert']),
ip_version=dict(type='str', default='ipv4', choices=['ipv4', 'ipv6']),
chain=dict(type='str'),
rule_num=dict(type='str'),
protocol=dict(type='str'),
wait=dict(type='str'),
source=dict(type='str'),
to_source=dict(type='str'),
destination=dict(type='str'),
to_destination=dict(type='str'),
match=dict(type='list', default=[]),
tcp_flags=dict(type='dict',
options=dict(
flags=dict(type='list'),
flags_set=dict(type='list'))
),
jump=dict(type='str'),
gateway=dict(type='str'),
log_prefix=dict(type='str'),
log_level=dict(type='str',
choices=['0', '1', '2', '3', '4', '5', '6', '7',
'emerg', 'alert', 'crit', 'error',
'warning', 'notice', 'info', 'debug'],
default=None,
),
goto=dict(type='str'),
in_interface=dict(type='str'),
out_interface=dict(type='str'),
fragment=dict(type='str'),
set_counters=dict(type='str'),
source_port=dict(type='str'),
destination_port=dict(type='str'),
to_ports=dict(type='str'),
set_dscp_mark=dict(type='str'),
set_dscp_mark_class=dict(type='str'),
comment=dict(type='str'),
ctstate=dict(type='list', default=[]),
src_range=dict(type='str'),
dst_range=dict(type='str'),
limit=dict(type='str'),
limit_burst=dict(type='str'),
uid_owner=dict(type='str'),
gid_owner=dict(type='str'),
reject_with=dict(type='str'),
icmp_type=dict(type='str'),
syn=dict(type='str', default='ignore', choices=['ignore', 'match', 'negate']),
flush=dict(type='bool', default=False),
policy=dict(type='str', choices=['ACCEPT', 'DROP', 'QUEUE', 'RETURN']),
),
mutually_exclusive=(
['set_dscp_mark', 'set_dscp_mark_class'],
['flush', 'policy'],
),
required_if=[
['jump', 'TEE', ['gateway']],
['jump', 'tee', ['gateway']],
]
)
args = dict(
changed=False,
failed=False,
ip_version=module.params['ip_version'],
table=module.params['table'],
chain=module.params['chain'],
flush=module.params['flush'],
rule=' '.join(construct_rule(module.params)),
state=module.params['state'],
)
ip_version = module.params['ip_version']
iptables_path = module.get_bin_path(BINS[ip_version], True)
# Check if chain option is required
if args['flush'] is False and args['chain'] is None:
module.fail_json(msg="Either chain or flush parameter must be specified.")
if module.params.get('log_prefix', None) or module.params.get('log_level', None):
if module.params['jump'] is None:
module.params['jump'] = 'LOG'
elif module.params['jump'] != 'LOG':
module.fail_json(msg="Logging options can only be used with the LOG jump target.")
# Check if wait option is supported
iptables_version = LooseVersion(get_iptables_version(iptables_path, module))
if iptables_version >= LooseVersion(IPTABLES_WAIT_SUPPORT_ADDED):
if iptables_version < LooseVersion(IPTABLES_WAIT_WITH_SECONDS_SUPPORT_ADDED):
module.params['wait'] = ''
else:
module.params['wait'] = None
# Flush the table
if args['flush'] is True:
args['changed'] = True
if not module.check_mode:
flush_table(iptables_path, module, module.params)
# Set the policy
elif module.params['policy']:
current_policy = get_chain_policy(iptables_path, module, module.params)
if not current_policy:
module.fail_json(msg='Can\'t detect current policy')
changed = current_policy != module.params['policy']
args['changed'] = changed
if changed and not module.check_mode:
set_chain_policy(iptables_path, module, module.params)
else:
insert = (module.params['action'] == 'insert')
rule_is_present = check_present(iptables_path, module, module.params)
should_be_present = (args['state'] == 'present')
# Check if target is up to date
args['changed'] = (rule_is_present != should_be_present)
if args['changed'] is False:
# Target is already up to date
module.exit_json(**args)
# Check only; don't modify
if not module.check_mode:
if should_be_present:
if insert:
insert_rule(iptables_path, module, module.params)
else:
append_rule(iptables_path, module, module.params)
else:
remove_rule(iptables_path, module, module.params)
module.exit_json(**args)
if __name__ == '__main__':
main()
| gpl-3.0 |
bratatidas9/Impala-1 | tests/unittests/test_file_parser.py | 16 | 2882 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Unit tests for the test file parser
#
import logging
import pytest
from tests.util.test_file_parser import *
from tests.common.base_test_suite import BaseTestSuite
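# The sample below follows the workload file format consumed by
# parse_test_file_text(): '====' separates test cases and '---- SECTION'
# headers introduce subsections such as QUERY, RESULTS and TYPES.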
test_text = """
# Text before in the header (before the first ====) should be ignored
# so put this here to test it out.
====
---- QUERY
# comment
SELECT blah from Foo
s
---- RESULTS
'Hi'
---- TYPES
string
====
---- QUERY
SELECT 2
---- RESULTS
'Hello'
---- TYPES
string
#====
# SHOULD PARSE COMMENTED OUT TEST PROPERLY
#---- QUERY: TEST_WORKLOAD_Q2
#SELECT int_col from Bar
#---- RESULTS
#231
#---- TYPES
#int
====
---- QUERY: TEST_WORKLOAD_Q2
SELECT int_col from Bar
---- RESULTS
231
---- TYPES
int
====
"""
VALID_SECTIONS = ['QUERY', 'RESULTS', 'TYPES']
class TestTestFileParser(BaseTestSuite):
def test_valid_parse(self):
results = parse_test_file_text(test_text, VALID_SECTIONS)
assert len(results) == 3
print results[0]
expected_results = {'QUERY': '# comment\nSELECT blah from Foo\ns',
'TYPES': 'string', 'RESULTS': "'Hi'"}
assert results[0] == expected_results
def test_invalid_section(self):
# Restrict valid sections to exclude one of the section names.
valid_sections = ['QUERY', 'RESULTS']
results = parse_test_file_text(test_text, valid_sections, skip_unknown_sections=True)
assert len(results) == 3
expected_results = {'QUERY': '# comment\nSELECT blah from Foo\ns',
'RESULTS': "'Hi'"}
assert results[0] == expected_results
# In this case, instead of ignoring the invalid section we should get an error
try:
results = parse_test_file_text(test_text, valid_sections,
skip_unknown_sections=False)
assert 0, 'Expected error due to invalid section'
except RuntimeError as re:
assert re.message == "Unknown subsection: TYPES"
def test_parse_query_name(self):
results = parse_test_file_text(test_text, VALID_SECTIONS, False)
assert len(results) == 3
expected_results = {'QUERY': 'SELECT int_col from Bar',
'TYPES': 'int', 'RESULTS': '231',
'QUERY_NAME': 'TEST_WORKLOAD_Q2'}
assert results[2] == expected_results
def test_parse_commented_out_test_as_comment(self):
results = parse_test_file_text(test_text, VALID_SECTIONS)
assert len(results) == 3
expected_results = {'QUERY': 'SELECT 2', 'RESULTS': "'Hello'",
'TYPES': "string\n#====\n"\
"# SHOULD PARSE COMMENTED OUT TEST PROPERLY\n"\
"#---- QUERY: TEST_WORKLOAD_Q2\n"\
"#SELECT int_col from Bar\n"\
"#---- RESULTS\n#231\n#---- TYPES\n#int"}
print expected_results
print results[1]
assert results[1] == expected_results
| apache-2.0 |
TheoChevalier/bedrock | bedrock/mozorg/middleware.py | 11 | 2753 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
from email.utils import formatdate
import time
from django.conf import settings
from django.core.exceptions import MiddlewareNotUsed
from django_statsd.middleware import GraphiteRequestTimingMiddleware
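# Adds a 10-minute Cache-Control/Expires pair to non-POST, non-404 responses
# that do not already declare their own caching policy.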
class CacheMiddleware(object):
def process_response(self, request, response):
cache = (request.method != 'POST' and
response.status_code != 404 and
'Cache-Control' not in response)
if cache:
d = datetime.datetime.now() + datetime.timedelta(minutes=10)
stamp = time.mktime(d.timetuple())
response['Cache-Control'] = 'max-age=600'
response['Expires'] = formatdate(timeval=stamp, localtime=False,
usegmt=True)
return response
class MozorgRequestTimingMiddleware(GraphiteRequestTimingMiddleware):
def process_view(self, request, view, view_args, view_kwargs):
if hasattr(view, 'page_name'):
request._view_module = 'page'
request._view_name = view.page_name.replace('/', '.')
request._start_time = time.time()
else:
f = super(MozorgRequestTimingMiddleware, self)
f.process_view(request, view, view_args, view_kwargs)
class ClacksOverheadMiddleware(object):
# bug 1144901
@staticmethod
def process_response(request, response):
if response.status_code == 200:
response['X-Clacks-Overhead'] = 'GNU Terry Pratchett'
return response
class HostnameMiddleware(object):
def __init__(self):
if not settings.ENABLE_HOSTNAME_MIDDLEWARE:
raise MiddlewareNotUsed
values = [getattr(settings, x) for x in ['HOSTNAME', 'DEIS_APP', 'DEIS_DOMAIN']]
self.backend_server = '.'.join(x for x in values if x)
def process_response(self, request, response):
response['X-Backend-Server'] = self.backend_server
return response
class VaryNoCacheMiddleware(object):
def __init__(self):
if not settings.ENABLE_VARY_NOCACHE_MIDDLEWARE:
raise MiddlewareNotUsed
@staticmethod
def process_response(request, response):
if 'vary' in response:
path = request.path
if path != '/' and not any(path.startswith(x) for x in
settings.VARY_NOCACHE_EXEMPT_URL_PREFIXES):
del response['vary']
del response['expires']
response['Cache-Control'] = 'max-age=0'
return response
| mpl-2.0 |
wcmckee/moejobs-site | cache/.mako.tmp/comments_helper_googleplus.tmpl.py | 1 | 2430 | # -*- coding:utf-8 -*-
from mako import runtime, filters, cache
UNDEFINED = runtime.UNDEFINED
STOP_RENDERING = runtime.STOP_RENDERING
__M_dict_builtin = dict
__M_locals_builtin = locals
_magic_number = 10
_modified_time = 1443802885.4031692
_enable_loop = True
_template_filename = '/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl'
_template_uri = 'comments_helper_googleplus.tmpl'
_source_encoding = 'utf-8'
_exports = ['comment_link_script', 'comment_form', 'comment_link']
def render_body(context,**pageargs):
__M_caller = context.caller_stack._push_frame()
try:
__M_locals = __M_dict_builtin(pageargs=pageargs)
__M_writer = context.writer()
__M_writer('\n\n')
__M_writer('\n\n')
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link_script(context):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_form(context,url,title,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n<script src="https://apis.google.com/js/plusone.js"></script>\n<div class="g-comments"\n data-href="')
__M_writer(str(url))
__M_writer('"\n data-first_party_property="BLOGGER"\n data-view_type="FILTERED_POSTMOD">\n</div>\n')
return ''
finally:
context.caller_stack._pop_frame()
def render_comment_link(context,link,identifier):
__M_caller = context.caller_stack._push_frame()
try:
__M_writer = context.writer()
__M_writer('\n<div class="g-commentcount" data-href="')
__M_writer(str(link))
__M_writer('"></div>\n<script src="https://apis.google.com/js/plusone.js"></script>\n')
return ''
finally:
context.caller_stack._pop_frame()
"""
__M_BEGIN_METADATA
{"uri": "comments_helper_googleplus.tmpl", "source_encoding": "utf-8", "filename": "/usr/local/lib/python3.4/dist-packages/nikola/data/themes/base/templates/comments_helper_googleplus.tmpl", "line_map": {"33": 16, "39": 2, "57": 12, "43": 2, "44": 5, "45": 5, "16": 0, "51": 11, "21": 9, "22": 14, "23": 17, "56": 12, "55": 11, "29": 16, "63": 57}}
__M_END_METADATA
"""
| mit |
sesamesushi/desatisrevu | modules/oeditor/oeditor.py | 9 | 10589 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic object editor view that uses REST services."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
import urllib
import appengine_config
from common import jinja_utils
from common import schema_fields
from common import tags
from controllers import utils
import jinja2
from models import custom_modules
from models import transforms
import webapp2
# a set of YUI and inputex modules required by the editor
COMMON_REQUIRED_MODULES = [
'inputex-group', 'inputex-form', 'inputex-jsonschema']
ALL_MODULES = [
'querystring-stringify-simple', 'inputex-select', 'inputex-string',
'inputex-radio', 'inputex-date', 'inputex-datepicker', 'inputex-checkbox',
'inputex-list', 'inputex-color', 'gcb-rte', 'inputex-textarea',
'inputex-url', 'inputex-uneditable', 'inputex-integer', 'inputex-hidden',
'inputex-file', 'io-upload-iframe']
class ObjectEditor(object):
"""Generic object editor powered by jsonschema."""
@classmethod
def get_html_for(
cls, handler, schema_json, annotations, object_key,
rest_url, exit_url,
extra_args=None,
save_method='put',
delete_url=None, delete_message=None, delete_method='post',
auto_return=False, read_only=False,
required_modules=None,
extra_js_files=None,
delete_button_caption='Delete',
save_button_caption='Save',
exit_button_caption='Close'):
"""Creates an HTML code needed to embed and operate this form.
This method creates an HTML, JS and CSS required to embed JSON
schema-based object editor into a view.
Args:
handler: a BaseHandler class, which will host this HTML, JS and CSS
schema_json: a text of JSON schema for the object being edited
annotations: schema annotations dictionary
object_key: a key of an object being edited
rest_url: a REST endpoint for object GET/PUT operation
exit_url: a URL to go to after the editor form is dismissed
extra_args: extra request params passed back in GET and POST
save_method: how the data should be saved to the server (put|upload)
delete_url: optional URL for delete operation
delete_message: string. Optional custom delete confirmation message
delete_method: optional HTTP method for delete operation
auto_return: whether to return to the exit_url on successful save
read_only: optional flag; if set, removes Save and Delete operations
required_modules: list of inputex modules required for this editor
extra_js_files: list of extra JS files to be included
delete_button_caption: string. A caption for the 'Delete' button
save_button_caption: a caption for the 'Save' button
exit_button_caption: a caption for the 'Close' button
Returns:
The HTML, JS and CSS text that will instantiate an object editor.
"""
required_modules = required_modules or ALL_MODULES
if not delete_message:
kind = transforms.loads(schema_json).get('description')
if not kind:
kind = 'Generic Object'
delete_message = 'Are you sure you want to delete this %s?' % kind
# construct parameters
get_url = rest_url
get_args = {'key': object_key}
post_url = rest_url
post_args = {'key': object_key}
if extra_args:
get_args.update(extra_args)
post_args.update(extra_args)
if read_only:
post_url = ''
post_args = ''
custom_rte_tag_icons = []
for tag, tag_class in tags.get_tag_bindings().items():
custom_rte_tag_icons.append({
'name': tag,
'iconUrl': tag_class().get_icon_url()})
template_values = {
'enabled': custom_module.enabled,
'schema': schema_json,
'get_url': '%s?%s' % (get_url, urllib.urlencode(get_args, True)),
'save_url': post_url,
'save_args': transforms.dumps(post_args),
'exit_button_caption': exit_button_caption,
'exit_url': exit_url,
'required_modules': COMMON_REQUIRED_MODULES + required_modules,
'extra_js_files': extra_js_files or [],
'schema_annotations': [
(item[0], transforms.dumps(item[1])) for item in annotations],
'save_method': save_method,
'auto_return': auto_return,
'delete_button_caption': delete_button_caption,
'save_button_caption': save_button_caption,
'custom_rte_tag_icons': transforms.dumps(custom_rte_tag_icons),
'delete_message': delete_message,
}
if delete_url and not read_only:
template_values['delete_url'] = delete_url
if delete_method:
template_values['delete_method'] = delete_method
if appengine_config.BUNDLE_LIB_FILES:
template_values['bundle_lib_files'] = True
return jinja2.utils.Markup(handler.get_template(
'oeditor.html', [os.path.dirname(__file__)]
).render(template_values))
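    # Illustrative call (hypothetical REST endpoint and exit URL):
    # ObjectEditor.get_html_for(handler, schema.get_json_schema(),
    #     schema.get_schema_dict(), key, '/rest/my_object', '/dashboard')
    # returns markup wiring the inputEx form to the given REST endpoint.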
class PopupHandler(webapp2.RequestHandler, utils.ReflectiveRequestHandler):
"""A handler to serve the content of the popup subeditor."""
default_action = 'custom_tag'
get_actions = ['edit_custom_tag', 'add_custom_tag']
post_actions = []
def get_template(self, template_name, dirs):
"""Sets up an environment and Gets jinja template."""
return jinja_utils.get_template(
template_name, dirs + [os.path.dirname(__file__)])
def get_edit_custom_tag(self):
"""Return the the page used to edit a custom HTML tag in a popup."""
tag_name = self.request.get('tag_name')
tag_bindings = tags.get_tag_bindings()
tag_class = tag_bindings[tag_name]
schema = tag_class().get_schema(self)
if schema.has_subregistries():
raise NotImplementedError()
template_values = {}
template_values['form_html'] = ObjectEditor.get_html_for(
self, schema.get_json_schema(), schema.get_schema_dict(), None,
None, None)
self.response.out.write(
self.get_template('popup.html', []).render(template_values))
def get_add_custom_tag(self):
"""Return the page for the popup used to add a custom HTML tag."""
tag_name = self.request.get('tag_name')
tag_bindings = tags.get_tag_bindings()
select_data = []
for name in tag_bindings.keys():
clazz = tag_bindings[name]
select_data.append((name, '%s: %s' % (
clazz.vendor(), clazz.name())))
select_data = sorted(select_data, key=lambda pair: pair[1])
if tag_name:
tag_class = tag_bindings[tag_name]
else:
tag_class = tag_bindings[select_data[0][0]]
tag_schema = tag_class().get_schema(self)
schema = schema_fields.FieldRegistry('Add a Component')
type_select = schema.add_sub_registry('type', 'Component Type')
type_select.add_property(schema_fields.SchemaField(
'tag', 'Name', 'string', select_data=select_data))
schema.add_sub_registry('attributes', registry=tag_schema)
template_values = {}
template_values['form_html'] = ObjectEditor.get_html_for(
self, schema.get_json_schema(), schema.get_schema_dict(), None,
None, None, required_modules=tag_class.required_modules(),
extra_js_files=['add_custom_tag.js'])
self.response.out.write(
self.get_template('popup.html', []).render(template_values))
def create_bool_select_annotation(
keys_list, label, true_label, false_label, class_name=None,
description=None):
"""Creates inputex annotation to display bool type as a select."""
properties = {
'label': label, 'choices': [
{'value': True, 'label': true_label},
{'value': False, 'label': false_label}]}
if class_name:
properties['className'] = class_name
if description:
properties['description'] = description
return (keys_list, {'type': 'select', '_inputex': properties})
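# Illustrative usage (hypothetical field name):
# create_bool_select_annotation(['properties', 'is_draft'], 'Status',
#     'Draft', 'Published')
# yields an inputEx 'select' annotation with True/False choices.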
custom_module = None
def register_module():
"""Registers this module in the registry."""
from controllers import sites # pylint: disable-msg=g-import-not-at-top
yui_handlers = [
('/static/inputex-3.1.0/(.*)', sites.make_zip_handler(
os.path.join(
appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'))),
('/static/yui_3.6.0/(.*)', sites.make_zip_handler(
os.path.join(
appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'))),
('/static/2in3/(.*)', sites.make_zip_handler(
os.path.join(
appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip')))]
if appengine_config.BUNDLE_LIB_FILES:
yui_handlers += [
('/static/combo/inputex', sites.make_css_combo_zip_handler(
os.path.join(
appengine_config.BUNDLE_ROOT, 'lib/inputex-3.1.0.zip'),
'/static/inputex-3.1.0/')),
('/static/combo/yui', sites.make_css_combo_zip_handler(
os.path.join(appengine_config.BUNDLE_ROOT, 'lib/yui_3.6.0.zip'),
'/yui/')),
('/static/combo/2in3', sites.make_css_combo_zip_handler(
os.path.join(
appengine_config.BUNDLE_ROOT, 'lib/yui_2in3-2.9.0.zip'),
'/static/2in3/'))]
oeditor_handlers = [('/oeditorpopup', PopupHandler)]
global custom_module
custom_module = custom_modules.Module(
'Object Editor',
'A visual editor for editing various types of objects.',
yui_handlers, oeditor_handlers)
return custom_module
| apache-2.0 |
muffinresearch/olympia | conftest.py | 6 | 4031 | from django import http, test
from django.conf import settings
from django.core.cache import cache
from django.utils import translation
import caching
import pytest
import amo
from access.models import Group, GroupUser
from translations.hold import clean_translations
from users.models import UserProfile
@pytest.fixture(autouse=True)
def mock_inline_css(monkeypatch):
"""Mock jingo_minify.helpers.is_external: don't break on missing files.
When testing, we don't want nor need the bundled/minified css files, so
pretend that all the css files are external.
Mocking this will prevent amo.helpers.inline_css to believe it should
bundle the css.
"""
import amo.helpers
monkeypatch.setattr(amo.helpers, 'is_external', lambda css: True)
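# With xdist, index names below become e.g. 'test_gw0_amo_addons' for slave
# 'gw0' (illustrative ids), so parallel test processes never share
# Elasticsearch indexes or cache keys.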
def prefix_indexes(config):
"""Prefix all ES index names and cache keys with `test_` and, if running
under xdist, the ID of the current slave."""
if hasattr(config, 'slaveinput'):
prefix = 'test_{[slaveid]}'.format(config.slaveinput)
else:
prefix = 'test'
from django.conf import settings
# Ideally, this should be a session-scoped fixture that gets injected into
# any test that requires ES. This would be especially useful, as it would
# allow xdist to transparently group all ES tests into a single process.
    # Unfortunately, it's surprisingly difficult to achieve with our current
# unittest-based setup.
for key, index in settings.ES_INDEXES.items():
if not index.startswith(prefix):
settings.ES_INDEXES[key] = '{prefix}_amo_{index}'.format(
prefix=prefix, index=index)
settings.CACHE_PREFIX = 'amo:{0}:'.format(prefix)
settings.KEY_PREFIX = settings.CACHE_PREFIX
def pytest_configure(config):
prefix_indexes(config)
@pytest.fixture(autouse=True, scope='session')
def instrument_jinja():
"""Make sure the "templates" list in a response is properly updated, even
though we're using Jinja2 and not the default django template engine."""
import jinja2
old_render = jinja2.Template.render
def instrumented_render(self, *args, **kwargs):
context = dict(*args, **kwargs)
test.signals.template_rendered.send(
sender=self, template=self, context=context)
return old_render(self, *args, **kwargs)
jinja2.Template.render = instrumented_render
def default_prefixer():
"""Make sure each test starts with a default URL prefixer."""
request = http.HttpRequest()
request.META['SCRIPT_NAME'] = ''
prefixer = amo.urlresolvers.Prefixer(request)
prefixer.app = settings.DEFAULT_APP
prefixer.locale = settings.LANGUAGE_CODE
amo.urlresolvers.set_url_prefix(prefixer)
@pytest.fixture(autouse=True)
def test_pre_setup():
cache.clear()
# Override django-cache-machine caching.base.TIMEOUT because it's
# computed too early, before settings_test.py is imported.
caching.base.TIMEOUT = settings.CACHE_COUNT_TIMEOUT
translation.trans_real.deactivate()
# Django fails to clear this cache.
translation.trans_real._translations = {}
translation.trans_real.activate(settings.LANGUAGE_CODE)
# Reset the prefixer.
default_prefixer()
@pytest.fixture(autouse=True)
def test_post_teardown():
amo.set_user(None)
clean_translations(None) # Make sure queued translations are removed.
# Make sure we revert everything we might have changed to prefixers.
amo.urlresolvers.clean_url_prefixes()
@pytest.fixture
def admin_group(db):
"""Create the Admins group."""
return Group.objects.create(name='Admins', rules='*:*')
@pytest.fixture
def mozilla_user(admin_group):
"""Create a "Mozilla User"."""
user = UserProfile.objects.create(pk=settings.TASK_USER_ID,
email='admin@mozilla.com',
username='admin')
user.set_password('password')
user.save()
GroupUser.objects.create(user=user, group=admin_group)
return user
| bsd-3-clause |
m039/Void | third-party/void-boost/tools/build/src/build/scanner.py | 8 | 6258 | # Status: ported.
# Base revision: 45462
#
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004, 2005 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Implements scanners: objects that compute implicit dependencies for
# files, such as includes in C++.
#
# Scanner has a regular expression used to find dependencies, some
# data needed to interpret those dependencies (for example, include
# paths), and a code which actually established needed relationship
# between actual jam targets.
#
# Scanner objects are created by actions, when they try to actualize
# virtual targets, passed to 'virtual-target.actualize' method and are
# then associated with actual targets. It is possible to use
# several scanners for a virtual-target. For example, a single source
# might be used by to compile actions, with different include paths.
# In this case, two different actual targets will be created, each
# having scanner of its own.
#
# Typically, scanners are created from target type and action's
# properties, using the rule 'get' in this module. Directly creating
# scanners is not recommended, because it might create many equvivalent
# but different instances, and lead in unneeded duplication of
# actual targets. However, actions can also create scanners in a special
# way, instead of relying on just target type.
import property
import bjam
import os
from b2.manager import get_manager
from b2.util import is_iterable_typed
def reset ():
""" Clear the module state. This is mainly for testing purposes.
"""
global __scanners, __rv_cache, __scanner_cache
# Maps registered scanner classes to relevant properties
__scanners = {}
# A cache of scanners.
# The key is: class_name.properties_tag, where properties_tag is the concatenation
# of all relevant properties, separated by '-'
__scanner_cache = {}
reset ()
def register(scanner_class, relevant_properties):
""" Registers a new generator class, specifying a set of
properties relevant to this scanner. Ctor for that class
should have one parameter: list of properties.
"""
assert issubclass(scanner_class, Scanner)
assert isinstance(relevant_properties, basestring)
__scanners[str(scanner_class)] = relevant_properties
def registered(scanner_class):
""" Returns true iff a scanner of that class is registered
"""
return str(scanner_class) in __scanners
def get(scanner_class, properties):
""" Returns an instance of previously registered scanner
with the specified properties.
"""
assert issubclass(scanner_class, Scanner)
assert is_iterable_typed(properties, basestring)
scanner_name = str(scanner_class)
if not registered(scanner_name):
raise BaseException ("attempt to get unregisted scanner: %s" % scanner_name)
relevant_properties = __scanners[scanner_name]
r = property.select(relevant_properties, properties)
scanner_id = scanner_name + '.' + '-'.join(r)
if scanner_id not in __scanner_cache:
__scanner_cache[scanner_id] = scanner_class(r)
return __scanner_cache[scanner_id]
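# Illustrative call: get(CommonScanner, ['<include>foo', '<os>NT']) keeps only
# the properties declared relevant at registration time, so requests that
# differ solely in irrelevant properties share one cached scanner instance.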
class Scanner:
""" Base scanner class.
"""
def __init__ (self):
pass
def pattern (self):
""" Returns a pattern to use for scanning.
"""
raise BaseException ("method must be overriden")
def process (self, target, matches, binding):
""" Establish necessary relationship between targets,
            given the actual target being scanned, and a list of
pattern matches in that file.
"""
raise BaseException ("method must be overriden")
# Common scanner class, which can be used when there's only one
# kind of includes (unlike C, where "" and <> includes have different
# search paths).
class CommonScanner(Scanner):
def __init__ (self, includes):
Scanner.__init__(self)
self.includes = includes
def process(self, target, matches, binding):
target_path = os.path.normpath(os.path.dirname(binding[0]))
bjam.call("mark-included", target, matches)
get_manager().engine().set_target_variable(matches, "SEARCH",
[target_path] + self.includes)
get_manager().scanners().propagate(self, matches)
class ScannerRegistry:
def __init__ (self, manager):
self.manager_ = manager
self.count_ = 0
self.exported_scanners_ = {}
def install (self, scanner, target, vtarget):
""" Installs the specified scanner on actual target 'target'.
vtarget: virtual target from which 'target' was actualized.
"""
assert isinstance(scanner, Scanner)
assert isinstance(target, basestring)
assert isinstance(vtarget, basestring)
engine = self.manager_.engine()
engine.set_target_variable(target, "HDRSCAN", scanner.pattern())
if scanner not in self.exported_scanners_:
exported_name = "scanner_" + str(self.count_)
self.count_ = self.count_ + 1
self.exported_scanners_[scanner] = exported_name
bjam.import_rule("", exported_name, scanner.process)
else:
exported_name = self.exported_scanners_[scanner]
engine.set_target_variable(target, "HDRRULE", exported_name)
# scanner reflects difference in properties affecting
# binding of 'target', which will be known when processing
# includes for it, will give information on how to
# interpret quoted includes.
engine.set_target_variable(target, "HDRGRIST", str(id(scanner)))
pass
def propagate(self, scanner, targets):
assert isinstance(scanner, Scanner)
assert is_iterable_typed(targets, basestring) or isinstance(targets, basestring)
engine = self.manager_.engine()
engine.set_target_variable(targets, "HDRSCAN", scanner.pattern())
engine.set_target_variable(targets, "HDRRULE",
self.exported_scanners_[scanner])
engine.set_target_variable(targets, "HDRGRIST", str(id(scanner)))
| mit |
tellesnobrega/horizon | openstack_dashboard/dashboards/project/access_and_security/security_groups/tests.py | 23 | 34856 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cgi
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import tables
INDEX_URL = reverse('horizon:project:access_and_security:index')
SG_CREATE_URL = reverse('horizon:project:access_and_security:'
'security_groups:create')
SG_VIEW_PATH = 'horizon:project:access_and_security:security_groups:%s'
SG_DETAIL_VIEW = SG_VIEW_PATH % 'detail'
SG_UPDATE_VIEW = SG_VIEW_PATH % 'update'
SG_ADD_RULE_VIEW = SG_VIEW_PATH % 'add_rule'
SG_TEMPLATE_PATH = 'project/access_and_security/security_groups/%s'
SG_DETAIL_TEMPLATE = SG_TEMPLATE_PATH % 'detail.html'
SG_CREATE_TEMPLATE = SG_TEMPLATE_PATH % 'create.html'
SG_UPDATE_TEMPLATE = SG_TEMPLATE_PATH % '_update.html'
def strip_absolute_base(uri):
return uri.split(settings.TESTSERVER, 1)[-1]
class SecurityGroupsViewTests(test.TestCase):
secgroup_backend = 'nova'
def setUp(self):
super(SecurityGroupsViewTests, self).setUp()
sec_group = self.security_groups.first()
self.detail_url = reverse(SG_DETAIL_VIEW, args=[sec_group.id])
self.edit_url = reverse(SG_ADD_RULE_VIEW, args=[sec_group.id])
self.update_url = reverse(SG_UPDATE_VIEW, args=[sec_group.id])
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def _add_security_group_rule_fixture(self, **kwargs):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(
IsA(http.HttpRequest),
kwargs.get('sec_group', sec_group.id),
kwargs.get('ingress', 'ingress'),
kwargs.get('ethertype', 'IPv4'),
kwargs.get('ip_protocol', rule.ip_protocol),
kwargs.get('from_port', int(rule.from_port)),
kwargs.get('to_port', int(rule.to_port)),
kwargs.get('cidr', rule.ip_range['cidr']),
kwargs.get('security_group', u'%s' % sec_group.id)).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
return sec_group, rule
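    # The fixture above only records mox expectations; each test still calls
    # self.mox.ReplayAll() before issuing the request, so the stubbed
    # api.network calls are checked against what the view actually invokes.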
@test.create_stubs({api.network: ('security_group_get',)})
def test_update_security_groups_get(self):
sec_group = self.security_groups.first()
api.network.security_group_get(IsA(http.HttpRequest),
sec_group.id).AndReturn(sec_group)
self.mox.ReplayAll()
res = self.client.get(self.update_url)
self.assertTemplateUsed(res, SG_UPDATE_TEMPLATE)
self.assertEqual(res.context['security_group'].name,
sec_group.name)
@test.create_stubs({api.network: ('security_group_update',
'security_group_get')})
def test_update_security_groups_post(self):
"""Ensure that we can change a group name.
The name must not be restricted to alphanumeric characters.
bug #1233501 Security group names cannot contain at characters
bug #1224576 Security group names cannot contain spaces
"""
sec_group = self.security_groups.first()
sec_group.name = "@new name"
api.network.security_group_update(
IsA(http.HttpRequest),
str(sec_group.id),
sec_group.name,
sec_group.description).AndReturn(sec_group)
api.network.security_group_get(
IsA(http.HttpRequest), sec_group.id).AndReturn(sec_group)
self.mox.ReplayAll()
form_data = {'method': 'UpdateGroup',
'id': sec_group.id,
'name': sec_group.name,
'description': sec_group.description}
res = self.client.post(self.update_url, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_create_security_groups_get(self):
res = self.client.get(SG_CREATE_URL)
self.assertTemplateUsed(res, SG_CREATE_TEMPLATE)
def test_create_security_groups_post(self):
sec_group = self.security_groups.first()
self._create_security_group(sec_group)
def test_create_security_groups_special_chars(self):
"""Ensure that a group name is not restricted to alphanumeric
characters.
bug #1233501 Security group names cannot contain at characters
bug #1224576 Security group names cannot contain spaces
"""
sec_group = self.security_groups.first()
sec_group.name = '@group name'
self._create_security_group(sec_group)
@test.create_stubs({api.network: ('security_group_create',)})
def _create_security_group(self, sec_group):
api.network.security_group_create(
IsA(http.HttpRequest),
sec_group.name,
sec_group.description).AndReturn(sec_group)
self.mox.ReplayAll()
form_data = {'method': 'CreateGroup',
'name': sec_group.name,
'description': sec_group.description}
res = self.client.post(SG_CREATE_URL, form_data)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('security_group_create',)})
def test_create_security_groups_post_exception(self):
sec_group = self.security_groups.first()
api.network.security_group_create(
IsA(http.HttpRequest),
sec_group.name,
sec_group.description).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
formData = {'method': 'CreateGroup',
'name': sec_group.name,
'description': sec_group.description}
res = self.client.post(SG_CREATE_URL, formData)
self.assertMessageCount(error=1)
self.assertRedirectsNoFollow(res, INDEX_URL)
@test.create_stubs({api.network: ('security_group_create',)})
def test_create_security_groups_non_printable(self):
"""Ensure that group names can only contain printable
ASCII characters.
Only 95 characters are allowed: from 0x20 (space) to 0x7E (~).
"""
sec_group = self.security_groups.first()
# 0x7F is a control character (DELETE)
fail_name = sec_group.name + ' \x7F'
self.mox.ReplayAll()
form_data = {'method': 'CreateGroup',
'name': fail_name,
'description': sec_group.description}
res = self.client.post(SG_CREATE_URL, form_data)
self.assertTemplateUsed(res, SG_CREATE_TEMPLATE)
self.assertContains(res, "ASCII")
@test.create_stubs({api.network: ('security_group_get',)})
def test_detail_get(self):
sec_group = self.security_groups.first()
api.network.security_group_get(IsA(http.HttpRequest),
sec_group.id).AndReturn(sec_group)
self.mox.ReplayAll()
res = self.client.get(self.detail_url)
self.assertTemplateUsed(res, SG_DETAIL_TEMPLATE)
@test.create_stubs({api.network: ('security_group_get',)})
def test_detail_get_exception(self):
sec_group = self.security_groups.first()
api.network.security_group_get(
IsA(http.HttpRequest),
sec_group.id).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
res = self.client.get(self.detail_url)
self.assertRedirectsNoFollow(res, INDEX_URL)
def test_detail_add_rule_cidr(self):
sec_group, rule = self._add_security_group_rule_fixture(
security_group=None)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': rule.from_port,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
def test_detail_add_rule_cidr_with_invalid_unused_fields(self):
sec_group, rule = self._add_security_group_rule_fixture(
security_group=None)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': rule.from_port,
'to_port': 'INVALID',
'from_port': 'INVALID',
'icmp_code': 'INVALID',
'icmp_type': 'INVALID',
'security_group': 'INVALID',
'ip_protocol': 'INVALID',
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.detail_url)
def test_detail_add_rule_securitygroup_with_invalid_unused_fields(self):
sec_group, rule = self._add_security_group_rule_fixture(
cidr=None, ethertype='')
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': rule.from_port,
'to_port': 'INVALID',
'from_port': 'INVALID',
'icmp_code': 'INVALID',
'icmp_type': 'INVALID',
'security_group': sec_group.id,
'ip_protocol': 'INVALID',
'rule_menu': rule.ip_protocol,
'cidr': 'INVALID',
'remote': 'sg'}
res = self.client.post(self.edit_url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.detail_url)
def test_detail_add_rule_icmp_with_invalid_unused_fields(self):
sec_group, rule = self._add_security_group_rule_fixture(
ip_protocol='icmp', security_group=None)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': 'INVALID',
'to_port': 'INVALID',
'from_port': 'INVALID',
'icmp_code': rule.to_port,
'icmp_type': rule.from_port,
'security_group': sec_group.id,
'ip_protocol': 'INVALID',
'rule_menu': 'icmp',
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoFormErrors(res)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_cidr_with_template(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(IsA(http.HttpRequest),
sec_group.id,
'ingress', 'IPv4',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
rule.ip_range['cidr'],
None).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'rule_menu': 'http',
'port_or_range': 'port',
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
def _get_source_group_rule(self):
return self.security_group_rules.get(id=3)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_self_as_source_group(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self._get_source_group_rule()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(
IsA(http.HttpRequest),
sec_group.id,
'ingress',
# ethertype is empty for source_group of Nova Security Group
'',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
None,
u'%s' % sec_group.id).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': rule.from_port,
'rule_menu': rule.ip_protocol,
'cidr': '0.0.0.0/0',
'security_group': sec_group.id,
'remote': 'sg'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_self_as_source_group_with_template(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self._get_source_group_rule()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(
IsA(http.HttpRequest),
sec_group.id,
'ingress',
# ethertype is empty for source_group of Nova Security Group
'',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
None,
u'%s' % sec_group.id).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'rule_menu': 'http',
'port_or_range': 'port',
'cidr': '0.0.0.0/0',
'security_group': sec_group.id,
'remote': 'sg'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_list',
'security_group_backend')})
def test_detail_invalid_port(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': None,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "The specified port is invalid")
@test.create_stubs({api.network: ('security_group_list',
'security_group_backend')})
def test_detail_invalid_port_range(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
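        # mox consumes one recorded expectation per real call during replay,
        # so the backend/list stubs are recorded once for each of the three
        # POSTs issued below.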
for i in range(3):
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'range',
'from_port': rule.from_port,
'to_port': int(rule.from_port) - 1,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "greater than or equal to")
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'range',
'from_port': None,
'to_port': rule.to_port,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, cgi.escape('"from" port number is invalid',
quote=True))
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'range',
'from_port': rule.from_port,
'to_port': None,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, cgi.escape('"to" port number is invalid',
quote=True))
@test.create_stubs({api.network: ('security_group_get',
'security_group_list',
'security_group_backend')})
def test_detail_invalid_icmp_rule(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
icmp_rule = self.security_group_rules.list()[1]
# Call POST 4 times
for i in range(4):
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'icmp_type': 256,
'icmp_code': icmp_rule.to_port,
'rule_menu': icmp_rule.ip_protocol,
'cidr': icmp_rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "The ICMP type not in range (-1, 255)")
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'icmp_type': icmp_rule.from_port,
'icmp_code': 256,
'rule_menu': icmp_rule.ip_protocol,
'cidr': icmp_rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "The ICMP code not in range (-1, 255)")
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'icmp_type': icmp_rule.from_port,
'icmp_code': None,
'rule_menu': icmp_rule.ip_protocol,
'cidr': icmp_rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "The ICMP code is invalid")
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'icmp_type': None,
'icmp_code': icmp_rule.to_port,
'rule_menu': icmp_rule.ip_protocol,
'cidr': icmp_rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertNoMessages()
self.assertContains(res, "The ICMP type is invalid")
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_exception(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(
IsA(http.HttpRequest),
sec_group.id, 'ingress', 'IPv4',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
rule.ip_range['cidr'],
None).AndRaise(self.exceptions.nova)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'port_or_range': 'port',
'port': rule.from_port,
'rule_menu': rule.ip_protocol,
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_delete',)})
def test_detail_delete_rule(self):
sec_group = self.security_groups.first()
rule = self.security_group_rules.first()
api.network.security_group_rule_delete(IsA(http.HttpRequest), rule.id)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % rule.id}
req = self.factory.post(self.edit_url, form_data)
kwargs = {'security_group_id': sec_group.id}
table = tables.RulesTable(req, sec_group.rules, **kwargs)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_delete',)})
def test_detail_delete_rule_exception(self):
sec_group = self.security_groups.first()
rule = self.security_group_rules.first()
api.network.security_group_rule_delete(
IsA(http.HttpRequest),
rule.id).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {"action": "rules__delete__%s" % rule.id}
req = self.factory.post(self.edit_url, form_data)
kwargs = {'security_group_id': sec_group.id}
table = tables.RulesTable(
req, self.security_group_rules.list(), **kwargs)
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
self.detail_url)
@test.create_stubs({api.network: ('security_group_delete',)})
def test_delete_group(self):
sec_group = self.security_groups.get(name="other_group")
api.network.security_group_delete(IsA(http.HttpRequest), sec_group.id)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % sec_group.id}
req = self.factory.post(INDEX_URL, form_data)
table = tables.SecurityGroupsTable(req, self.security_groups.list())
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
@test.create_stubs({api.network: ('security_group_delete',)})
def test_delete_group_exception(self):
sec_group = self.security_groups.get(name="other_group")
api.network.security_group_delete(
IsA(http.HttpRequest),
sec_group.id).AndRaise(self.exceptions.nova)
self.mox.ReplayAll()
form_data = {"action": "security_groups__delete__%s" % sec_group.id}
req = self.factory.post(INDEX_URL, form_data)
table = tables.SecurityGroupsTable(req, self.security_groups.list())
handled = table.maybe_handle()
self.assertEqual(strip_absolute_base(handled['location']),
INDEX_URL)
class SecurityGroupsNovaNeutronDriverTests(SecurityGroupsViewTests):
secgroup_backend = 'nova'
def setUp(self):
super(SecurityGroupsNovaNeutronDriverTests, self).setUp()
self._sec_groups_orig = self.security_groups
self.security_groups = self.security_groups_uuid
self._sec_group_rules_orig = self.security_group_rules
self.security_group_rules = self.security_group_rules_uuid
sec_group = self.security_groups.first()
self.detail_url = reverse(SG_DETAIL_VIEW, args=[sec_group.id])
self.edit_url = reverse(SG_ADD_RULE_VIEW, args=[sec_group.id])
self.update_url = reverse(SG_UPDATE_VIEW, args=[sec_group.id])
def tearDown(self):
self.security_groups = self._sec_groups_orig
self.security_group_rules = self._sec_group_rules_orig
super(SecurityGroupsNovaNeutronDriverTests, self).tearDown()
class SecurityGroupsNeutronTests(SecurityGroupsViewTests):
secgroup_backend = 'neutron'
def setUp(self):
super(SecurityGroupsNeutronTests, self).setUp()
self._sec_groups_orig = self.security_groups
self.security_groups = self.q_secgroups
self._sec_group_rules_orig = self.security_group_rules
self.security_group_rules = self.q_secgroup_rules
sec_group = self.security_groups.first()
self.detail_url = reverse(SG_DETAIL_VIEW, args=[sec_group.id])
self.edit_url = reverse(SG_ADD_RULE_VIEW, args=[sec_group.id])
self.update_url = reverse(SG_UPDATE_VIEW, args=[sec_group.id])
def tearDown(self):
self.security_groups = self._sec_groups_orig
self.security_group_rules = self._sec_group_rules_orig
super(SecurityGroupsNeutronTests, self).tearDown()
def _get_source_group_rule(self):
for rule in self.security_group_rules.list():
if rule.group:
return rule
raise Exception("No matches found.")
# Additional tests for Neutron Security Group original features
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_custom_protocol(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(IsA(http.HttpRequest),
sec_group.id, 'ingress', 'IPv6',
37, None, None, 'fe80::/48',
None).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'rule_menu': 'custom',
'direction': 'ingress',
'port_or_range': 'port',
'ip_protocol': 37,
'cidr': 'fe80::/48',
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_egress(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.first()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(IsA(http.HttpRequest),
sec_group.id, 'egress', 'IPv4',
'udp', 80, 80, '10.1.1.0/24',
None).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'direction': 'egress',
'rule_menu': 'udp',
'port_or_range': 'port',
'port': 80,
'cidr': '10.1.1.0/24',
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_egress_with_all_tcp(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self.security_group_rules.list()[3]
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(IsA(http.HttpRequest),
sec_group.id, 'egress', 'IPv4',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
rule.ip_range['cidr'],
None).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'direction': 'egress',
'port_or_range': 'range',
'rule_menu': 'all_tcp',
'cidr': rule.ip_range['cidr'],
'remote': 'cidr'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
@test.create_stubs({api.network: ('security_group_rule_create',
'security_group_list',
'security_group_backend')})
def test_detail_add_rule_source_group_with_direction_ethertype(self):
sec_group = self.security_groups.first()
sec_group_list = self.security_groups.list()
rule = self._get_source_group_rule()
api.network.security_group_backend(
IsA(http.HttpRequest)).AndReturn(self.secgroup_backend)
api.network.security_group_rule_create(
IsA(http.HttpRequest),
sec_group.id,
'egress',
            # ethertype is set explicitly for Neutron security group rules
'IPv6',
rule.ip_protocol,
int(rule.from_port),
int(rule.to_port),
None,
u'%s' % sec_group.id).AndReturn(rule)
api.network.security_group_list(
IsA(http.HttpRequest)).AndReturn(sec_group_list)
self.mox.ReplayAll()
formData = {'method': 'AddRule',
'id': sec_group.id,
'direction': 'egress',
'port_or_range': 'port',
'port': rule.from_port,
'rule_menu': rule.ip_protocol,
'cidr': '0.0.0.0/0',
'security_group': sec_group.id,
'remote': 'sg',
'ethertype': 'IPv6'}
res = self.client.post(self.edit_url, formData)
self.assertRedirectsNoFollow(res, self.detail_url)
| apache-2.0 |
dieterich-lab/DCC | DCC/circFilter.py | 1 | 5910 | import numpy as np
import os
import sys
import HTSeq
from IntervalTree import IntervalTree
##########################
# Input of this script #
##########################
# This script takes as input a count table:
# chr start end junctiontype count1 count2 ... countn
# and a repetitive region file in GTF format,
# plus a minimum circular RNA length.
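# A minimal sketch of the expression filter implemented by
# Circfilter.filtercount below: a candidate with junction type != 0 is kept
# when at least `replicatethreshold` samples reach `countthreshold` reads.
# The thresholds and counts here are illustrative assumptions, not DCC
# defaults:
#
#     counts = np.array([[5, 0, 7],
#                        [1, 0, 0]])
#     kept = [i for i, row in enumerate(counts) if sum(row >= 2) >= 2]
#     # kept == [0]: only the first candidate has >= 2 reads in >= 2 samples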
class Circfilter(object):
def __init__(self, length, countthreshold, replicatethreshold, tmp_dir):
'''
        counttable: the circular RNA count file, typically generated by findcircRNA.py: chr start end junctiontype count1 count2 ... countn
        rep_file: the GTF file specifying the repetitive regions of the analyzed genome
        length: the minimum length of circular RNAs
        countthreshold: the minimum read count required for junction-type-1 circular RNAs
        replicatethreshold: the minimum number of samples in which countthreshold must be reached
'''
# self.counttable = counttable
# self.rep_file = rep_file
self.length = int(length)
# self.level0 = int(level0)
self.countthreshold = int(countthreshold)
# self.threshold0 = int(threshold0)
self.replicatethreshold = int(replicatethreshold)
self.tmp_dir = tmp_dir
# Read circRNA count and coordinates information to numpy array
def readcirc(self, countfile, coordinates):
# Read the circRNA count file
circ = open(countfile, 'r')
coor = open(coordinates, 'r')
count = []
indx = []
for line in circ:
fields = line.split('\t')
# row_indx = [str(itm) for itm in fields[0:4]]
# print row_indx
try:
row_count = [int(itm) for itm in fields[4:]]
except ValueError:
row_count = [float(itm) for itm in fields[4:]]
count.append(row_count)
# indx.append(row_indx)
for line in coor:
fields = line.split('\t')
row_indx = [str(itm).strip() for itm in fields[0:6]]
indx.append(row_indx)
count = np.array(count)
indx = np.array(indx)
circ.close()
return count, indx
# Do filtering
def filtercount(self, count, indx):
print 'Filtering by read counts'
sel = [] # store the passed filtering rows
for itm in range(len(count)):
if indx[itm][4] == '0':
# if sum( count[itm] >= self.level0 ) >= self.threshold0:
# sel.append(itm)
pass
elif indx[itm][4] != '0':
if sum(count[itm] >= self.countthreshold) >= self.replicatethreshold:
sel.append(itm)
# splicing the passed filtering rows
if len(sel) == 0:
sys.exit("No circRNA passed the expression threshold filtering.")
return count[sel], indx[sel]
def read_rep_region(self, regionfile):
regions = HTSeq.GFF_Reader(regionfile, end_included=True)
rep_tree = IntervalTree()
for feature in regions:
iv = feature.iv
rep_tree.insert(iv, annotation='.')
return rep_tree
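    # Illustration (assumed input, not a file shipped with DCC): a GTF line
    # such as
    #   chr1  RepeatMasker  region  3000  3500  .  +  .  gene_id "Alu";
    # becomes a GenomicInterval via HTSeq.GFF_Reader and is stored in the
    # interval tree, so filter_nonrep can test the circRNA junction flanks
    # for overlap with repetitive regions.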
def filter_nonrep(self, regionfile, indx0, count0):
        if regionfile is not None:
rep_tree = self.read_rep_region(regionfile)
def numpy_array_2_GenomiInterval(array):
left = HTSeq.GenomicInterval(str(array[0]), int(array[1]), int(array[1]) + self.length, str(array[5]))
right = HTSeq.GenomicInterval(str(array[0]), int(array[2]) - self.length, int(array[2]), str(array[5]))
return left, right
keep_index = []
for i, j in enumerate(indx0):
out = []
left, right = numpy_array_2_GenomiInterval(j)
rep_tree.intersect(left, lambda x: out.append(x))
rep_tree.intersect(right, lambda x: out.append(x))
if not out:
# not in repetitive region
keep_index.append(i)
indx0 = indx0[keep_index]
count0 = count0[keep_index]
nonrep = np.column_stack((indx0, count0))
# write the result
np.savetxt(self.tmp_dir + 'tmp_unsortedWithChrM', nonrep, delimiter='\t', newline='\n', fmt='%s')
def dummy_filter(self, indx0, count0):
nonrep = np.column_stack((indx0, count0))
# write the result
np.savetxt(self.tmp_dir + 'tmp_unsortedWithChrM', nonrep, delimiter='\t', newline='\n', fmt='%s')
def removeChrM(self, withChrM):
print 'Remove ChrM'
unremoved = open(withChrM, 'r').readlines()
removed = []
for lines in unremoved:
if not lines.startswith('chrM') and not lines.startswith('MT'):
removed.append(lines)
removedfile = open(self.tmp_dir + 'tmp_unsortedNoChrM', 'w')
removedfile.writelines(removed)
removedfile.close()
def sortOutput(self, unsorted, outCount, outCoordinates, samplelist=None):
        # samplelist is a string of sample names separated by \t.
        # Each line is split so that the coordinate and count columns can be
        # written to separate files even when they are stored together.
count = open(outCount, 'w')
coor = open(outCoordinates, 'w')
if samplelist:
count.write('Chr\tStart\tEnd\t' + samplelist + '\n')
lines = open(unsorted).readlines()
for line in lines:
linesplit = [x.strip() for x in line.split('\t')]
count.write('\t'.join(linesplit[0:3] + list(linesplit[6:])) + '\n')
coor.write('\t'.join(linesplit[0:6]) + '\n')
coor.close()
count.close()
def remove_tmp(self):
try:
os.remove(self.tmp_dir + 'tmp_left')
os.remove(self.tmp_dir + 'tmp_right')
os.remove(self.tmp_dir + 'tmp_unsortedWithChrM')
os.remove(self.tmp_dir + 'tmp_unsortedNoChrM')
except OSError:
pass
| gpl-3.0 |
gx1997/chrome-loongson | net/tools/testserver/chromiumsync.py | 9 | 50327 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An implementation of the server side of the Chromium sync protocol.
The details of the protocol are described mostly by comments in the protocol
buffer definition at chrome/browser/sync/protocol/sync.proto.
"""
import cgi
import copy
import operator
import pickle
import random
import sys
import threading
import time
import urlparse
import app_notification_specifics_pb2
import app_setting_specifics_pb2
import app_specifics_pb2
import autofill_specifics_pb2
import bookmark_specifics_pb2
import get_updates_caller_info_pb2
import extension_setting_specifics_pb2
import extension_specifics_pb2
import nigori_specifics_pb2
import password_specifics_pb2
import preference_specifics_pb2
import search_engine_specifics_pb2
import session_specifics_pb2
import sync_pb2
import sync_enums_pb2
import theme_specifics_pb2
import typed_url_specifics_pb2
# An enumeration of the various kinds of data that can be synced.
# Over the wire, this enumeration is not used: a sync object's type is
# inferred by which EntitySpecifics field it has. But in the context
# of a program, it is useful to have an enumeration.
ALL_TYPES = (
TOP_LEVEL, # The type of the 'Google Chrome' folder.
APPS,
APP_NOTIFICATION,
APP_SETTINGS,
AUTOFILL,
AUTOFILL_PROFILE,
BOOKMARK,
EXTENSIONS,
NIGORI,
PASSWORD,
PREFERENCE,
SEARCH_ENGINE,
SESSION,
THEME,
TYPED_URL,
EXTENSION_SETTINGS) = range(16)
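# For illustration (a sketch, using field names from SYNC_TYPE_FIELDS below):
# on the wire, setting exactly one EntitySpecifics field marks the type.
#
#     specifics = sync_pb2.EntitySpecifics()
#     specifics.bookmark.SetInParent()  # marks the entity as a BOOKMARK
#     specifics.HasField('bookmark')    # -> True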
# An enumeration of the frequency at which the server should send errors
# to the client. This would be specified by the url that triggers the error.
# Note: This enum should be kept in the same order as the enum in sync_test.h.
SYNC_ERROR_FREQUENCY = (
ERROR_FREQUENCY_NONE,
ERROR_FREQUENCY_ALWAYS,
ERROR_FREQUENCY_TWO_THIRDS) = range(3)
# Well-known server tag of the top level 'Google Chrome' folder.
TOP_LEVEL_FOLDER_TAG = 'google_chrome'
# Given a sync type from ALL_TYPES, find the FieldDescriptor corresponding
# to that datatype. Note that TOP_LEVEL has no such token.
SYNC_TYPE_FIELDS = sync_pb2.EntitySpecifics.DESCRIPTOR.fields_by_name
SYNC_TYPE_TO_DESCRIPTOR = {
APP_NOTIFICATION: SYNC_TYPE_FIELDS['app_notification'],
APP_SETTINGS: SYNC_TYPE_FIELDS['app_setting'],
APPS: SYNC_TYPE_FIELDS['app'],
AUTOFILL: SYNC_TYPE_FIELDS['autofill'],
AUTOFILL_PROFILE: SYNC_TYPE_FIELDS['autofill_profile'],
BOOKMARK: SYNC_TYPE_FIELDS['bookmark'],
EXTENSION_SETTINGS: SYNC_TYPE_FIELDS['extension_setting'],
EXTENSIONS: SYNC_TYPE_FIELDS['extension'],
NIGORI: SYNC_TYPE_FIELDS['nigori'],
PASSWORD: SYNC_TYPE_FIELDS['password'],
PREFERENCE: SYNC_TYPE_FIELDS['preference'],
SEARCH_ENGINE: SYNC_TYPE_FIELDS['search_engine'],
SESSION: SYNC_TYPE_FIELDS['session'],
THEME: SYNC_TYPE_FIELDS['theme'],
TYPED_URL: SYNC_TYPE_FIELDS['typed_url'],
}
# The parent ID used to indicate a top-level node.
ROOT_ID = '0'
# Unix time epoch in struct_time format. The tuple corresponds to UTC Wednesday
# Jan 1 1970, 00:00:00, non-dst.
UNIX_TIME_EPOCH = (1970, 1, 1, 0, 0, 0, 3, 1, 0)
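# Sketch of how this epoch is used in CommitEntry below: commit times are
# stored as milliseconds since the Unix epoch, computed as
#
#     mtime_ms = int((time.mktime(time.gmtime()) -
#                     time.mktime(UNIX_TIME_EPOCH)) * 1000)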
class Error(Exception):
"""Error class for this module."""
class ProtobufDataTypeFieldNotUnique(Error):
"""An entry should not have more than one data type present."""
class DataTypeIdNotRecognized(Error):
"""The requested data type is not recognized."""
class MigrationDoneError(Error):
"""A server-side migration occurred; clients must re-sync some datatypes.
Attributes:
datatypes: a list of the datatypes (python enum) needing migration.
"""
def __init__(self, datatypes):
self.datatypes = datatypes
class StoreBirthdayError(Error):
"""The client sent a birthday that doesn't correspond to this server."""
class TransientError(Error):
"""The client would be sent a transient error."""
class SyncInducedError(Error):
"""The client would be sent an error."""
class InducedErrorFrequencyNotDefined(Error):
"""The error frequency defined is not handled."""
def GetEntryType(entry):
"""Extract the sync type from a SyncEntry.
Args:
entry: A SyncEntity protobuf object whose type to determine.
Returns:
An enum value from ALL_TYPES if the entry's type can be determined, or None
if the type cannot be determined.
Raises:
ProtobufDataTypeFieldNotUnique: More than one type was indicated by
the entry.
"""
if entry.server_defined_unique_tag == TOP_LEVEL_FOLDER_TAG:
return TOP_LEVEL
entry_types = GetEntryTypesFromSpecifics(entry.specifics)
if not entry_types:
return None
# If there is more than one, either there's a bug, or else the caller
# should use GetEntryTypes.
if len(entry_types) > 1:
raise ProtobufDataTypeFieldNotUnique
return entry_types[0]
def GetEntryTypesFromSpecifics(specifics):
"""Determine the sync types indicated by an EntitySpecifics's field(s).
If the specifics have more than one recognized data type field (as commonly
happens with the requested_types field of GetUpdatesMessage), all types
will be returned. Callers must handle the possibility of the returned
value having more than one item.
Args:
specifics: A EntitySpecifics protobuf message whose extensions to
enumerate.
Returns:
A list of the sync types (values from ALL_TYPES) associated with each
recognized extension of the specifics message.
"""
return [data_type for data_type, field_descriptor
in SYNC_TYPE_TO_DESCRIPTOR.iteritems()
if specifics.HasField(field_descriptor.name)]
def SyncTypeToProtocolDataTypeId(data_type):
"""Convert from a sync type (python enum) to the protocol's data type id."""
return SYNC_TYPE_TO_DESCRIPTOR[data_type].number
def ProtocolDataTypeIdToSyncType(protocol_data_type_id):
"""Convert from the protocol's data type id to a sync type (python enum)."""
for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems():
if field_descriptor.number == protocol_data_type_id:
return data_type
raise DataTypeIdNotRecognized
def DataTypeStringToSyncTypeLoose(data_type_string):
"""Converts a human-readable string to a sync type (python enum).
Capitalization and pluralization don't matter; this function is appropriate
for values that might have been typed by a human being; e.g., command-line
flags or query parameters.
"""
if data_type_string.isdigit():
return ProtocolDataTypeIdToSyncType(int(data_type_string))
name = data_type_string.lower().rstrip('s')
for data_type, field_descriptor in SYNC_TYPE_TO_DESCRIPTOR.iteritems():
if field_descriptor.name.lower().rstrip('s') == name:
return data_type
raise DataTypeIdNotRecognized
def SyncTypeToString(data_type):
"""Formats a sync type enum (from ALL_TYPES) to a human-readable string."""
return SYNC_TYPE_TO_DESCRIPTOR[data_type].name
def CallerInfoToString(caller_info_source):
"""Formats a GetUpdatesSource enum value to a readable string."""
return get_updates_caller_info_pb2.GetUpdatesCallerInfo \
.DESCRIPTOR.enum_types_by_name['GetUpdatesSource'] \
.values_by_number[caller_info_source].name
def ShortDatatypeListSummary(data_types):
"""Formats compactly a list of sync types (python enums) for human eyes.
This function is intended for use by logging. If the list of datatypes
contains almost all of the values, the return value will be expressed
in terms of the datatypes that aren't set.
"""
included = set(data_types) - set([TOP_LEVEL])
if not included:
return 'nothing'
excluded = set(ALL_TYPES) - included - set([TOP_LEVEL])
if not excluded:
return 'everything'
simple_text = '+'.join(sorted([SyncTypeToString(x) for x in included]))
all_but_text = 'all except %s' % (
'+'.join(sorted([SyncTypeToString(x) for x in excluded])))
if len(included) < len(excluded) or len(simple_text) <= len(all_but_text):
return simple_text
else:
return all_but_text
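# Example outputs, given the enum values defined above:
#
#     ShortDatatypeListSummary([BOOKMARK, PREFERENCE])  # 'bookmark+preference'
#     ShortDatatypeListSummary(list(ALL_TYPES))         # 'everything'
#     ShortDatatypeListSummary([])                      # 'nothing'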
def GetDefaultEntitySpecifics(data_type):
"""Get an EntitySpecifics having a sync type's default field value."""
specifics = sync_pb2.EntitySpecifics()
if data_type in SYNC_TYPE_TO_DESCRIPTOR:
descriptor = SYNC_TYPE_TO_DESCRIPTOR[data_type]
getattr(specifics, descriptor.name).SetInParent()
return specifics
class PermanentItem(object):
"""A specification of one server-created permanent item.
Attributes:
tag: A known-to-the-client value that uniquely identifies a server-created
permanent item.
name: The human-readable display name for this item.
parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates
a top-level item. Otherwise, this must be the tag value of some other
server-created permanent item.
sync_type: A value from ALL_TYPES, giving the datatype of this permanent
item. This controls which types of client GetUpdates requests will
cause the permanent item to be created and returned.
create_by_default: Whether the permanent item is created at startup or not.
This value is set to True in the default case. Non-default permanent items
are those that are created only when a client explicitly tells the server
to do so.
"""
def __init__(self, tag, name, parent_tag, sync_type, create_by_default=True):
self.tag = tag
self.name = name
self.parent_tag = parent_tag
self.sync_type = sync_type
self.create_by_default = create_by_default
class MigrationHistory(object):
"""A record of the migration events associated with an account.
Each migration event invalidates one or more datatypes on all clients
that had synced the datatype before the event. Such clients will continue
to receive MigrationDone errors until they throw away their progress and
re-sync that datatype from the beginning.
"""
def __init__(self):
self._migrations = {}
for datatype in ALL_TYPES:
self._migrations[datatype] = [1]
self._next_migration_version = 2
def GetLatestVersion(self, datatype):
return self._migrations[datatype][-1]
def CheckAllCurrent(self, versions_map):
"""Raises an error if any the provided versions are out of date.
This function intentionally returns migrations in the order that they were
triggered. Doing it this way allows the client to queue up two migrations
in a row, so the second one is received while responding to the first.
Arguments:
      versions_map: a map whose keys are datatypes and whose values are versions.
Raises:
MigrationDoneError: if a mismatch is found.
"""
problems = {}
for datatype, client_migration in versions_map.iteritems():
for server_migration in self._migrations[datatype]:
if client_migration < server_migration:
problems.setdefault(server_migration, []).append(datatype)
if problems:
raise MigrationDoneError(problems[min(problems.keys())])
def Bump(self, datatypes):
"""Add a record of a migration, to cause errors on future requests."""
    for datatype in datatypes:
self._migrations[datatype].append(self._next_migration_version)
self._next_migration_version += 1
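# For example: after Bump([BOOKMARK]), GetLatestVersion(BOOKMARK) returns 2,
# and CheckAllCurrent({BOOKMARK: 1}) raises MigrationDoneError([BOOKMARK]);
# this is how clients that synced bookmarks before the migration are told to
# re-sync them from scratch.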
class UpdateSieve(object):
"""A filter to remove items the client has already seen."""
def __init__(self, request, migration_history=None):
self._original_request = request
self._state = {}
self._migration_history = migration_history or MigrationHistory()
self._migration_versions_to_check = {}
if request.from_progress_marker:
for marker in request.from_progress_marker:
data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id)
if marker.HasField('timestamp_token_for_migration'):
timestamp = marker.timestamp_token_for_migration
if timestamp:
self._migration_versions_to_check[data_type] = 1
elif marker.token:
(timestamp, version) = pickle.loads(marker.token)
self._migration_versions_to_check[data_type] = version
elif marker.HasField('token'):
timestamp = 0
else:
raise ValueError('No timestamp information in progress marker.')
data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id)
self._state[data_type] = timestamp
elif request.HasField('from_timestamp'):
for data_type in GetEntryTypesFromSpecifics(request.requested_types):
self._state[data_type] = request.from_timestamp
self._migration_versions_to_check[data_type] = 1
if self._state:
self._state[TOP_LEVEL] = min(self._state.itervalues())
def SummarizeRequest(self):
timestamps = {}
for data_type, timestamp in self._state.iteritems():
if data_type == TOP_LEVEL:
continue
timestamps.setdefault(timestamp, []).append(data_type)
return ', '.join('<%s>@%d' % (ShortDatatypeListSummary(types), stamp)
for stamp, types in sorted(timestamps.iteritems()))
def CheckMigrationState(self):
self._migration_history.CheckAllCurrent(self._migration_versions_to_check)
def ClientWantsItem(self, item):
"""Return true if the client hasn't already seen an item."""
return self._state.get(GetEntryType(item), sys.maxint) < item.version
def HasAnyTimestamp(self):
"""Return true if at least one datatype was requested."""
return bool(self._state)
def GetMinTimestamp(self):
"""Return true the smallest timestamp requested across all datatypes."""
return min(self._state.itervalues())
def GetFirstTimeTypes(self):
"""Return a list of datatypes requesting updates from timestamp zero."""
return [datatype for datatype, timestamp in self._state.iteritems()
if timestamp == 0]
def SaveProgress(self, new_timestamp, get_updates_response):
"""Write the new_timestamp or new_progress_marker fields to a response."""
if self._original_request.from_progress_marker:
for data_type, old_timestamp in self._state.iteritems():
if data_type == TOP_LEVEL:
continue
new_marker = sync_pb2.DataTypeProgressMarker()
new_marker.data_type_id = SyncTypeToProtocolDataTypeId(data_type)
final_stamp = max(old_timestamp, new_timestamp)
final_migration = self._migration_history.GetLatestVersion(data_type)
new_marker.token = pickle.dumps((final_stamp, final_migration))
if new_marker not in self._original_request.from_progress_marker:
get_updates_response.new_progress_marker.add().MergeFrom(new_marker)
elif self._original_request.HasField('from_timestamp'):
if self._original_request.from_timestamp < new_timestamp:
get_updates_response.new_timestamp = new_timestamp
class SyncDataModel(object):
"""Models the account state of one sync user."""
_BATCH_SIZE = 100
# Specify all the permanent items that a model might need.
_PERMANENT_ITEM_SPECS = [
PermanentItem('google_chrome', name='Google Chrome',
parent_tag=ROOT_ID, sync_type=TOP_LEVEL),
PermanentItem('google_chrome_bookmarks', name='Bookmarks',
parent_tag='google_chrome', sync_type=BOOKMARK),
PermanentItem('bookmark_bar', name='Bookmark Bar',
parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
PermanentItem('other_bookmarks', name='Other Bookmarks',
parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
PermanentItem('synced_bookmarks', name='Synced Bookmarks',
parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK,
create_by_default=False),
PermanentItem('google_chrome_preferences', name='Preferences',
parent_tag='google_chrome', sync_type=PREFERENCE),
PermanentItem('google_chrome_autofill', name='Autofill',
parent_tag='google_chrome', sync_type=AUTOFILL),
PermanentItem('google_chrome_autofill_profiles', name='Autofill Profiles',
parent_tag='google_chrome', sync_type=AUTOFILL_PROFILE),
PermanentItem('google_chrome_app_settings',
name='App Settings',
parent_tag='google_chrome', sync_type=APP_SETTINGS),
PermanentItem('google_chrome_extension_settings',
name='Extension Settings',
parent_tag='google_chrome', sync_type=EXTENSION_SETTINGS),
PermanentItem('google_chrome_extensions', name='Extensions',
parent_tag='google_chrome', sync_type=EXTENSIONS),
PermanentItem('google_chrome_passwords', name='Passwords',
parent_tag='google_chrome', sync_type=PASSWORD),
PermanentItem('google_chrome_search_engines', name='Search Engines',
parent_tag='google_chrome', sync_type=SEARCH_ENGINE),
PermanentItem('google_chrome_sessions', name='Sessions',
parent_tag='google_chrome', sync_type=SESSION),
PermanentItem('google_chrome_themes', name='Themes',
parent_tag='google_chrome', sync_type=THEME),
PermanentItem('google_chrome_typed_urls', name='Typed URLs',
parent_tag='google_chrome', sync_type=TYPED_URL),
PermanentItem('google_chrome_nigori', name='Nigori',
parent_tag='google_chrome', sync_type=NIGORI),
PermanentItem('google_chrome_apps', name='Apps',
parent_tag='google_chrome', sync_type=APPS),
PermanentItem('google_chrome_app_notifications', name='App Notifications',
parent_tag='google_chrome', sync_type=APP_NOTIFICATION),
]
def __init__(self):
# Monotonically increasing version number. The next object change will
# take on this value + 1.
self._version = 0
# The definitive copy of this client's items: a map from ID string to a
# SyncEntity protocol buffer.
self._entries = {}
self.ResetStoreBirthday()
self.migration_history = MigrationHistory()
self.induced_error = sync_pb2.ClientToServerResponse.Error()
self.induced_error_frequency = 0
self.sync_count_before_errors = 0
def _SaveEntry(self, entry):
"""Insert or update an entry in the change log, and give it a new version.
The ID fields of this entry are assumed to be valid server IDs. This
entry will be updated with a new version number and sync_timestamp.
Args:
entry: The entry to be added or updated.
"""
self._version += 1
# Maintain a global (rather than per-item) sequence number and use it
# both as the per-entry version as well as the update-progress timestamp.
# This simulates the behavior of the original server implementation.
entry.version = self._version
entry.sync_timestamp = self._version
# Preserve the originator info, which the client is not required to send
# when updating.
base_entry = self._entries.get(entry.id_string)
if base_entry:
entry.originator_cache_guid = base_entry.originator_cache_guid
entry.originator_client_item_id = base_entry.originator_client_item_id
self._entries[entry.id_string] = copy.deepcopy(entry)
def _ServerTagToId(self, tag):
"""Determine the server ID from a server-unique tag.
The resulting value is guaranteed not to collide with the other ID
generation methods.
    Args:
      tag: The unique, known-to-the-client tag of a server-generated item.
Returns:
The string value of the computed server ID.
"""
if not tag or tag == ROOT_ID:
return tag
spec = [x for x in self._PERMANENT_ITEM_SPECS if x.tag == tag][0]
return self._MakeCurrentId(spec.sync_type, '<server tag>%s' % tag)
def _ClientTagToId(self, datatype, tag):
"""Determine the server ID from a client-unique tag.
The resulting value is guaranteed not to collide with the other ID
generation methods.
Args:
datatype: The sync type (python enum) of the identified object.
tag: The unique, opaque-to-the-server tag of a client-tagged item.
Returns:
The string value of the computed server ID.
"""
return self._MakeCurrentId(datatype, '<client tag>%s' % tag)
def _ClientIdToId(self, datatype, client_guid, client_item_id):
"""Compute a unique server ID from a client-local ID tag.
The resulting value is guaranteed not to collide with the other ID
generation methods.
Args:
datatype: The sync type (python enum) of the identified object.
client_guid: A globally unique ID that identifies the client which
created this item.
client_item_id: An ID that uniquely identifies this item on the client
which created it.
Returns:
The string value of the computed server ID.
"""
# Using the client ID info is not required here (we could instead generate
# a random ID), but it's useful for debugging.
return self._MakeCurrentId(datatype,
'<server ID originally>%s/%s' % (client_guid, client_item_id))
def _MakeCurrentId(self, datatype, inner_id):
return '%d^%d^%s' % (datatype,
self.migration_history.GetLatestVersion(datatype),
inner_id)
def _ExtractIdInfo(self, id_string):
if not id_string or id_string == ROOT_ID:
return None
datatype_string, separator, remainder = id_string.partition('^')
migration_version_string, separator, inner_id = remainder.partition('^')
return (int(datatype_string), int(migration_version_string), inner_id)
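  # ID format illustration: with BOOKMARK == 6 in ALL_TYPES and migration
  # version 1 at startup, _MakeCurrentId(BOOKMARK, '<client tag>abc') yields
  # '6^1^<client tag>abc', which _ExtractIdInfo inverts to
  # (6, 1, '<client tag>abc').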
def _WritePosition(self, entry, parent_id):
"""Ensure the entry has an absolute, numeric position and parent_id.
Historically, clients would specify positions using the predecessor-based
references in the insert_after_item_id field; starting July 2011, this
was changed and Chrome now sends up the absolute position. The server
must store a position_in_parent value and must not maintain
insert_after_item_id.
Args:
entry: The entry for which to write a position. Its ID field are
assumed to be server IDs. This entry will have its parent_id_string
and position_in_parent fields updated; its insert_after_item_id field
will be cleared.
parent_id: The ID of the entry intended as the new parent.
"""
entry.parent_id_string = parent_id
if not entry.HasField('position_in_parent'):
entry.position_in_parent = 1337 # A debuggable, distinctive default.
entry.ClearField('insert_after_item_id')
def _ItemExists(self, id_string):
"""Determine whether an item exists in the changelog."""
return id_string in self._entries
def _CreatePermanentItem(self, spec):
"""Create one permanent item from its spec, if it doesn't exist.
The resulting item is added to the changelog.
Args:
spec: A PermanentItem object holding the properties of the item to create.
"""
id_string = self._ServerTagToId(spec.tag)
if self._ItemExists(id_string):
return
print 'Creating permanent item: %s' % spec.name
entry = sync_pb2.SyncEntity()
entry.id_string = id_string
entry.non_unique_name = spec.name
entry.name = spec.name
entry.server_defined_unique_tag = spec.tag
entry.folder = True
entry.deleted = False
entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type))
self._WritePosition(entry, self._ServerTagToId(spec.parent_tag))
self._SaveEntry(entry)
def _CreateDefaultPermanentItems(self, requested_types):
"""Ensure creation of all default permanent items for a given set of types.
Args:
requested_types: A list of sync data types from ALL_TYPES.
All default permanent items of only these types will be created.
"""
for spec in self._PERMANENT_ITEM_SPECS:
if spec.sync_type in requested_types and spec.create_by_default:
self._CreatePermanentItem(spec)
def ResetStoreBirthday(self):
"""Resets the store birthday to a random value."""
# TODO(nick): uuid.uuid1() is better, but python 2.5 only.
self.store_birthday = '%0.30f' % random.random()
def StoreBirthday(self):
"""Gets the store birthday."""
return self.store_birthday
def GetChanges(self, sieve):
"""Get entries which have changed, oldest first.
The returned entries are limited to being _BATCH_SIZE many. The entries
are returned in strict version order.
Args:
sieve: An update sieve to use to filter out updates the client
has already seen.
Returns:
A tuple of (version, entries, changes_remaining). Version is a new
timestamp value, which should be used as the starting point for the
next query. Entries is the batch of entries meeting the current
timestamp query. Changes_remaining indicates the number of changes
left on the server after this batch.
"""
if not sieve.HasAnyTimestamp():
return (0, [], 0)
min_timestamp = sieve.GetMinTimestamp()
self._CreateDefaultPermanentItems(sieve.GetFirstTimeTypes())
change_log = sorted(self._entries.values(),
key=operator.attrgetter('version'))
new_changes = [x for x in change_log if x.version > min_timestamp]
# Pick batch_size new changes, and then filter them. This matches
# the RPC behavior of the production sync server.
batch = new_changes[:self._BATCH_SIZE]
if not batch:
# Client is up to date.
return (min_timestamp, [], 0)
# Restrict batch to requested types. Tombstones are untyped
# and will always get included.
filtered = [copy.deepcopy(item) for item in batch
if item.deleted or sieve.ClientWantsItem(item)]
# The new client timestamp is the timestamp of the last item in the
# batch, even if that item was filtered out.
return (batch[-1].version, filtered, len(new_changes) - len(batch))
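  # Worked example of the batching contract: with _BATCH_SIZE == 100 and 250
  # changes newer than the client's timestamp, three successive GetUpdates
  # calls return batches of 100, 100 and 50 with changes_remaining of 150, 50
  # and 0; the returned version always advances to the last item of the
  # batch, even if type filtering drops that item from the payload.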
def _CopyOverImmutableFields(self, entry):
"""Preserve immutable fields by copying pre-commit state.
Args:
entry: A sync entity from the client.
"""
if entry.id_string in self._entries:
if self._entries[entry.id_string].HasField(
'server_defined_unique_tag'):
entry.server_defined_unique_tag = (
self._entries[entry.id_string].server_defined_unique_tag)
def _CheckVersionForCommit(self, entry):
"""Perform an optimistic concurrency check on the version number.
Clients are only allowed to commit if they report having seen the most
recent version of an object.
Args:
entry: A sync entity from the client. It is assumed that ID fields
have been converted to server IDs.
Returns:
A boolean value indicating whether the client's version matches the
newest server version for the given entry.
"""
if entry.id_string in self._entries:
# Allow edits/deletes if the version matches, and any undeletion.
return (self._entries[entry.id_string].version == entry.version or
self._entries[entry.id_string].deleted)
else:
# Allow unknown ID only if the client thinks it's new too.
return entry.version == 0
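  # Optimistic concurrency in practice: if clients A and B both hold version
  # 7 of an item and A commits first (the server copy then gets a newer
  # version), B's commit still claiming version 7 fails this check and must
  # be retried after syncing, unless the server copy is a tombstone.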
def _CheckParentIdForCommit(self, entry):
"""Check that the parent ID referenced in a SyncEntity actually exists.
Args:
entry: A sync entity from the client. It is assumed that ID fields
have been converted to server IDs.
Returns:
A boolean value indicating whether the entity's parent ID is an object
that actually exists (and is not deleted) in the current account state.
"""
if entry.parent_id_string == ROOT_ID:
# This is generally allowed.
return True
if entry.parent_id_string not in self._entries:
print 'Warning: Client sent unknown ID. Should never happen.'
return False
if entry.parent_id_string == entry.id_string:
print 'Warning: Client sent circular reference. Should never happen.'
return False
if self._entries[entry.parent_id_string].deleted:
# This can happen in a race condition between two clients.
return False
if not self._entries[entry.parent_id_string].folder:
print 'Warning: Client sent non-folder parent. Should never happen.'
return False
return True
def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session):
"""Convert ID fields in a client sync entry to server IDs.
A commit batch sent by a client may contain new items for which the
server has not generated IDs yet. And within a commit batch, later
items are allowed to refer to earlier items. This method will
generate server IDs for new items, as well as rewrite references
to items whose server IDs were generated earlier in the batch.
Args:
entry: The client sync entry to modify.
cache_guid: The globally unique ID of the client that sent this
commit request.
commit_session: A dictionary mapping the original IDs to the new server
IDs, for any items committed earlier in the batch.
"""
if entry.version == 0:
data_type = GetEntryType(entry)
if entry.HasField('client_defined_unique_tag'):
# When present, this should determine the item's ID.
new_id = self._ClientTagToId(data_type, entry.client_defined_unique_tag)
else:
new_id = self._ClientIdToId(data_type, cache_guid, entry.id_string)
entry.originator_cache_guid = cache_guid
entry.originator_client_item_id = entry.id_string
commit_session[entry.id_string] = new_id # Remember the remapping.
entry.id_string = new_id
if entry.parent_id_string in commit_session:
entry.parent_id_string = commit_session[entry.parent_id_string]
if entry.insert_after_item_id in commit_session:
entry.insert_after_item_id = commit_session[entry.insert_after_item_id]
def ValidateCommitEntries(self, entries):
"""Raise an exception if a commit batch contains any global errors.
Arguments:
entries: an iterable containing commit-form SyncEntity protocol buffers.
Raises:
MigrationDoneError: if any of the entries reference a recently-migrated
datatype.
"""
server_ids_in_commit = set()
local_ids_in_commit = set()
for entry in entries:
if entry.version:
server_ids_in_commit.add(entry.id_string)
else:
local_ids_in_commit.add(entry.id_string)
if entry.HasField('parent_id_string'):
if entry.parent_id_string not in local_ids_in_commit:
server_ids_in_commit.add(entry.parent_id_string)
versions_present = {}
for server_id in server_ids_in_commit:
parsed = self._ExtractIdInfo(server_id)
if parsed:
datatype, version, _ = parsed
versions_present.setdefault(datatype, []).append(version)
self.migration_history.CheckAllCurrent(
dict((k, min(v)) for k, v in versions_present.iteritems()))
def CommitEntry(self, entry, cache_guid, commit_session):
"""Attempt to commit one entry to the user's account.
Args:
entry: A SyncEntity protobuf representing desired object changes.
cache_guid: A string value uniquely identifying the client; this
is used for ID generation and will determine the originator_cache_guid
if the entry is new.
commit_session: A dictionary mapping client IDs to server IDs for any
objects committed earlier this session. If the entry gets a new ID
during commit, the change will be recorded here.
Returns:
A SyncEntity reflecting the post-commit value of the entry, or None
if the entry was not committed due to an error.
"""
entry = copy.deepcopy(entry)
# Generate server IDs for this entry, and write generated server IDs
# from earlier entries into the message's fields, as appropriate. The
# ID generation state is stored in 'commit_session'.
self._RewriteIdsAsServerIds(entry, cache_guid, commit_session)
# Perform the optimistic concurrency check on the entry's version number.
# Clients are not allowed to commit unless they indicate that they've seen
# the most recent version of an object.
if not self._CheckVersionForCommit(entry):
return None
# Check the validity of the parent ID; it must exist at this point.
# TODO(nick): Implement cycle detection and resolution.
if not self._CheckParentIdForCommit(entry):
return None
    self._CopyOverImmutableFields(entry)
# At this point, the commit is definitely going to happen.
# Deletion works by storing a limited record for an entry, called a
# tombstone. A sync server must track deleted IDs forever, since it does
# not keep track of client knowledge (there's no deletion ACK event).
if entry.deleted:
def MakeTombstone(id_string):
"""Make a tombstone entry that will replace the entry being deleted.
Args:
id_string: Index of the SyncEntity to be deleted.
Returns:
A new SyncEntity reflecting the fact that the entry is deleted.
"""
# Only the ID, version and deletion state are preserved on a tombstone.
# TODO(nick): Does the production server not preserve the type? Not
# doing so means that tombstones cannot be filtered based on
# requested_types at GetUpdates time.
tombstone = sync_pb2.SyncEntity()
tombstone.id_string = id_string
tombstone.deleted = True
tombstone.name = ''
return tombstone
def IsChild(child_id):
"""Check if a SyncEntity is a child of entry, or any of its children.
Args:
child_id: Index of the SyncEntity that is a possible child of entry.
Returns:
True if it is a child; false otherwise.
"""
if child_id not in self._entries:
return False
if self._entries[child_id].parent_id_string == entry.id_string:
return True
return IsChild(self._entries[child_id].parent_id_string)
# Identify any children entry might have.
child_ids = [child.id_string for child in self._entries.itervalues()
if IsChild(child.id_string)]
# Mark all children that were identified as deleted.
for child_id in child_ids:
self._SaveEntry(MakeTombstone(child_id))
# Delete entry itself.
entry = MakeTombstone(entry.id_string)
else:
# Comments in sync.proto detail how the representation of positional
# ordering works: either the 'insert_after_item_id' field or the
# 'position_in_parent' field may determine the sibling order during
# Commit operations. The 'position_in_parent' field provides an absolute
# ordering in GetUpdates contexts. Here we assume the client will
# always send a valid position_in_parent (this is the newer style), and
# we ignore insert_after_item_id (an older style).
self._WritePosition(entry, entry.parent_id_string)
# Preserve the originator info, which the client is not required to send
# when updating.
base_entry = self._entries.get(entry.id_string)
if base_entry and not entry.HasField('originator_cache_guid'):
entry.originator_cache_guid = base_entry.originator_cache_guid
entry.originator_client_item_id = base_entry.originator_client_item_id
# Store the current time since the Unix epoch in milliseconds.
entry.mtime = (int((time.mktime(time.gmtime()) -
time.mktime(UNIX_TIME_EPOCH))*1000))
# Commit the change. This also updates the version number.
self._SaveEntry(entry)
return entry
def _RewriteVersionInId(self, id_string):
"""Rewrites an ID so that its migration version becomes current."""
parsed_id = self._ExtractIdInfo(id_string)
if not parsed_id:
return id_string
datatype, old_migration_version, inner_id = parsed_id
return self._MakeCurrentId(datatype, inner_id)
def TriggerMigration(self, datatypes):
"""Cause a migration to occur for a set of datatypes on this account.
Clients will see the MIGRATION_DONE error for these datatypes until they
resync them.
"""
    self.migration_history.Bump(datatypes)
all_entries = self._entries.values()
self._entries.clear()
for entry in all_entries:
new_id = self._RewriteVersionInId(entry.id_string)
entry.id_string = new_id
if entry.HasField('parent_id_string'):
entry.parent_id_string = self._RewriteVersionInId(
entry.parent_id_string)
self._entries[entry.id_string] = entry
def TriggerSyncTabs(self):
"""Set the 'sync_tabs' field to this account's nigori node.
If the field is not currently set, will write a new nigori node entry
with the field set. Else does nothing.
"""
nigori_tag = "google_chrome_nigori"
nigori_original = self._entries.get(self._ServerTagToId(nigori_tag))
    if nigori_original.specifics.nigori.sync_tabs:
return
nigori_new = copy.deepcopy(nigori_original)
nigori_new.specifics.nigori.sync_tabs = True
self._SaveEntry(nigori_new)
def TriggerCreateSyncedBookmarks(self):
"""Create the Synced Bookmarks folder under the Bookmarks permanent item.
Clients will then receive the Synced Bookmarks folder on future
GetUpdates, and new bookmarks can be added within the Synced Bookmarks
folder.
"""
synced_bookmarks_spec, = [spec for spec in self._PERMANENT_ITEM_SPECS
if spec.tag == "synced_bookmarks"]
self._CreatePermanentItem(synced_bookmarks_spec)
def SetInducedError(self, error, error_frequency,
sync_count_before_errors):
self.induced_error = error
self.induced_error_frequency = error_frequency
self.sync_count_before_errors = sync_count_before_errors
def GetInducedError(self):
return self.induced_error
class TestServer(object):
"""An object to handle requests for one (and only one) Chrome Sync account.
TestServer consumes the sync command messages that are the outermost
layers of the protocol, performs the corresponding actions on its
  SyncDataModel, and constructs an appropriate response message.
"""
def __init__(self):
# The implementation supports exactly one account; its state is here.
self.account = SyncDataModel()
self.account_lock = threading.Lock()
# Clients that have talked to us: a map from the full client ID
# to its nickname.
self.clients = {}
self.client_name_generator = ('+' * times + chr(c)
        for times in xrange(0, sys.maxint) for c in xrange(ord('A'), ord('Z') + 1))
self.transient_error = False
self.sync_count = 0
def GetShortClientName(self, query):
parsed = cgi.parse_qs(query[query.find('?')+1:])
client_id = parsed.get('client_id')
if not client_id:
return '?'
client_id = client_id[0]
if client_id not in self.clients:
self.clients[client_id] = self.client_name_generator.next()
return self.clients[client_id]
def CheckStoreBirthday(self, request):
"""Raises StoreBirthdayError if the request's birthday is a mismatch."""
if not request.HasField('store_birthday'):
return
if self.account.StoreBirthday() != request.store_birthday:
raise StoreBirthdayError
def CheckTransientError(self):
"""Raises TransientError if transient_error variable is set."""
if self.transient_error:
raise TransientError
def CheckSendError(self):
"""Raises SyncInducedError if needed."""
if (self.account.induced_error.error_type !=
sync_enums_pb2.SyncEnums.UNKNOWN):
# Always means return the given error for all requests.
if self.account.induced_error_frequency == ERROR_FREQUENCY_ALWAYS:
raise SyncInducedError
# This means the FIRST 2 requests of every 3 requests
# return an error. Don't switch the order of failures. There are
# test cases that rely on the first 2 being the failure rather than
# the last 2.
elif (self.account.induced_error_frequency ==
ERROR_FREQUENCY_TWO_THIRDS):
if (((self.sync_count -
self.account.sync_count_before_errors) % 3) != 0):
raise SyncInducedError
else:
raise InducedErrorFrequencyNotDefined
def HandleMigrate(self, path):
query = urlparse.urlparse(path)[4]
code = 200
self.account_lock.acquire()
try:
datatypes = [DataTypeStringToSyncTypeLoose(x)
for x in urlparse.parse_qs(query).get('type',[])]
if datatypes:
self.account.TriggerMigration(datatypes)
response = 'Migrated datatypes %s' % (
' and '.join(SyncTypeToString(x).upper() for x in datatypes))
else:
response = 'Please specify one or more <i>type=name</i> parameters'
code = 400
except DataTypeIdNotRecognized, error:
response = 'Could not interpret datatype name'
code = 400
finally:
self.account_lock.release()
return (code, '<html><title>Migration: %d</title><H1>%d %s</H1></html>' %
(code, code, response))
def HandleSetInducedError(self, path):
query = urlparse.urlparse(path)[4]
self.account_lock.acquire()
    code = 200
response = 'Success'
error = sync_pb2.ClientToServerResponse.Error()
try:
error_type = urlparse.parse_qs(query)['error']
action = urlparse.parse_qs(query)['action']
error.error_type = int(error_type[0])
error.action = int(action[0])
try:
error.url = (urlparse.parse_qs(query)['url'])[0]
except KeyError:
error.url = ''
try:
        error.error_description = (
            urlparse.parse_qs(query)['error_description'][0])
except KeyError:
error.error_description = ''
try:
error_frequency = int((urlparse.parse_qs(query)['frequency'])[0])
except KeyError:
error_frequency = ERROR_FREQUENCY_ALWAYS
self.account.SetInducedError(error, error_frequency, self.sync_count)
response = ('Error = %d, action = %d, url = %s, description = %s' %
(error.error_type, error.action,
error.url,
error.error_description))
    except (KeyError, ValueError):
response = 'Could not parse url'
code = 400
finally:
self.account_lock.release()
return (code, '<html><title>SetError: %d</title><H1>%d %s</H1></html>' %
(code, code, response))
def HandleCreateBirthdayError(self):
self.account.ResetStoreBirthday()
return (
200,
'<html><title>Birthday error</title><H1>Birthday error</H1></html>')
def HandleSetTransientError(self):
self.transient_error = True
return (
200,
'<html><title>Transient error</title><H1>Transient error</H1></html>')
def HandleSetSyncTabs(self):
"""Set the 'sync_tab' field of the nigori node for this account."""
self.account.TriggerSyncTabs()
return (
200,
'<html><title>Sync Tabs</title><H1>Sync Tabs</H1></html>')
def HandleCreateSyncedBookmarks(self):
"""Create the Synced Bookmarks folder under Bookmarks."""
self.account.TriggerCreateSyncedBookmarks()
return (
200,
'<html><title>Synced Bookmarks</title><H1>Synced Bookmarks</H1></html>')
def HandleCommand(self, query, raw_request):
"""Decode and handle a sync command from a raw input of bytes.
This is the main entry point for this class. It is safe to call this
method from multiple threads.
    Args:
      query: The query string of the request URI, used to identify the
        requesting client.
      raw_request: An iterable byte sequence to be interpreted as a sync
        protocol command.
Returns:
A tuple (response_code, raw_response); the first value is an HTTP
result code, while the second value is a string of bytes which is the
serialized reply to the command.
"""
self.account_lock.acquire()
self.sync_count += 1
def print_context(direction):
print '[Client %s %s %s.py]' % (self.GetShortClientName(query), direction,
__name__),
try:
request = sync_pb2.ClientToServerMessage()
request.MergeFromString(raw_request)
contents = request.message_contents
response = sync_pb2.ClientToServerResponse()
response.error_code = sync_enums_pb2.SyncEnums.SUCCESS
self.CheckStoreBirthday(request)
response.store_birthday = self.account.store_birthday
      self.CheckTransientError()
      self.CheckSendError()
print_context('->')
if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE:
print 'Authenticate'
# We accept any authentication token, and support only one account.
# TODO(nick): Mock out the GAIA authentication as well; hook up here.
response.authenticate.user.email = 'syncjuser@chromium'
response.authenticate.user.display_name = 'Sync J User'
elif contents == sync_pb2.ClientToServerMessage.COMMIT:
print 'Commit %d item(s)' % len(request.commit.entries)
self.HandleCommit(request.commit, response.commit)
elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES:
print 'GetUpdates',
self.HandleGetUpdates(request.get_updates, response.get_updates)
print_context('<-')
print '%d update(s)' % len(response.get_updates.entries)
else:
print 'Unrecognizable sync request!'
return (400, None) # Bad request.
return (200, response.SerializeToString())
except MigrationDoneError, error:
print_context('<-')
print 'MIGRATION_DONE: <%s>' % (ShortDatatypeListSummary(error.datatypes))
response = sync_pb2.ClientToServerResponse()
response.store_birthday = self.account.store_birthday
response.error_code = sync_enums_pb2.SyncEnums.MIGRATION_DONE
response.migrated_data_type_id[:] = [
SyncTypeToProtocolDataTypeId(x) for x in error.datatypes]
return (200, response.SerializeToString())
except StoreBirthdayError, error:
print_context('<-')
print 'NOT_MY_BIRTHDAY'
response = sync_pb2.ClientToServerResponse()
response.store_birthday = self.account.store_birthday
response.error_code = sync_enums_pb2.SyncEnums.NOT_MY_BIRTHDAY
return (200, response.SerializeToString())
except TransientError, error:
      # This path is deprecated; it will be removed once the test cases
      # that still rely on it are removed.
print_context('<-')
print 'TRANSIENT_ERROR'
response.store_birthday = self.account.store_birthday
response.error_code = sync_enums_pb2.SyncEnums.TRANSIENT_ERROR
return (200, response.SerializeToString())
except SyncInducedError, error:
print_context('<-')
print 'INDUCED_ERROR'
response.store_birthday = self.account.store_birthday
error = self.account.GetInducedError()
response.error.error_type = error.error_type
response.error.url = error.url
response.error.error_description = error.error_description
response.error.action = error.action
return (200, response.SerializeToString())
finally:
self.account_lock.release()
def HandleCommit(self, commit_message, commit_response):
"""Respond to a Commit request by updating the user's account state.
Commit attempts stop after the first error, returning a CONFLICT result
for any unattempted entries.
Args:
commit_message: A sync_pb.CommitMessage protobuf holding the content
of the client's request.
commit_response: A sync_pb.CommitResponse protobuf into which a reply
to the client request will be written.
"""
commit_response.SetInParent()
batch_failure = False
session = {} # Tracks ID renaming during the commit operation.
guid = commit_message.cache_guid
self.account.ValidateCommitEntries(commit_message.entries)
for entry in commit_message.entries:
server_entry = None
if not batch_failure:
# Try to commit the change to the account.
server_entry = self.account.CommitEntry(entry, guid, session)
# An entryresponse is returned in both success and failure cases.
reply = commit_response.entryresponse.add()
if not server_entry:
reply.response_type = sync_pb2.CommitResponse.CONFLICT
reply.error_message = 'Conflict.'
batch_failure = True # One failure halts the batch.
else:
reply.response_type = sync_pb2.CommitResponse.SUCCESS
# These are the properties that the server is allowed to override
# during commit; the client wants to know their values at the end
# of the operation.
reply.id_string = server_entry.id_string
if not server_entry.deleted:
# Note: the production server doesn't actually send the
# parent_id_string on commit responses, so we don't either.
reply.position_in_parent = server_entry.position_in_parent
reply.version = server_entry.version
reply.name = server_entry.name
reply.non_unique_name = server_entry.non_unique_name
else:
reply.version = entry.version + 1
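  # Illustrative batch semantics (hypothetical entries): committing
  # [A, B, C] where B fails produces entryresponses
  # [SUCCESS, CONFLICT, CONFLICT] -- the failure on B sets batch_failure,
  # so C is never attempted and is reported as a CONFLICT as well.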
def HandleGetUpdates(self, update_request, update_response):
"""Respond to a GetUpdates request by querying the user's account.
Args:
update_request: A sync_pb.GetUpdatesMessage protobuf holding the content
of the client's request.
update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply
to the client request will be written.
"""
update_response.SetInParent()
update_sieve = UpdateSieve(update_request, self.account.migration_history)
print CallerInfoToString(update_request.caller_info.source),
print update_sieve.SummarizeRequest()
update_sieve.CheckMigrationState()
new_timestamp, entries, remaining = self.account.GetChanges(update_sieve)
update_response.changes_remaining = remaining
for entry in entries:
reply = update_response.entries.add()
reply.CopyFrom(entry)
update_sieve.SaveProgress(new_timestamp, update_response)
| bsd-3-clause |
ncf-ds/chloroform | samples/build_fixtures.py | 1 | 6130 | import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from chloroform import db
from chloroform.models import *
# Things this has:
# Same client, different retail chains
# Same retail chain, different clients
# question_groups containing a question_group
# same question_groups across forms
# Question with a free-form response
# Things this doesn't have:
# questions containing question_groups
# question_groups containing multiple question_groups
# Form 1
form = Form(title="Palermos for CVS")
form.form_context = FormContext(name = "CVS")
form.client = Client(name="Palermos")
# Questions
question1 = Question("Did you find the ${name}?")
question2 = Question("Was the ${dname} fully stocked and organized?")
question3 = Question("How many ${product} are on the ${dname2}?")
quest_mad1 = QuestionMadlib("name")
quest_mad2 = QuestionMadlib("dname")
quest_mad3 = QuestionMadlib("product")
quest_mad4 = QuestionMadlib("dname2")
question1.choices = [Choice("Yes"),Choice("No"),Choice("Did not look")]
question1.madlib_associations = [quest_mad1]
quest_mad1.madlib = Madlib("display")
question2.choices = [Choice("Yes"),Choice("No, poorly organized"),Choice("No, not enough items")]
question2.madlib_associations = [quest_mad2]
quest_mad2.madlib = Madlib("display")
question3.choices = [Choice("Write in the number")]
question3.madlib_associations = [quest_mad3, quest_mad4]
quest_mad3.madlib = Madlib("frozen pizzas")
quest_mad4.madlib = Madlib("display")
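# Presumed rendering sketch: if the template layer substitutes each ${...}
# placeholder with its bound Madlib text, question1 above would read
# "Did you find the display?" and question3 "How many frozen pizzas are on
# the display?". The exact substitution mechanism is an assumption inferred
# from the placeholder names used here.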
# Question Groups
question_group1 = QuestionGroup("QuestGroup title")
question_group2 = QuestionGroup("QuestGroup title")
question_group1.question_groups = [question_group2]
question_group2.questions = [question1, question2, question3]
# question_group1.questions = [question1]
# question_group2.questions = [question2, question3]
form.question_group = question_group1
# Add to session
db.session.add(form)
db.session.add(question_group1)
db.session.add(question_group2)
db.session.add(question1)
db.session.add(question2)
db.session.add(question3)
# Form 2
form = Form(title="L'Oreal for CVS")
form.form_context = FormContext(name = "CVS")
form.client = Client(name="L'Oreal")
# Questions
question1 = Question("Did you find the ${dname}?")
question2 = Question("Was the ${dname} fully stocked and organized?")
question3 = Question("How many ${product} are on the ${dname2}?")
quest_mad1 = QuestionMadlib("dname")
quest_mad2 = QuestionMadlib("dname")
quest_mad3 = QuestionMadlib("product")
quest_mad4 = QuestionMadlib("dname2")
question1.choices = [Choice("Yes"),Choice("No"),Choice("Did not look")]
question1.madlib_associations = [quest_mad1]
quest_mad1.madlib = Madlib("display")
question2.choices = [Choice("Yes"),Choice("No, poorly organized"),Choice("No, not enough items")]
question2.madlib_associations = [quest_mad2]
quest_mad2.madlib = Madlib("display")
question3.choices = [Choice("Write in the number")]
question3.madlib_associations = [quest_mad3, quest_mad4]
quest_mad3.madlib = Madlib("shampoo")
quest_mad4.madlib = Madlib("display")
# Question Groups
question_group1 = QuestionGroup("QuestGroup title")
question_group2 = QuestionGroup("QuestGroup title")
question_group1.question_groups = [question_group2]
question_group2.questions = [question1, question2, question3]
# question_group1.questions = [question1]
# question_group2.questions = [question2, question3]
form.question_group = question_group1
# Add to session
db.session.add(form)
db.session.add(question_group2)
db.session.add(question2)
db.session.add(question3)
db.session.add(quest_mad1)
db.session.add(quest_mad2)
db.session.add(quest_mad3)
# Form 3
form = Form(title="Palermos for Publix")
form.form_context = FormContext(name = "Publix")
form.client = Client(name="Palermos1")
# Questions
question1 = Question("Did you find the ${name}?")
question2 = Question("Are there ${pname1} on the ${dname1}?")
question3 = Question("Are there ${pname2} on the ${dname2}?")
question4 = Question("Are there ${pname3} on the ${dname3}?")
quest_mad1 = QuestionMadlib("name")
quest_mad2 = QuestionMadlib("pname1")
quest_mad3 = QuestionMadlib("dname1")
quest_mad4 = QuestionMadlib("pname2")
quest_mad5 = QuestionMadlib("dname2")
quest_mad6 = QuestionMadlib("pname3")
quest_mad7 = QuestionMadlib("dname3")
question1.choices = [Choice("Yes"),Choice("No"),Choice("Did not look")]
question1.madlib_associations = [quest_mad1]
quest_mad1.madlib = Madlib("display")
question2.choices = [Choice("Yes, there are many items"),Choice("Yes, but there are only a few items"),Choice("No")]
question2.madlib_associations = [quest_mad2, quest_mad3]
quest_mad2.madlib = Madlib("Palermos Pepperoni Pizza")
quest_mad3.madlib = Madlib("display")
question3.choices = [Choice("Yes, there are many items"),Choice("Yes, but there are only a few items"),Choice("No")]
question3.madlib_associations = [quest_mad4, quest_mad5]
quest_mad4.madlib = Madlib("Palermos Cheese Pizza")
quest_mad5.madlib = Madlib("display")
question4.choices = [Choice("Yes, there are many items"),Choice("Yes, but there are only a few items"),Choice("No")]
question4.madlib_associations = [quest_mad6, quest_mad7]
quest_mad6.madlib = Madlib("Palermos Sausage Pizza")
quest_mad7.madlib = Madlib("display")
# Question Groups
question_group1 = QuestionGroup("QuestGroup title")
question_group2 = QuestionGroup("QuestGroup title")
question_group1.question_groups = [question_group2]
question_group2.questions = [question1, question2, question3, question4]
# question_group1.questions = [question1]
# question_group2.questions = [question2, question3, question4]
form.question_group = question_group1
# Add to session
db.session.add(form)
# db.session.add(question_group2)
db.session.add(question2)
db.session.add(question3)
db.session.add(question4)
db.session.add(quest_mad1)
db.session.add(quest_mad2)
db.session.add(quest_mad3)
db.session.add(quest_mad4)
db.session.add(quest_mad5)
db.session.add(quest_mad6)
db.session.add(quest_mad7)
db.session.commit()
# form = Form.query.filter_by(title='Palermos for CVS').first()
# form.question_group
# form.question_group.questions
| agpl-3.0 |
JFriel/honours_project | venv/lib/python2.7/site-packages/numpy/f2py/f2py2e.py | 174 | 22908 | #!/usr/bin/env python
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2011 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import pprint
import re
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
f2py_version = __version__.version
errmess = sys.stderr.write
# outmess=sys.stdout.write
show = pprint.pprint
outmess = auxfuncs.outmess
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mkdtemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include-paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 2.3 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
def scaninputline(inputline):
files, skipfuncs, onlyfuncs, debug = [], [], [], []
f, f2, f3, f5, f6, f7, f8, f9 = 1, 0, 0, 0, 0, 0, 0, 0
verbose = 1
dolc = -1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile, modulename = None, None
options = {'buildpath': buildpath,
'coutput': None,
'f2py_wrapper_output': None}
for l in inputline:
if l == '':
pass
elif l == 'only:':
f = 0
elif l == 'skip:':
f = -1
elif l == ':':
f = 1
elif l[:8] == '--debug-':
debug.append(l[8:])
elif l == '--lower':
dolc = 1
elif l == '--build-dir':
f6 = 1
elif l == '--no-lower':
dolc = 0
elif l == '--quiet':
verbose = 0
elif l == '--verbose':
verbose += 1
elif l == '--latex-doc':
dolatexdoc = 1
elif l == '--no-latex-doc':
dolatexdoc = 0
elif l == '--rest-doc':
dorestdoc = 1
elif l == '--no-rest-doc':
dorestdoc = 0
elif l == '--wrap-functions':
wrapfuncs = 1
elif l == '--no-wrap-functions':
wrapfuncs = 0
elif l == '--short-latex':
options['shortlatex'] = 1
elif l == '--coutput':
f8 = 1
elif l == '--f2py-wrapper-output':
f9 = 1
elif l == '--overwrite-signature':
options['h-overwrite'] = 1
elif l == '-h':
f2 = 1
elif l == '-m':
f3 = 1
elif l[:2] == '-v':
print(f2py_version)
sys.exit()
elif l == '--show-compilers':
f5 = 1
elif l[:8] == '-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
        elif l == '--include_paths':
outmess(
'f2py option --include_paths is deprecated, use --include-paths instead.\n')
f7 = 1
        elif l == '--include-paths':
f7 = 1
elif l[0] == '-':
errmess('Unknown option %s\n' % repr(l))
sys.exit()
elif f2:
f2 = 0
signsfile = l
elif f3:
f3 = 0
modulename = l
elif f6:
f6 = 0
buildpath = l
elif f7:
f7 = 0
include_paths.extend(l.split(os.pathsep))
elif f8:
f8 = 0
options["coutput"] = l
elif f9:
f9 = 0
options["f2py_wrapper_output"] = l
elif f == 1:
try:
open(l).close()
files.append(l)
except IOError as detail:
errmess('IOError: %s. Skipping file "%s".\n' %
(str(detail), l))
elif f == -1:
skipfuncs.append(l)
elif f == 0:
onlyfuncs.append(l)
if not f5 and not files and not modulename:
print(__usage__)
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
outmess('Creating build directory %s' % (buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath, signsfile)
if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
errmess(
'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
sys.exit()
options['debug'] = debug
options['verbose'] = verbose
if dolc == -1 and not signsfile:
options['do-lower'] = 0
else:
options['do-lower'] = dolc
if modulename:
options['module'] = modulename
if signsfile:
options['signsfile'] = signsfile
if onlyfuncs:
options['onlyfuncs'] = onlyfuncs
if skipfuncs:
options['skipfuncs'] = skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath'] = buildpath
options['include_paths'] = include_paths
return files, options
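# A minimal usage sketch for scaninputline (assuming 'foo.f' exists in the
# current directory):
#
#     files, options = scaninputline(['-m', 'mymod', 'foo.f'])
#     # files == ['foo.f'], options['module'] == 'mymod'
#
# Nonexistent input files are skipped with an IOError message rather than
# aborting the run.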
def callcrackfortran(files, options):
rules.options = options
crackfortran.debug = options['debug']
crackfortran.verbose = options['verbose']
if 'module' in options:
crackfortran.f77modulename = options['module']
if 'skipfuncs' in options:
crackfortran.skipfuncs = options['skipfuncs']
if 'onlyfuncs' in options:
crackfortran.onlyfuncs = options['onlyfuncs']
crackfortran.include_paths[:] = options['include_paths']
crackfortran.dolowercase = options['do-lower']
postlist = crackfortran.crackfortran(files)
if 'signsfile' in options:
outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
pyf = crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
f = open(options['signsfile'], 'w')
f.write(pyf)
f.close()
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
else:
for mod in postlist:
mod["coutput"] = options["coutput"]
if options["f2py_wrapper_output"] is None:
for mod in postlist:
mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
else:
for mod in postlist:
mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
return postlist
def buildmodules(lst):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules, mnames, isusedby = [], [], {}
for i in range(len(lst)):
if '__user__' in lst[i]['name']:
cb_rules.buildcallbacks(lst[i])
else:
if 'use' in lst[i]:
for u in lst[i]['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(lst[i]['name'])
modules.append(lst[i])
mnames.append(lst[i]['name'])
ret = {}
for i in range(len(mnames)):
if mnames[i] in isusedby:
outmess('\tSkipping module "%s" which is used by %s.\n' % (
mnames[i], ','.join(['"%s"' % s for s in isusedby[mnames[i]]])))
else:
um = []
if 'use' in modules[i]:
for u in modules[i]['use'].keys():
if u in isusedby and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess(
'\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (mnames[i], u))
ret[mnames[i]] = {}
dict_append(ret[mnames[i]], rules.buildmodule(modules[i], um))
return ret
def dict_append(d_out, d_in):
for (k, v) in d_in.items():
if k not in d_out:
d_out[k] = []
if isinstance(v, list):
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
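# Usage sketch for dict_append: values accumulate in lists, and list values
# are concatenated rather than nested:
#
#     d = {}
#     dict_append(d, {'libraries': ['m']})
#     dict_append(d, {'libraries': ['c'], 'define_macros': [('X', 1)]})
#     # d == {'libraries': ['m', 'c'], 'define_macros': [('X', 1)]}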
def run_main(comline_list):
"""Run f2py as if string.join(comline_list,' ') is used as a command line.
In case of using -h flag, return None.
"""
crackfortran.reset_global_f2py_vars()
f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
files, options = scaninputline(comline_list)
auxfuncs.options = options
postlist = callcrackfortran(files, options)
isusedby = {}
for i in range(len(postlist)):
if 'use' in postlist[i]:
for u in postlist[i]['use'].keys():
if u not in isusedby:
isusedby[u] = []
isusedby[u].append(postlist[i]['name'])
for i in range(len(postlist)):
if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']:
if postlist[i]['name'] in isusedby:
# if not quiet:
outmess('Skipping Makefile build for module "%s" which is used by %s\n' % (
postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]])))
if 'signsfile' in options:
if options['verbose'] > 1:
outmess(
'Stopping. Edit the signature file and then run f2py on the signature file: ')
outmess('%s %s\n' %
(os.path.basename(sys.argv[0]), options['signsfile']))
return
for i in range(len(postlist)):
if postlist[i]['block'] != 'python module':
if 'python module' not in options:
errmess(
'Tip: If your original code is Fortran source then you must use -m option.\n')
raise TypeError('All blocks must be python module blocks but got %s' % (
repr(postlist[i]['block'])))
auxfuncs.debugoptions = options['debug']
f90mod_rules.options = options
auxfuncs.wrapfuncs = options['wrapfuncs']
ret = buildmodules(postlist)
for mn in ret.keys():
dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
return ret
def filter_files(prefix, suffix, files, remove_prefix=None):
"""
Filter files by prefix and suffix.
"""
filtered, rest = [], []
match = re.compile(prefix + r'.*' + suffix + r'\Z').match
if remove_prefix:
ind = len(prefix)
else:
ind = 0
for file in [x.strip() for x in files]:
if match(file):
filtered.append(file[ind:])
else:
rest.append(file)
return filtered, rest
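# Usage sketch:
#
#     filter_files('-I', '', ['-I/usr/include', 'foo.f'], remove_prefix=1)
#     # -> (['/usr/include'], ['foo.f'])
#
# i.e. entries matching the prefix/suffix pattern are returned first (with
# the prefix stripped when remove_prefix is set) and the rest second.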
def get_prefix(module):
p = os.path.dirname(os.path.dirname(module.__file__))
return p
def run_compile():
"""
Do it all in one call!
"""
import tempfile
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try:
i = sys.argv.index('--build-dir')
except ValueError:
i = None
if i is not None:
build_dir = sys.argv[i + 1]
del sys.argv[i + 1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = tempfile.mkdtemp()
_reg1 = re.compile(r'[-][-]link[-]')
sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
_reg2 = re.compile(
r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include')
f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:', 'skip:']:
fl = 1
elif a == ':':
fl = 0
if fl or a == ':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1] != ':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
_reg3 = re.compile(
r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)')
flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
_reg4 = re.compile(
r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))')
fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
    del_list = []
    for s in flib_flags:
        v = '--fcompiler='
        if s[:len(v)] == v:
            from numpy.distutils import fcompiler
            fcompiler.load_all_fcompiler_classes()
            allowed_keys = list(fcompiler.fcompiler_class.keys())
            nv = ov = s[len(v):].lower()
            if ov not in allowed_keys:
                vmap = {}  # XXX
                try:
                    nv = vmap[ov]
                except KeyError:
                    if ov not in vmap.values():
                        print('Unknown vendor: "%s"' % (s[len(v):]))
                    nv = ov
            i = flib_flags.index(s)
            flib_flags[i] = '--fcompiler=' + nv
            continue
    for s in del_list:
        i = flib_flags.index(s)
        del flib_flags[i]
    assert len(flib_flags) <= 2, repr(flib_flags)
_reg5 = re.compile(r'[-][-](verbose)')
setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
for optname in ['--include_paths', '--include-paths']:
if optname in sys.argv:
i = sys.argv.index(optname)
f2py_flags.extend(sys.argv[i:i + 2])
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i + 1]
del sys.argv[i + 1], sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('', '[.](o|a|so)', sources)
include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
for i in range(len(define_macros)):
name_value = define_macros[i].split('=', 1)
if len(name_value) == 1:
name_value.append(None)
if len(name_value) == 2:
define_macros[i] = tuple(name_value)
else:
print('Invalid use of -D:', name_value)
from numpy.distutils.system_info import get_info
num_info = {}
if num_info:
include_dirs.extend(num_info.get('include_dirs', []))
from numpy.distutils.core import setup, Extension
ext_args = {'name': modulename, 'sources': sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'
' (try `f2py --help-link`)\n' % (repr(n)))
dict_append(ext_args, **i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp', build_dir,
'--build-base', build_dir,
'--build-platlib', '.'])
if fc_flags:
sys.argv.extend(['config_fc'] + fc_flags)
if flib_flags:
sys.argv.extend(['build_ext'] + flib_flags)
setup(ext_modules=[ext])
if remove_build_dir and os.path.exists(build_dir):
import shutil
outmess('Removing build directory %s\n' % (build_dir))
shutil.rmtree(build_dir)
def main():
if '--help-link' in sys.argv[1:]:
sys.argv.remove('--help-link')
from numpy.distutils.system_info import show_all
show_all()
return
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
# if __name__ == "__main__":
# main()
# EOF
| gpl-3.0 |
hackerbot/DjangoDev | tests/template_tests/filter_tests/test_lower.py | 388 | 1155 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template.defaultfilters import lower
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LowerTests(SimpleTestCase):
@setup({'lower01': '{% autoescape off %}{{ a|lower }} {{ b|lower }}{% endautoescape %}'})
def test_lower01(self):
output = self.engine.render_to_string('lower01', {"a": "Apple & banana", "b": mark_safe("Apple & banana")})
self.assertEqual(output, "apple & banana apple & banana")
@setup({'lower02': '{{ a|lower }} {{ b|lower }}'})
def test_lower02(self):
output = self.engine.render_to_string('lower02', {"a": "Apple & banana", "b": mark_safe("Apple & banana")})
self.assertEqual(output, "apple & banana apple & banana")
class FunctionTests(SimpleTestCase):
def test_lower(self):
self.assertEqual(lower('TEST'), 'test')
def test_unicode(self):
# uppercase E umlaut
self.assertEqual(lower('\xcb'), '\xeb')
def test_non_string_input(self):
self.assertEqual(lower(123), '123')
| bsd-3-clause |
prasen-ftech/pywinauto | doc_src/build_autodoc_files.py | 16 | 2513 | "Build up the sphinx autodoc file for the python code"
import os
import sys
docs_folder = os.path.dirname(__file__)
pywin_folder = os.path.dirname(docs_folder)
sys.path.append(pywin_folder)
pywin_folder = os.path.join(pywin_folder, "pywinauto")
excluded_dirs = ["unittests"]
excluded_files = [
"_menux.py",
"__init__.py",
"win32defines.py",
"win32structures.py",
"win32functions.py"]
output_folder = os.path.join(docs_folder, "code")
try:
os.mkdir(output_folder)
except WindowsError:
pass
module_docs = []
for root, dirs, files in os.walk(pywin_folder):
# Skip over directories we don't want to document
    # (assign via a slice so os.walk sees the pruned list; deleting items
    # while enumerating can skip entries)
    dirs[:] = [d for d in dirs if d not in excluded_dirs]
py_files = [f for f in files if f.endswith(".py")]
for py_filename in py_files:
# skip over py files we don't want to document
if py_filename in excluded_files:
continue
py_filepath = os.path.join(root, py_filename)
# find the last instance of 'pywinauto' to make a module name from
# the path
modulename = 'pywinauto' + py_filepath.rsplit("pywinauto", 1)[1]
modulename = os.path.splitext(modulename)[0]
modulename = modulename.replace('\\', '.')
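        # Path-to-module sketch (hypothetical path): a file such as
        # r'...\pywinauto\controls\menuwrapper.py' becomes the module name
        # 'pywinauto.controls.menuwrapper' after the rsplit, splitext and
        # backslash-to-dot steps above.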
# the final doc name is the modulename + .txt
doc_source_filename = os.path.join(output_folder, modulename + ".txt")
# skip files that are already generated
if os.path.exists(doc_source_filename):
continue
print py_filename
out = open(doc_source_filename, "w")
out.write(modulename + "\n")
out.write("-" * len(modulename) + "\n")
out.write(" .. automodule:: %s\n"% modulename)
out.write(" :members:\n")
out.write(" :undoc-members:\n\n")
#out.write(" :inherited-members:\n")
#out.write(" .. autoattribute:: %s\n"% modulename)
out.close()
module_docs.append(doc_source_filename)
# This section needs to be updated - I should ideally parse the
# existing file to see if any new docs have been added, if not then
# I should just leave the file alone rather than re-create.
#
#c = open(os.path.join(output_folder, "code.txt"), "w")
#c.write("Source Code\n")
#c.write("=" * 30 + "\n")
#
#c.write(".. toctree::\n")
#c.write(" :maxdepth: 3\n\n")
#for doc in module_docs:
# c.write(" " + doc + "\n")
#
#c.close()
| lgpl-2.1 |
yukoba/sympy | sympy/polys/rootisolation.py | 78 | 55536 | """Real and complex root isolation and refinement algorithms. """
from __future__ import print_function, division
from sympy.polys.densebasic import (
dup_LC, dup_TC, dup_degree,
dup_strip, dup_reverse,
dup_convert,
dup_terms_gcd)
from sympy.polys.densearith import (
dup_neg, dup_rshift, dup_rem)
from sympy.polys.densetools import (
dup_clear_denoms,
dup_mirror, dup_scale, dup_shift,
dup_transform,
dup_diff,
dup_eval, dmp_eval_in,
dup_sign_variations,
dup_real_imag)
from sympy.polys.sqfreetools import (
dup_sqf_part, dup_sqf_list)
from sympy.polys.factortools import (
dup_factor_list)
from sympy.polys.polyerrors import (
RefinementFailed,
DomainError)
from sympy.core.compatibility import range
def dup_sturm(f, K):
"""
Computes the Sturm sequence of ``f`` in ``F[x]``.
    Given a univariate, square-free polynomial ``f(x)``, returns the
associated Sturm sequence ``f_0(x), ..., f_n(x)`` defined by::
f_0(x), f_1(x) = f(x), f'(x)
f_n = -rem(f_{n-2}(x), f_{n-1}(x))
Examples
========
>>> from sympy.polys import ring, QQ
>>> R, x = ring("x", QQ)
>>> R.dup_sturm(x**3 - 2*x**2 + x - 3)
[x**3 - 2*x**2 + x - 3, 3*x**2 - 4*x + 1, 2/9*x + 25/9, -2079/4]
References
==========
1. [Davenport88]_
"""
if not K.has_Field:
raise DomainError("can't compute Sturm sequence over %s" % K)
f = dup_sqf_part(f, K)
sturm = [f, dup_diff(f, 1, K)]
while sturm[-1]:
s = dup_rem(sturm[-2], sturm[-1], K)
sturm.append(dup_neg(s, K))
return sturm[:-1]
def dup_root_upper_bound(f, K):
"""Compute the LMQ upper bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
Reference:
==========
Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
n, P = len(f), []
t = n * [K.one]
if dup_LC(f, K) < 0:
f = dup_neg(f, K)
f = list(reversed(f))
for i in range(0, n):
if f[i] >= 0:
continue
a, QL = K.log(-f[i], 2), []
for j in range(i + 1, n):
if f[j] <= 0:
continue
q = t[j] + a - K.log(f[j], 2)
QL.append([q // (j - i) , j])
if not QL:
continue
q = min(QL)
t[q[1]] = t[q[1]] + 1
P.append(q[0])
if not P:
return None
else:
return K.get_field()(2)**(max(P) + 1)
def dup_root_lower_bound(f, K):
"""Compute the LMQ lower bound for the positive roots of `f`;
LMQ (Local Max Quadratic) was developed by Akritas-Strzebonski-Vigklas.
Reference:
==========
Alkiviadis G. Akritas: "Linear and Quadratic Complexity Bounds on the
Values of the Positive Roots of Polynomials"
Journal of Universal Computer Science, Vol. 15, No. 3, 523-537, 2009.
"""
bound = dup_root_upper_bound(dup_reverse(f), K)
if bound is not None:
return 1/bound
else:
return None
def _mobius_from_interval(I, field):
"""Convert an open interval to a Mobius transform. """
s, t = I
a, c = field.numer(s), field.denom(s)
b, d = field.numer(t), field.denom(t)
return a, b, c, d
def _mobius_to_interval(M, field):
"""Convert a Mobius transform to an open interval. """
a, b, c, d = M
s, t = field(a, c), field(b, d)
if s <= t:
return (s, t)
else:
return (t, s)
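# Round-trip sketch over QQ: the open interval (1/2, 2) corresponds to the
# Mobius data (a, b, c, d) == (1, 2, 2, 1), i.e. s == a/c == 1/2 and
# t == b/d == 2; _mobius_to_interval maps it back, swapping the endpoints
# if necessary so that s <= t.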
def dup_step_refine_real_root(f, M, K, fast=False):
"""One step of positive real root refinement algorithm. """
a, b, c, d = M
if a == b and c == d:
return f, (a, b, c, d)
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_eval(f, K.zero, K):
return f, (b, b, d, d)
f, g = dup_shift(f, K.one, K), f
a1, b1, c1, d1 = a, a + b, c, c + d
if not dup_eval(f, K.zero, K):
return f, (b1, b1, d1, d1)
k = dup_sign_variations(f, K)
if k == 1:
a, b, c, d = a1, b1, c1, d1
else:
f = dup_shift(dup_reverse(g), K.one, K)
if not dup_eval(f, K.zero, K):
f = dup_rshift(f, 1, K)
a, b, c, d = b, a + b, d, c + d
return f, (a, b, c, d)
def dup_inner_refine_real_root(f, M, K, eps=None, steps=None, disjoint=None, fast=False, mobius=False):
"""Refine a positive root of `f` given a Mobius transform or an interval. """
F = K.get_field()
if len(M) == 2:
a, b, c, d = _mobius_from_interval(M, F)
else:
a, b, c, d = M
while not c:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c,
d), K, fast=fast)
if eps is not None and steps is not None:
for i in range(0, steps):
if abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
else:
break
else:
if eps is not None:
while abs(F(a, c) - F(b, d)) >= eps:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if steps is not None:
for i in range(0, steps):
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if disjoint is not None:
while True:
u, v = _mobius_to_interval((a, b, c, d), F)
if v <= disjoint or disjoint <= u:
break
else:
f, (a, b, c, d) = dup_step_refine_real_root(f, (a, b, c, d), K, fast=fast)
if not mobius:
return _mobius_to_interval((a, b, c, d), F)
else:
return f, (a, b, c, d)
def dup_outer_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine a positive root of `f` given an interval `(s, t)`. """
a, b, c, d = _mobius_from_interval((s, t), K.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), K)
if dup_sign_variations(f, K) != 1:
raise RefinementFailed("there should be exactly one root in (%s, %s) interval" % (s, t))
return dup_inner_refine_real_root(f, (a, b, c, d), K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
def dup_refine_real_root(f, s, t, K, eps=None, steps=None, disjoint=None, fast=False):
"""Refine real root's approximating interval to the given precision. """
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("real root refinement not supported over %s" % K)
if s == t:
return (s, t)
if s > t:
s, t = t, s
negative = False
if s < 0:
if t <= 0:
f, s, t, negative = dup_mirror(f, K), -t, -s, True
else:
raise ValueError("can't refine a real root in (%s, %s)" % (s, t))
if negative and disjoint is not None:
if disjoint < 0:
disjoint = -disjoint
else:
disjoint = None
s, t = dup_outer_refine_real_root(
f, s, t, K, eps=eps, steps=steps, disjoint=disjoint, fast=fast)
if negative:
return (-t, -s)
else:
return ( s, t)
def dup_inner_isolate_real_roots(f, K, eps=None, fast=False):
"""Internal function for isolation positive roots up to given precision.
References:
===========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root
Isolation Methods . Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the
Performance of the Continued Fractions Method Using new Bounds of Positive Roots. Nonlinear
Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
a, b, c, d = K.one, K.zero, K.zero, K.one
k = dup_sign_variations(f, K)
if k == 0:
return []
if k == 1:
roots = [dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True)]
else:
roots, stack = [], [(a, b, c, d, f, k)]
while stack:
a, b, c, d, f, k = stack.pop()
A = dup_root_lower_bound(f, K)
if A is not None:
A = K(int(A))
else:
A = K.zero
if fast and A > 16:
f = dup_scale(f, A, K)
a, c, A = A*a, A*c, K.one
if A >= K.one:
f = dup_shift(f, A, K)
b, d = A*a + b, A*c + d
if not dup_TC(f, K):
roots.append((f, (b, b, d, d)))
f = dup_rshift(f, 1, K)
k = dup_sign_variations(f, K)
if k == 0:
continue
if k == 1:
roots.append(dup_inner_refine_real_root(
f, (a, b, c, d), K, eps=eps, fast=fast, mobius=True))
continue
f1 = dup_shift(f, K.one, K)
a1, b1, c1, d1, r = a, a + b, c, c + d, 0
if not dup_TC(f1, K):
roots.append((f1, (b1, b1, d1, d1)))
f1, r = dup_rshift(f1, 1, K), 1
k1 = dup_sign_variations(f1, K)
k2 = k - k1 - r
a2, b2, c2, d2 = b, a + b, d, c + d
if k2 > 1:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
k2 = dup_sign_variations(f2, K)
else:
f2 = None
if k1 < k2:
a1, a2, b1, b2 = a2, a1, b2, b1
c1, c2, d1, d2 = c2, c1, d2, d1
f1, f2, k1, k2 = f2, f1, k2, k1
if not k1:
continue
if f1 is None:
f1 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f1, K):
f1 = dup_rshift(f1, 1, K)
if k1 == 1:
roots.append(dup_inner_refine_real_root(
f1, (a1, b1, c1, d1), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a1, b1, c1, d1, f1, k1))
if not k2:
continue
if f2 is None:
f2 = dup_shift(dup_reverse(f), K.one, K)
if not dup_TC(f2, K):
f2 = dup_rshift(f2, 1, K)
if k2 == 1:
roots.append(dup_inner_refine_real_root(
f2, (a2, b2, c2, d2), K, eps=eps, fast=fast, mobius=True))
else:
stack.append((a2, b2, c2, d2, f2, k2))
return roots
def _discard_if_outside_interval(f, M, inf, sup, K, negative, fast, mobius):
"""Discard an isolating interval if outside ``(inf, sup)``. """
F = K.get_field()
while True:
u, v = _mobius_to_interval(M, F)
if negative:
u, v = -v, -u
if (inf is None or u >= inf) and (sup is None or v <= sup):
if not mobius:
return u, v
else:
return f, M
elif (sup is not None and u > sup) or (inf is not None and v < inf):
return None
else:
f, M = dup_step_refine_real_root(f, M, K, fast=fast)
def dup_inner_isolate_positive_roots(f, K, eps=None, inf=None, sup=None, fast=False, mobius=False):
"""Iteratively compute disjoint positive root isolation intervals. """
if sup is not None and sup < 0:
return []
roots = dup_inner_isolate_real_roots(f, K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, False, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((u, v))
else:
results = roots
return results
def dup_inner_isolate_negative_roots(f, K, inf=None, sup=None, eps=None, fast=False, mobius=False):
"""Iteratively compute disjoint negative root isolation intervals. """
if inf is not None and inf >= 0:
return []
roots = dup_inner_isolate_real_roots(dup_mirror(f, K), K, eps=eps, fast=fast)
F, results = K.get_field(), []
if inf is not None or sup is not None:
for f, M in roots:
result = _discard_if_outside_interval(f, M, inf, sup, K, True, fast, mobius)
if result is not None:
results.append(result)
elif not mobius:
for f, M in roots:
u, v = _mobius_to_interval(M, F)
results.append((-v, -u))
else:
results = roots
return results
def _isolate_zero(f, K, inf, sup, basis=False, sqf=False):
"""Handle special case of CF algorithm when ``f`` is homogeneous. """
j, f = dup_terms_gcd(f, K)
if j > 0:
F = K.get_field()
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
if not sqf:
if not basis:
return [((F.zero, F.zero), j)], f
else:
return [((F.zero, F.zero), j, [K.one, K.zero])], f
else:
return [(F.zero, F.zero)], f
return [], f
def dup_isolate_real_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real roots of a square-free polynomial using the Vincent-Akritas-Strzebonski (VAS) CF approach.
References:
===========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root Isolation Methods.
Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the Performance
of the Continued Fractions Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=False, sqf=True)
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
roots = sorted(I_neg + I_zero + I_pos)
if not blackbox:
return roots
else:
return [ RealInterval((a, b), f, K) for (a, b) in roots ]
def dup_isolate_real_roots(f, K, eps=None, inf=None, sup=None, basis=False, fast=False):
"""Isolate real roots using Vincent-Akritas-Strzebonski (VAS) continued fractions approach.
References:
===========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root Isolation Methods.
Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the Performance
of the Continued Fractions Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
(_, f), K = dup_clear_denoms(f, K, convert=True), K.get_ring()
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
if dup_degree(f) <= 0:
return []
I_zero, f = _isolate_zero(f, K, inf, sup, basis=basis, sqf=False)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
I_neg = dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_pos = dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast)
I_neg = [ ((u, v), k) for u, v in I_neg ]
I_pos = [ ((u, v), k) for u, v in I_pos ]
else:
I_neg, I_pos = _real_isolate_and_disjoin(factors, K,
eps=eps, inf=inf, sup=sup, basis=basis, fast=fast)
return sorted(I_neg + I_zero + I_pos)
def dup_isolate_real_roots_list(polys, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of square-free polynomial using Vincent-Akritas-Strzebonski (VAS) CF approach.
References:
===========
1. Alkiviadis G. Akritas and Adam W. Strzebonski: A Comparative Study of Two Real Root Isolation Methods.
Nonlinear Analysis: Modelling and Control, Vol. 10, No. 4, 297-304, 2005.
2. Alkiviadis G. Akritas, Adam W. Strzebonski and Panagiotis S. Vigklas: Improving the Performance
of the Continued Fractions Method Using New Bounds of Positive Roots.
Nonlinear Analysis: Modelling and Control, Vol. 13, No. 3, 265-279, 2008.
"""
if K.is_QQ:
K, F, polys = K.get_ring(), K, polys[:]
for i, p in enumerate(polys):
polys[i] = dup_clear_denoms(p, F, K, convert=True)[1]
elif not K.is_ZZ:
raise DomainError("isolation of real roots not supported over %s" % K)
zeros, factors_dict = False, {}
if (inf is None or inf <= 0) and (sup is None or 0 <= sup):
zeros, zero_indices = True, {}
for i, p in enumerate(polys):
j, p = dup_terms_gcd(p, K)
if zeros and j > 0:
zero_indices[i] = j
for f, k in dup_factor_list(p, K)[1]:
f = tuple(f)
if f not in factors_dict:
factors_dict[f] = {i: k}
else:
factors_dict[f][i] = k
factors_list = []
for f, indices in factors_dict.items():
factors_list.append((list(f), indices))
I_neg, I_pos = _real_isolate_and_disjoin(factors_list, K, eps=eps,
inf=inf, sup=sup, strict=strict, basis=basis, fast=fast)
F = K.get_field()
if not zeros or not zero_indices:
I_zero = []
else:
if not basis:
I_zero = [((F.zero, F.zero), zero_indices)]
else:
I_zero = [((F.zero, F.zero), zero_indices, [K.one, K.zero])]
return sorted(I_neg + I_zero + I_pos)
def _disjoint_p(M, N, strict=False):
"""Check if Mobius transforms define disjoint intervals. """
a1, b1, c1, d1 = M
a2, b2, c2, d2 = N
a1d1, b1c1 = a1*d1, b1*c1
a2d2, b2c2 = a2*d2, b2*c2
if a1d1 == b1c1 and a2d2 == b2c2:
return True
if a1d1 > b1c1:
a1, c1, b1, d1 = b1, d1, a1, c1
if a2d2 > b2c2:
a2, c2, b2, d2 = b2, d2, a2, c2
if not strict:
return a2*d1 >= c2*b1 or b2*c1 <= d2*a1
else:
return a2*d1 > c2*b1 or b2*c1 < d2*a1
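# Sketch: with M == (0, 1, 1, 1) encoding the interval (0/1, 1/1) and
# N == (2, 3, 1, 1) encoding (2, 3), _disjoint_p(M, N) is True, while the
# overlapping pair (0, 2, 1, 1) and (1, 3, 1, 1) gives False.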
def _real_isolate_and_disjoin(factors, K, eps=None, inf=None, sup=None, strict=False, basis=False, fast=False):
"""Isolate real roots of a list of polynomials and disjoin intervals. """
I_pos, I_neg = [], []
for i, (f, k) in enumerate(factors):
for F, M in dup_inner_isolate_positive_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_pos.append((F, M, k, f))
for G, N in dup_inner_isolate_negative_roots(f, K, eps=eps, inf=inf, sup=sup, fast=fast, mobius=True):
I_neg.append((G, N, k, f))
for i, (f, M, k, F) in enumerate(I_pos):
for j, (g, N, m, G) in enumerate(I_pos[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[i + j + 1] = (g, N, m, G)
I_pos[i] = (f, M, k, F)
for i, (f, M, k, F) in enumerate(I_neg):
for j, (g, N, m, G) in enumerate(I_neg[i + 1:]):
while not _disjoint_p(M, N, strict=strict):
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_neg[i + j + 1] = (g, N, m, G)
I_neg[i] = (f, M, k, F)
if strict:
for i, (f, M, k, F) in enumerate(I_neg):
if not M[0]:
while not M[0]:
f, M = dup_inner_refine_real_root(f, M, K, steps=1, fast=fast, mobius=True)
I_neg[i] = (f, M, k, F)
break
for j, (g, N, m, G) in enumerate(I_pos):
if not N[0]:
while not N[0]:
g, N = dup_inner_refine_real_root(g, N, K, steps=1, fast=fast, mobius=True)
I_pos[j] = (g, N, m, G)
break
field = K.get_field()
I_neg = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_neg ]
I_pos = [ (_mobius_to_interval(M, field), k, f) for (_, M, k, f) in I_pos ]
if not basis:
I_neg = [ ((-v, -u), k) for ((u, v), k, _) in I_neg ]
I_pos = [ (( u, v), k) for ((u, v), k, _) in I_pos ]
else:
I_neg = [ ((-v, -u), k, f) for ((u, v), k, f) in I_neg ]
I_pos = [ (( u, v), k, f) for ((u, v), k, f) in I_pos ]
return I_neg, I_pos
def dup_count_real_roots(f, K, inf=None, sup=None):
"""Returns the number of distinct real roots of ``f`` in ``[inf, sup]``. """
if dup_degree(f) <= 0:
return 0
if not K.has_Field:
R, K = K, K.get_field()
f = dup_convert(f, R, K)
sturm = dup_sturm(f, K)
if inf is None:
signs_inf = dup_sign_variations([ dup_LC(s, K)*(-1)**dup_degree(s) for s in sturm ], K)
else:
signs_inf = dup_sign_variations([ dup_eval(s, inf, K) for s in sturm ], K)
if sup is None:
signs_sup = dup_sign_variations([ dup_LC(s, K) for s in sturm ], K)
else:
signs_sup = dup_sign_variations([ dup_eval(s, sup, K) for s in sturm ], K)
count = abs(signs_inf - signs_sup)
if inf is not None and not dup_eval(f, inf, K):
count += 1
return count
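# Worked sketch: for f = x**2 - 2 in dense form [1, 0, -2] over ZZ (the
# coefficients are converted to QQ internally),
# dup_count_real_roots(f, ZZ) == 2 counts both of +-sqrt(2), while
# dup_count_real_roots(f, ZZ, inf=0, sup=2) == 1 counts only the positive
# root.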
OO = 'OO' # Origin of (re, im) coordinate system
Q1 = 'Q1' # Quadrant #1 (++): re > 0 and im > 0
Q2 = 'Q2' # Quadrant #2 (-+): re < 0 and im > 0
Q3 = 'Q3' # Quadrant #3 (--): re < 0 and im < 0
Q4 = 'Q4' # Quadrant #4 (+-): re > 0 and im < 0
A1 = 'A1' # Axis #1 (+0): re > 0 and im = 0
A2 = 'A2' # Axis #2 (0+): re = 0 and im > 0
A3 = 'A3' # Axis #3 (-0): re < 0 and im = 0
A4 = 'A4' # Axis #4 (0-): re = 0 and im < 0
_rules_simple = {
# Q --> Q (same) => no change
(Q1, Q1): 0,
(Q2, Q2): 0,
(Q3, Q3): 0,
(Q4, Q4): 0,
# A -- CCW --> Q => +1/4 (CCW)
(A1, Q1): 1,
(A2, Q2): 1,
(A3, Q3): 1,
(A4, Q4): 1,
# A -- CW --> Q => -1/4 (CCW)
(A1, Q4): 2,
(A2, Q1): 2,
(A3, Q2): 2,
(A4, Q3): 2,
# Q -- CCW --> A => +1/4 (CCW)
(Q1, A2): 3,
(Q2, A3): 3,
(Q3, A4): 3,
(Q4, A1): 3,
# Q -- CW --> A => -1/4 (CCW)
(Q1, A1): 4,
(Q2, A2): 4,
(Q3, A3): 4,
(Q4, A4): 4,
# Q -- CCW --> Q => +1/2 (CCW)
(Q1, Q2): +5,
(Q2, Q3): +5,
(Q3, Q4): +5,
(Q4, Q1): +5,
# Q -- CW --> Q => -1/2 (CW)
(Q1, Q4): -5,
(Q2, Q1): -5,
(Q3, Q2): -5,
(Q4, Q3): -5,
}
_rules_ambiguous = {
# A -- CCW --> Q => { +1/4 (CCW), -9/4 (CW) }
(A1, OO, Q1): -1,
(A2, OO, Q2): -1,
(A3, OO, Q3): -1,
(A4, OO, Q4): -1,
# A -- CW --> Q => { -1/4 (CCW), +7/4 (CW) }
(A1, OO, Q4): -2,
(A2, OO, Q1): -2,
(A3, OO, Q2): -2,
(A4, OO, Q3): -2,
# Q -- CCW --> A => { +1/4 (CCW), -9/4 (CW) }
(Q1, OO, A2): -3,
(Q2, OO, A3): -3,
(Q3, OO, A4): -3,
(Q4, OO, A1): -3,
# Q -- CW --> A => { -1/4 (CCW), +7/4 (CW) }
(Q1, OO, A1): -4,
(Q2, OO, A2): -4,
(Q3, OO, A3): -4,
(Q4, OO, A4): -4,
# A -- OO --> A => { +1 (CCW), -1 (CW) }
(A1, A3): 7,
(A2, A4): 7,
(A3, A1): 7,
(A4, A2): 7,
(A1, OO, A3): 7,
(A2, OO, A4): 7,
(A3, OO, A1): 7,
(A4, OO, A2): 7,
# Q -- DIA --> Q => { +1 (CCW), -1 (CW) }
(Q1, Q3): 8,
(Q2, Q4): 8,
(Q3, Q1): 8,
(Q4, Q2): 8,
(Q1, OO, Q3): 8,
(Q2, OO, Q4): 8,
(Q3, OO, Q1): 8,
(Q4, OO, Q2): 8,
# A --- R ---> A => { +1/2 (CCW), -3/2 (CW) }
(A1, A2): 9,
(A2, A3): 9,
(A3, A4): 9,
(A4, A1): 9,
(A1, OO, A2): 9,
(A2, OO, A3): 9,
(A3, OO, A4): 9,
(A4, OO, A1): 9,
# A --- L ---> A => { +3/2 (CCW), -1/2 (CW) }
(A1, A4): 10,
(A2, A1): 10,
(A3, A2): 10,
(A4, A3): 10,
(A1, OO, A4): 10,
(A2, OO, A1): 10,
(A3, OO, A2): 10,
(A4, OO, A3): 10,
# Q --- 1 ---> A => { +3/4 (CCW), -5/4 (CW) }
(Q1, A3): 11,
(Q2, A4): 11,
(Q3, A1): 11,
(Q4, A2): 11,
(Q1, OO, A3): 11,
(Q2, OO, A4): 11,
(Q3, OO, A1): 11,
(Q4, OO, A2): 11,
# Q --- 2 ---> A => { +5/4 (CCW), -3/4 (CW) }
(Q1, A4): 12,
(Q2, A1): 12,
(Q3, A2): 12,
(Q4, A3): 12,
(Q1, OO, A4): 12,
(Q2, OO, A1): 12,
(Q3, OO, A2): 12,
(Q4, OO, A3): 12,
# A --- 1 ---> Q => { +5/4 (CCW), -3/4 (CW) }
(A1, Q3): 13,
(A2, Q4): 13,
(A3, Q1): 13,
(A4, Q2): 13,
(A1, OO, Q3): 13,
(A2, OO, Q4): 13,
(A3, OO, Q1): 13,
(A4, OO, Q2): 13,
# A --- 2 ---> Q => { +3/4 (CCW), -5/4 (CW) }
(A1, Q2): 14,
(A2, Q3): 14,
(A3, Q4): 14,
(A4, Q1): 14,
(A1, OO, Q2): 14,
(A2, OO, Q3): 14,
(A3, OO, Q4): 14,
(A4, OO, Q1): 14,
# Q --> OO --> Q => { +1/2 (CCW), -3/2 (CW) }
(Q1, OO, Q2): 15,
(Q2, OO, Q3): 15,
(Q3, OO, Q4): 15,
(Q4, OO, Q1): 15,
# Q --> OO --> Q => { +3/2 (CCW), -1/2 (CW) }
(Q1, OO, Q4): 16,
(Q2, OO, Q1): 16,
(Q3, OO, Q2): 16,
(Q4, OO, Q3): 16,
# A --> OO --> A => { +2 (CCW), 0 (CW) }
(A1, OO, A1): 17,
(A2, OO, A2): 17,
(A3, OO, A3): 17,
(A4, OO, A4): 17,
# Q --> OO --> Q => { +2 (CCW), 0 (CW) }
(Q1, OO, Q1): 18,
(Q2, OO, Q2): 18,
(Q3, OO, Q3): 18,
(Q4, OO, Q4): 18,
}
_values = {
0: [( 0, 1)],
1: [(+1, 4)],
2: [(-1, 4)],
3: [(+1, 4)],
4: [(-1, 4)],
-1: [(+9, 4), (+1, 4)],
-2: [(+7, 4), (-1, 4)],
-3: [(+9, 4), (+1, 4)],
-4: [(+7, 4), (-1, 4)],
+5: [(+1, 2)],
-5: [(-1, 2)],
7: [(+1, 1), (-1, 1)],
8: [(+1, 1), (-1, 1)],
9: [(+1, 2), (-3, 2)],
10: [(+3, 2), (-1, 2)],
11: [(+3, 4), (-5, 4)],
12: [(+5, 4), (-3, 4)],
13: [(+5, 4), (-3, 4)],
14: [(+3, 4), (-5, 4)],
15: [(+1, 2), (-3, 2)],
16: [(+3, 2), (-1, 2)],
17: [(+2, 1), ( 0, 1)],
18: [(+2, 1), ( 0, 1)],
}
def _classify_point(re, im):
"""Return the half-axis (or origin) on which (re, im) point is located. """
if not re and not im:
return OO
if not re:
if im > 0:
return A2
else:
return A4
elif not im:
if re > 0:
return A1
else:
return A3
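# Sketch: _classify_point(0, 3) -> A2, _classify_point(-1, 0) -> A3 and
# _classify_point(0, 0) -> OO; a point off both axes, e.g. (1, 2), falls
# through and yields None, which callers filter out.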
def _intervals_to_quadrants(intervals, f1, f2, s, t, F):
"""Generate a sequence of extended quadrants from a list of critical points. """
if not intervals:
return []
Q = []
if not f1:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f2, t, F) > 0:
return [OO, A2]
else:
return [OO, A4]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f2, (s + a)/2, F) > 0:
Q.extend([OO, A2])
f2_sgn = +1
else:
Q.extend([OO, A4])
f2_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f2, s, F) > 0:
Q.append(A2)
f2_sgn = +1
else:
Q.append(A4)
f2_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if a != t:
if f2_sgn > 0:
Q.append(A2)
else:
Q.append(A4)
return Q
if not f2:
(a, b), _, _ = intervals[0]
if a == b == s:
if len(intervals) == 1:
if dup_eval(f1, t, F) > 0:
return [OO, A1]
else:
return [OO, A3]
else:
(a, _), _, _ = intervals[1]
if dup_eval(f1, (s + a)/2, F) > 0:
Q.extend([OO, A1])
f1_sgn = +1
else:
Q.extend([OO, A3])
f1_sgn = -1
intervals = intervals[1:]
else:
if dup_eval(f1, s, F) > 0:
Q.append(A1)
f1_sgn = +1
else:
Q.append(A3)
f1_sgn = -1
for (a, _), indices, _ in intervals:
Q.append(OO)
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if a != t:
if f1_sgn > 0:
Q.append(A1)
else:
Q.append(A3)
return Q
re = dup_eval(f1, s, F)
im = dup_eval(f2, s, F)
if not re or not im:
Q.append(_classify_point(re, im))
if len(intervals) == 1:
re = dup_eval(f1, t, F)
im = dup_eval(f2, t, F)
else:
(a, _), _, _ = intervals[1]
re = dup_eval(f1, (s + a)/2, F)
im = dup_eval(f2, (s + a)/2, F)
intervals = intervals[1:]
if re > 0:
f1_sgn = +1
else:
f1_sgn = -1
if im > 0:
f2_sgn = +1
else:
f2_sgn = -1
sgn = {
(+1, +1): Q1,
(-1, +1): Q2,
(-1, -1): Q3,
(+1, -1): Q4,
}
Q.append(sgn[(f1_sgn, f2_sgn)])
for (a, b), indices, _ in intervals:
if a == b:
re = dup_eval(f1, a, F)
im = dup_eval(f2, a, F)
cls = _classify_point(re, im)
if cls is not None:
Q.append(cls)
if 0 in indices:
if indices[0] % 2 == 1:
f1_sgn = -f1_sgn
if 1 in indices:
if indices[1] % 2 == 1:
f2_sgn = -f2_sgn
if not (a == b and b == t):
Q.append(sgn[(f1_sgn, f2_sgn)])
return Q
def _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=None):
"""Transform sequences of quadrants to a sequence of rules. """
if exclude is True:
edges = [1, 1, 0, 0]
corners = {
(0, 1): 1,
(1, 2): 1,
(2, 3): 0,
(3, 0): 1,
}
else:
edges = [0, 0, 0, 0]
corners = {
(0, 1): 0,
(1, 2): 0,
(2, 3): 0,
(3, 0): 0,
}
if exclude is not None and exclude is not True:
exclude = set(exclude)
for i, edge in enumerate(['S', 'E', 'N', 'W']):
if edge in exclude:
edges[i] = 1
for i, corner in enumerate(['SW', 'SE', 'NE', 'NW']):
if corner in exclude:
corners[((i - 1) % 4, i)] = 1
QQ, rules = [Q_L1, Q_L2, Q_L3, Q_L4], []
for i, Q in enumerate(QQ):
if not Q:
continue
if Q[-1] == OO:
Q = Q[:-1]
if Q[0] == OO:
j, Q = (i - 1) % 4, Q[1:]
qq = (QQ[j][-2], OO, Q[0])
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], corners[(j, i)]))
else:
raise NotImplementedError("3 element rule (corner): " + str(qq))
q1, k = Q[0], 1
while k < len(Q):
q2, k = Q[k], k + 1
if q2 != OO:
qq = (q1, q2)
if qq in _rules_simple:
rules.append((_rules_simple[qq], 0))
elif qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("2 element rule (inside): " + str(qq))
else:
qq, k = (q1, q2, Q[k]), k + 1
if qq in _rules_ambiguous:
rules.append((_rules_ambiguous[qq], edges[i]))
else:
raise NotImplementedError("3 element rule (edge): " + str(qq))
q1 = qq[-1]
return rules
def _reverse_intervals(intervals):
"""Reverse intervals for traversal from right to left and from top to bottom. """
return [ ((b, a), indices, f) for (a, b), indices, f in reversed(intervals) ]
def _winding_number(T, field):
"""Compute the winding number of the input polynomial, i.e. the number of roots. """
return int(sum([ field(*_values[t][i]) for t, i in T ]) / field(2))
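def _winding_number_example():  # illustrative sketch, not part of the algorithm
    """Example of the rule arithmetic above (assumes SymPy's QQ domain is
    importable): four plain CCW quadrant transitions (rule +5, +1/2 turn
    each, value index 0) sum to one full turn, i.e. one enclosed root."""
    from sympy.polys.domains import QQ  # assumed import path
    T = [(+5, 0)] * 4  # hypothetical (rule, value-index) pairs
    assert _winding_number(T, QQ) == 1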
def dup_count_complex_roots(f, K, inf=None, sup=None, exclude=None):
"""Count all roots in [u + v*I, s + t*I] rectangle using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("complex root counting is not supported over %s" % K)
if K.is_ZZ:
R, F = K, K.get_field()
else:
R, F = K.get_ring(), K
f = dup_convert(f, K, F)
if inf is None or sup is None:
n, lc = dup_degree(f), abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
if inf is None:
(u, v) = (-B, -B)
else:
(u, v) = inf
if sup is None:
(s, t) = (+B, +B)
else:
(s, t) = sup
f1, f2 = dup_real_imag(f, F)
f1L1F = dmp_eval_in(f1, v, 1, 1, F)
f2L1F = dmp_eval_in(f2, v, 1, 1, F)
_, f1L1R = dup_clear_denoms(f1L1F, F, R, convert=True)
_, f2L1R = dup_clear_denoms(f2L1F, F, R, convert=True)
f1L2F = dmp_eval_in(f1, s, 0, 1, F)
f2L2F = dmp_eval_in(f2, s, 0, 1, F)
_, f1L2R = dup_clear_denoms(f1L2F, F, R, convert=True)
_, f2L2R = dup_clear_denoms(f2L2F, F, R, convert=True)
f1L3F = dmp_eval_in(f1, t, 1, 1, F)
f2L3F = dmp_eval_in(f2, t, 1, 1, F)
_, f1L3R = dup_clear_denoms(f1L3F, F, R, convert=True)
_, f2L3R = dup_clear_denoms(f2L3F, F, R, convert=True)
f1L4F = dmp_eval_in(f1, u, 0, 1, F)
f2L4F = dmp_eval_in(f2, u, 0, 1, F)
_, f1L4R = dup_clear_denoms(f1L4F, F, R, convert=True)
_, f2L4R = dup_clear_denoms(f2L4F, F, R, convert=True)
S_L1 = [f1L1R, f2L1R]
S_L2 = [f1L2R, f2L2R]
S_L3 = [f1L3R, f2L3R]
S_L4 = [f1L4R, f2L4R]
I_L1 = dup_isolate_real_roots_list(S_L1, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L2 = dup_isolate_real_roots_list(S_L2, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = dup_isolate_real_roots_list(S_L3, R, inf=u, sup=s, fast=True, basis=True, strict=True)
I_L4 = dup_isolate_real_roots_list(S_L4, R, inf=v, sup=t, fast=True, basis=True, strict=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1F, f2L1F, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2F, f2L2F, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3F, f2L3F, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4F, f2L4F, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4, exclude=exclude)
return _winding_number(T, F)
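def _count_complex_roots_example():  # illustrative sketch, not original code
    """Usage sketch for dup_count_complex_roots (assumes SymPy's ZZ domain
    and the dense-polynomial conventions used above): x**2 + 1 has its two
    roots, +I and -I, inside the default search rectangle."""
    from sympy.polys.domains import ZZ  # assumed import path
    f = [ZZ(1), ZZ(0), ZZ(1)]  # dense coefficients of x**2 + 1
    assert dup_count_complex_roots(f, ZZ) == 2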
def _vertical_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Vertical bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
x = (u + s) / 2
f1V = dmp_eval_in(f1, x, 0, 1, F)
f2V = dmp_eval_in(f2, x, 0, 1, F)
I_V = dup_isolate_real_roots_list([f1V, f2V], F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L1_L, I_L1_R = [], []
I_L2_L, I_L2_R = I_V, I_L2
I_L3_L, I_L3_R = [], []
I_L4_L, I_L4_R = I_L4, _reverse_intervals(I_V)
for I in I_L1:
(a, b), indices, h = I
if a == b:
if a == x:
I_L1_L.append(I)
I_L1_R.append(I)
elif a < x:
I_L1_L.append(I)
else:
I_L1_R.append(I)
else:
if b <= x:
I_L1_L.append(I)
elif a >= x:
I_L1_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L1_L.append(((a, b), indices, h))
if a >= x:
I_L1_R.append(((a, b), indices, h))
for I in I_L3:
(b, a), indices, h = I
if a == b:
if a == x:
I_L3_L.append(I)
I_L3_R.append(I)
elif a < x:
I_L3_L.append(I)
else:
I_L3_R.append(I)
else:
if b <= x:
I_L3_L.append(I)
elif a >= x:
I_L3_R.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=x, fast=True)
if b <= x:
I_L3_L.append(((b, a), indices, h))
if a >= x:
I_L3_R.append(((b, a), indices, h))
Q_L1_L = _intervals_to_quadrants(I_L1_L, f1L1F, f2L1F, u, x, F)
Q_L2_L = _intervals_to_quadrants(I_L2_L, f1V, f2V, v, t, F)
Q_L3_L = _intervals_to_quadrants(I_L3_L, f1L3F, f2L3F, x, u, F)
Q_L4_L = Q_L4
Q_L1_R = _intervals_to_quadrants(I_L1_R, f1L1F, f2L1F, x, s, F)
Q_L2_R = Q_L2
Q_L3_R = _intervals_to_quadrants(I_L3_R, f1L3F, f2L3F, s, x, F)
Q_L4_R = _intervals_to_quadrants(I_L4_R, f1V, f2V, t, v, F)
T_L = _traverse_quadrants(Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L, exclude=True)
T_R = _traverse_quadrants(Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R, exclude=True)
N_L = _winding_number(T_L, F)
N_R = _winding_number(T_R, F)
I_L = (I_L1_L, I_L2_L, I_L3_L, I_L4_L)
Q_L = (Q_L1_L, Q_L2_L, Q_L3_L, Q_L4_L)
I_R = (I_L1_R, I_L2_R, I_L3_R, I_L4_R)
Q_R = (Q_L1_R, Q_L2_R, Q_L3_R, Q_L4_R)
F1_L = (f1L1F, f1V, f1L3F, f1L4F)
F2_L = (f2L1F, f2V, f2L3F, f2L4F)
F1_R = (f1L1F, f1L2F, f1L3F, f1V)
F2_R = (f2L1F, f2L2F, f2L3F, f2V)
a, b = (u, v), (x, t)
c, d = (x, v), (s, t)
D_L = (N_L, a, b, I_L, Q_L, F1_L, F2_L)
D_R = (N_R, c, d, I_R, Q_R, F1_R, F2_R)
return D_L, D_R
def _horizontal_bisection(N, a, b, I, Q, F1, F2, f1, f2, F):
"""Horizontal bisection step in Collins-Krandick root isolation algorithm. """
(u, v), (s, t) = a, b
I_L1, I_L2, I_L3, I_L4 = I
Q_L1, Q_L2, Q_L3, Q_L4 = Q
f1L1F, f1L2F, f1L3F, f1L4F = F1
f2L1F, f2L2F, f2L3F, f2L4F = F2
y = (v + t) / 2
f1H = dmp_eval_in(f1, y, 1, 1, F)
f2H = dmp_eval_in(f2, y, 1, 1, F)
I_H = dup_isolate_real_roots_list([f1H, f2H], F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L1_B, I_L1_U = I_L1, I_H
I_L2_B, I_L2_U = [], []
I_L3_B, I_L3_U = _reverse_intervals(I_H), I_L3
I_L4_B, I_L4_U = [], []
for I in I_L2:
(a, b), indices, h = I
if a == b:
if a == y:
I_L2_B.append(I)
I_L2_U.append(I)
elif a < y:
I_L2_B.append(I)
else:
I_L2_U.append(I)
else:
if b <= y:
I_L2_B.append(I)
elif a >= y:
I_L2_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L2_B.append(((a, b), indices, h))
if a >= y:
I_L2_U.append(((a, b), indices, h))
for I in I_L4:
(b, a), indices, h = I
if a == b:
if a == y:
I_L4_B.append(I)
I_L4_U.append(I)
elif a < y:
I_L4_B.append(I)
else:
I_L4_U.append(I)
else:
if b <= y:
I_L4_B.append(I)
elif a >= y:
I_L4_U.append(I)
else:
a, b = dup_refine_real_root(h, a, b, F.get_ring(), disjoint=y, fast=True)
if b <= y:
I_L4_B.append(((b, a), indices, h))
if a >= y:
I_L4_U.append(((b, a), indices, h))
Q_L1_B = Q_L1
Q_L2_B = _intervals_to_quadrants(I_L2_B, f1L2F, f2L2F, v, y, F)
Q_L3_B = _intervals_to_quadrants(I_L3_B, f1H, f2H, s, u, F)
Q_L4_B = _intervals_to_quadrants(I_L4_B, f1L4F, f2L4F, y, v, F)
Q_L1_U = _intervals_to_quadrants(I_L1_U, f1H, f2H, u, s, F)
Q_L2_U = _intervals_to_quadrants(I_L2_U, f1L2F, f2L2F, y, t, F)
Q_L3_U = Q_L3
Q_L4_U = _intervals_to_quadrants(I_L4_U, f1L4F, f2L4F, t, y, F)
T_B = _traverse_quadrants(Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B, exclude=True)
T_U = _traverse_quadrants(Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U, exclude=True)
N_B = _winding_number(T_B, F)
N_U = _winding_number(T_U, F)
I_B = (I_L1_B, I_L2_B, I_L3_B, I_L4_B)
Q_B = (Q_L1_B, Q_L2_B, Q_L3_B, Q_L4_B)
I_U = (I_L1_U, I_L2_U, I_L3_U, I_L4_U)
Q_U = (Q_L1_U, Q_L2_U, Q_L3_U, Q_L4_U)
F1_B = (f1L1F, f1L2F, f1H, f1L4F)
F2_B = (f2L1F, f2L2F, f2H, f2L4F)
F1_U = (f1H, f1L2F, f1L3F, f1L4F)
F2_U = (f2H, f2L2F, f2L3F, f2L4F)
a, b = (u, v), (s, y)
c, d = (u, y), (s, t)
D_B = (N_B, a, b, I_B, Q_B, F1_B, F2_B)
D_U = (N_U, c, d, I_U, Q_U, F1_U, F2_U)
return D_B, D_U
def _depth_first_select(rectangles):
"""Find a rectangle of minimum area for bisection. """
min_area, j = None, None
for i, (_, (u, v), (s, t), _, _, _, _) in enumerate(rectangles):
area = (s - u)*(t - v)
if min_area is None or area < min_area:
min_area, j = area, i
return rectangles.pop(j)
def _rectangle_small_p(a, b, eps):
"""Return ``True`` if the given rectangle is small enough. """
(u, v), (s, t) = a, b
if eps is not None:
return s - u < eps and t - v < eps
else:
return True
def dup_isolate_complex_roots_sqf(f, K, eps=None, inf=None, sup=None, blackbox=False):
"""Isolate complex roots of a square-free polynomial using Collins-Krandick algorithm. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of complex roots is not supported over %s" % K)
if dup_degree(f) <= 0:
return []
if K.is_ZZ:
R, F = K, K.get_field()
else:
R, F = K.get_ring(), K
f = dup_convert(f, K, F)
n, lc = dup_degree(f), abs(dup_LC(f, F))
B = 2*max([ F.quo(abs(c), lc) for c in f ])
(u, v), (s, t) = (-B, F.zero), (B, B)
if inf is not None:
u = inf
if sup is not None:
s = sup
if v < 0 or t <= v or s <= u:
raise ValueError("not a valid complex isolation rectangle")
f1, f2 = dup_real_imag(f, F)
f1L1 = dmp_eval_in(f1, v, 1, 1, F)
f2L1 = dmp_eval_in(f2, v, 1, 1, F)
f1L2 = dmp_eval_in(f1, s, 0, 1, F)
f2L2 = dmp_eval_in(f2, s, 0, 1, F)
f1L3 = dmp_eval_in(f1, t, 1, 1, F)
f2L3 = dmp_eval_in(f2, t, 1, 1, F)
f1L4 = dmp_eval_in(f1, u, 0, 1, F)
f2L4 = dmp_eval_in(f2, u, 0, 1, F)
S_L1 = [f1L1, f2L1]
S_L2 = [f1L2, f2L2]
S_L3 = [f1L3, f2L3]
S_L4 = [f1L4, f2L4]
I_L1 = dup_isolate_real_roots_list(S_L1, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L2 = dup_isolate_real_roots_list(S_L2, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = dup_isolate_real_roots_list(S_L3, F, inf=u, sup=s, fast=True, strict=True, basis=True)
I_L4 = dup_isolate_real_roots_list(S_L4, F, inf=v, sup=t, fast=True, strict=True, basis=True)
I_L3 = _reverse_intervals(I_L3)
I_L4 = _reverse_intervals(I_L4)
Q_L1 = _intervals_to_quadrants(I_L1, f1L1, f2L1, u, s, F)
Q_L2 = _intervals_to_quadrants(I_L2, f1L2, f2L2, v, t, F)
Q_L3 = _intervals_to_quadrants(I_L3, f1L3, f2L3, s, u, F)
Q_L4 = _intervals_to_quadrants(I_L4, f1L4, f2L4, t, v, F)
T = _traverse_quadrants(Q_L1, Q_L2, Q_L3, Q_L4)
N = _winding_number(T, F)
if not N:
return []
I = (I_L1, I_L2, I_L3, I_L4)
Q = (Q_L1, Q_L2, Q_L3, Q_L4)
F1 = (f1L1, f1L2, f1L3, f1L4)
F2 = (f2L1, f2L2, f2L3, f2L4)
rectangles, roots = [(N, (u, v), (s, t), I, Q, F1, F2)], []
while rectangles:
N, (u, v), (s, t), I, Q, F1, F2 = _depth_first_select(rectangles)
if s - u > t - v:
D_L, D_R = _vertical_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_L, a, b, I_L, Q_L, F1_L, F2_L = D_L
N_R, c, d, I_R, Q_R, F1_R, F2_R = D_R
if N_L >= 1:
if N_L == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(a, b, I_L, Q_L, F1_L, F2_L, f1, f2, F))
else:
rectangles.append(D_L)
if N_R >= 1:
if N_R == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(c, d, I_R, Q_R, F1_R, F2_R, f1, f2, F))
else:
rectangles.append(D_R)
else:
D_B, D_U = _horizontal_bisection(N, (u, v), (s, t), I, Q, F1, F2, f1, f2, F)
N_B, a, b, I_B, Q_B, F1_B, F2_B = D_B
N_U, c, d, I_U, Q_U, F1_U, F2_U = D_U
if N_B >= 1:
if N_B == 1 and _rectangle_small_p(a, b, eps):
roots.append(ComplexInterval(
a, b, I_B, Q_B, F1_B, F2_B, f1, f2, F))
else:
rectangles.append(D_B)
if N_U >= 1:
if N_U == 1 and _rectangle_small_p(c, d, eps):
roots.append(ComplexInterval(
c, d, I_U, Q_U, F1_U, F2_U, f1, f2, F))
else:
rectangles.append(D_U)
_roots, roots = sorted(roots, key=lambda r: (r.ax, r.ay)), []
for root in _roots:
roots.extend([root.conjugate(), root])
if blackbox:
return roots
else:
return [ r.as_tuple() for r in roots ]
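def _isolate_complex_roots_example():  # illustrative sketch, not original code
    """Usage sketch for dup_isolate_complex_roots_sqf (assumes SymPy's ZZ
    domain): each returned ((ax, ay), (bx, by)) rectangle isolates one root
    of x**2 + 1, with conjugates mirrored into the lower half-plane."""
    from sympy.polys.domains import ZZ  # assumed import path
    rects = dup_isolate_complex_roots_sqf([ZZ(1), ZZ(0), ZZ(1)], ZZ)
    assert len(rects) == 2  # one rectangle per root: near -I and +I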
def dup_isolate_all_roots_sqf(f, K, eps=None, inf=None, sup=None, fast=False, blackbox=False):
"""Isolate real and complex roots of a square-free polynomial ``f``. """
return (
dup_isolate_real_roots_sqf( f, K, eps=eps, inf=inf, sup=sup, fast=fast, blackbox=blackbox),
dup_isolate_complex_roots_sqf(f, K, eps=eps, inf=inf, sup=sup, blackbox=blackbox))
def dup_isolate_all_roots(f, K, eps=None, inf=None, sup=None, fast=False):
"""Isolate real and complex roots of a non-square-free polynomial ``f``. """
if not K.is_ZZ and not K.is_QQ:
raise DomainError("isolation of real and complex roots is not supported over %s" % K)
_, factors = dup_sqf_list(f, K)
if len(factors) == 1:
((f, k),) = factors
real_part, complex_part = dup_isolate_all_roots_sqf(
f, K, eps=eps, inf=inf, sup=sup, fast=fast)
real_part = [ ((a, b), k) for (a, b) in real_part ]
complex_part = [ ((a, b), k) for (a, b) in complex_part ]
return real_part, complex_part
else:
raise NotImplementedError( "only trivial square-free polynomials are supported")
class RealInterval(object):
"""A fully qualified representation of a real isolation interval. """
def __init__(self, data, f, dom):
"""Initialize new real interval with complete information. """
if len(data) == 2:
s, t = data
self.neg = False
if s < 0:
if t <= 0:
f, s, t, self.neg = dup_mirror(f, dom), -t, -s, True
else:
raise ValueError("can't refine a real root in (%s, %s)" % (s, t))
a, b, c, d = _mobius_from_interval((s, t), dom.get_field())
f = dup_transform(f, dup_strip([a, b]),
dup_strip([c, d]), dom)
self.mobius = a, b, c, d
else:
self.mobius = data[:-1]
self.neg = data[-1]
self.f, self.dom = f, dom
@property
def a(self):
"""Return the position of the left end. """
field = self.dom.get_field()
a, b, c, d = self.mobius
if not self.neg:
if a*d < b*c:
return field(a, c)
return field(b, d)
else:
if a*d > b*c:
return -field(a, c)
return -field(b, d)
@property
def b(self):
"""Return the position of the right end. """
was = self.neg
self.neg = not was
rv = -self.a
self.neg = was
return rv
@property
def dx(self):
"""Return width of the real isolating interval. """
return self.b - self.a
@property
def center(self):
"""Return the center of the real isolating interval. """
return (self.a + self.b)/2
def as_tuple(self):
"""Return tuple representation of real isolating interval. """
return (self.a, self.b)
def __repr__(self):
return "(%s, %s)" % (self.a, self.b)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
return (self.b <= other.a or other.b <= self.a)
def _inner_refine(self):
"""Internal one step real root refinement procedure. """
if self.mobius is None:
return self
f, mobius = dup_inner_refine_real_root(
self.f, self.mobius, self.dom, steps=1, mobius=True)
return RealInterval(mobius + (self.neg,), f, self.dom)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx):
"""Refine an isolating interval until it is of sufficiently small size. """
expr = self
while not (expr.dx < dx):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of real root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of real root refinement algorithm. """
return self._inner_refine()
class ComplexInterval(object):
"""A fully qualified representation of a complex isolation interval.
    The printed form is shown as (ax, bx) x (ay, by): the projections of
    the interval's rectangle onto the real and imaginary axes."""
def __init__(self, a, b, I, Q, F1, F2, f1, f2, dom, conj=False):
"""Initialize new complex interval with complete information. """
self.a, self.b = a, b # the southwest and northeast corner: (x1, y1), (x2, y2)
self.I, self.Q = I, Q
self.f1, self.F1 = f1, F1
self.f2, self.F2 = f2, F2
self.dom = dom
self.conj = conj
@property
def ax(self):
"""Return ``x`` coordinate of south-western corner. """
return self.a[0]
@property
def ay(self):
"""Return ``y`` coordinate of south-western corner. """
if not self.conj:
return self.a[1]
else:
return -self.b[1]
@property
def bx(self):
"""Return ``x`` coordinate of north-eastern corner. """
return self.b[0]
@property
def by(self):
"""Return ``y`` coordinate of north-eastern corner. """
if not self.conj:
return self.b[1]
else:
return -self.a[1]
@property
def dx(self):
"""Return width of the complex isolating interval. """
return self.b[0] - self.a[0]
@property
def dy(self):
"""Return height of the complex isolating interval. """
return self.b[1] - self.a[1]
@property
def center(self):
"""Return the center of the complex isolating interval. """
return ((self.ax + self.bx)/2, (self.ay + self.by)/2)
def as_tuple(self):
"""Return tuple representation of complex isolating interval. """
return ((self.ax, self.ay), (self.bx, self.by))
def __repr__(self):
return "(%s, %s) x (%s, %s)" % (self.ax, self.bx, self.ay, self.by)
def conjugate(self):
"""This complex interval really is located in lower half-plane. """
return ComplexInterval(self.a, self.b, self.I, self.Q,
self.F1, self.F2, self.f1, self.f2, self.dom, conj=True)
def is_disjoint(self, other):
"""Return ``True`` if two isolation intervals are disjoint. """
if self.conj != other.conj:
return True
re_distinct = (self.bx <= other.ax or other.bx <= self.ax)
if re_distinct:
return True
im_distinct = (self.by <= other.ay or other.by <= self.ay)
return im_distinct
def _inner_refine(self):
"""Internal one step complex root refinement procedure. """
(u, v), (s, t) = self.a, self.b
I, Q = self.I, self.Q
f1, F1 = self.f1, self.F1
f2, F2 = self.f2, self.F2
dom = self.dom
if s - u > t - v:
D_L, D_R = _vertical_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_L[0] == 1:
_, a, b, I, Q, F1, F2 = D_L
else:
_, a, b, I, Q, F1, F2 = D_R
else:
D_B, D_U = _horizontal_bisection(1, (u, v), (s, t), I, Q, F1, F2, f1, f2, dom)
if D_B[0] == 1:
_, a, b, I, Q, F1, F2 = D_B
else:
_, a, b, I, Q, F1, F2 = D_U
return ComplexInterval(a, b, I, Q, F1, F2, f1, f2, dom, self.conj)
def refine_disjoint(self, other):
"""Refine an isolating interval until it is disjoint with another one. """
expr = self
while not expr.is_disjoint(other):
expr, other = expr._inner_refine(), other._inner_refine()
return expr, other
def refine_size(self, dx, dy=None):
"""Refine an isolating interval until it is of sufficiently small size. """
if dy is None:
dy = dx
expr = self
while not (expr.dx < dx and expr.dy < dy):
expr = expr._inner_refine()
return expr
def refine_step(self, steps=1):
"""Perform several steps of complex root refinement algorithm. """
expr = self
for _ in range(steps):
expr = expr._inner_refine()
return expr
def refine(self):
"""Perform one step of complex root refinement algorithm. """
return self._inner_refine()
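def _refine_complex_interval_example():  # illustrative sketch, not original code
    """Sketch of interval refinement (assumes SymPy's ZZ and QQ domains):
    with blackbox=True the isolator returns ComplexInterval objects, which
    refine_size shrinks below a requested width and height."""
    from sympy.polys.domains import ZZ, QQ  # assumed import paths
    roots = dup_isolate_complex_roots_sqf([ZZ(1), ZZ(0), ZZ(1)], ZZ, blackbox=True)
    root = roots[1]  # the upper half-plane interval (index 0 is its conjugate)
    root = root.refine_size(QQ(1, 100))
    assert root.dx < QQ(1, 100) and root.dy < QQ(1, 100)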
| bsd-3-clause |
nest/nest-simulator | examples/NESTServerClient/NESTServerClient.py | 17 | 2145 | # -*- coding: utf-8 -*-
#
# NESTServerClient.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import requests
from werkzeug.exceptions import BadRequest
__all__ = [
'NESTServerClient',
]
def encode(response):
    if response.ok:
        return response.json()
    elif response.status_code == 400:
        raise BadRequest(response.text)
    else:
        # Surface any other HTTP error instead of silently returning None.
        response.raise_for_status()
class NESTServerClient(object):
def __init__(self, host='localhost', port=5000):
self.url = 'http://{}:{}/'.format(host, port)
self.headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
def __getattr__(self, call):
def method(*args, **kwargs):
kwargs.update({'args': args})
response = requests.post(self.url + 'api/' + call, json=kwargs, headers=self.headers)
return encode(response)
return method
def exec_script(self, source, return_vars=None):
params = {
'source': source,
'return': return_vars,
}
response = requests.post(self.url + 'exec', json=params, headers=self.headers)
return encode(response)
def from_file(self, filename, return_vars=None):
with open(filename, 'r') as f:
lines = f.readlines()
script = ''.join(lines)
print('Execute script code of {}'.format(filename))
print('Return variables: {}'.format(return_vars))
print(20*'-')
print(script)
print(20*'-')
return self.exec_script(script, return_vars)
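if __name__ == '__main__':
    # Usage sketch: assumes a NEST Server is reachable on localhost:5000;
    # ResetKernel and Create are forwarded to the corresponding PyNEST API
    # calls by __getattr__ above.
    nsc = NESTServerClient()
    nsc.ResetKernel()
    neurons = nsc.Create('iaf_psc_alpha', 2)
    print('Created neurons: {}'.format(neurons))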
| gpl-2.0 |
lungetech/luigi | test/scheduler_visualisation_test.py | 5 | 19278 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import division
import os
import tempfile
import time
from helpers import unittest
import luigi
import luigi.notifications
import luigi.scheduler
import luigi.six as six
import luigi.worker
luigi.notifications.DEBUG = True
tempdir = tempfile.mkdtemp()
class DummyTask(luigi.Task):
task_id = luigi.Parameter()
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, str(self)))
class FactorTask(luigi.Task):
product = luigi.Parameter()
def requires(self):
for factor in range(2, self.product):
if self.product % factor == 0:
yield FactorTask(factor)
yield FactorTask(self.product // factor)
return
def run(self):
f = self.output().open('w')
f.close()
def output(self):
return luigi.LocalTarget(os.path.join(tempdir, 'luigi_test_factor_%d' % self.product))
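# Illustrative note (mirrors test_dep_graph_tree below): the first divisor
# found wins, so FactorTask(30) requires {FactorTask(2), FactorTask(15)}
# and FactorTask(15) in turn requires {FactorTask(3), FactorTask(5)}.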
class BadReqTask(luigi.Task):
succeed = luigi.BoolParameter()
def requires(self):
assert self.succeed
yield BadReqTask(False)
def run(self):
pass
def complete(self):
return False
class FailingTask(luigi.Task):
task_namespace = __name__
task_id = luigi.Parameter()
def run(self):
raise Exception("Error Message")
class OddFibTask(luigi.Task):
n = luigi.IntParameter()
done = luigi.BoolParameter(default=True, significant=False)
def requires(self):
if self.n > 1:
yield OddFibTask(self.n - 1, self.done)
yield OddFibTask(self.n - 2, self.done)
def complete(self):
return self.n % 2 == 0 and self.done
def run(self):
assert False
class SchedulerVisualisationTest(unittest.TestCase):
def setUp(self):
self.scheduler = luigi.scheduler.CentralPlannerScheduler()
def tearDown(self):
pass
def _assert_complete(self, tasks):
for t in tasks:
self.assert_(t.complete())
    def _build(self, tasks, workers=1):
        with luigi.worker.Worker(scheduler=self.scheduler, worker_processes=workers) as w:
for t in tasks:
w.add(t)
w.run()
def _remote(self):
return self.scheduler
def _test_run(self, workers):
tasks = [DummyTask(i) for i in range(20)]
self._build(tasks, workers=workers)
self._assert_complete(tasks)
def test_graph(self):
start = time.time()
tasks = [DummyTask(task_id=1), DummyTask(task_id=2)]
self._build(tasks)
self._assert_complete(tasks)
end = time.time()
remote = self._remote()
graph = remote.graph()
self.assertEqual(len(graph), 2)
self.assert_(DummyTask(task_id=1).task_id in graph)
d1 = graph[DummyTask(task_id=1).task_id]
self.assertEqual(d1[u'status'], u'DONE')
self.assertEqual(d1[u'deps'], [])
self.assertGreaterEqual(d1[u'start_time'], start)
self.assertLessEqual(d1[u'start_time'], end)
d2 = graph[DummyTask(task_id=2).task_id]
self.assertEqual(d2[u'status'], u'DONE')
self.assertEqual(d2[u'deps'], [])
self.assertGreaterEqual(d2[u'start_time'], start)
self.assertLessEqual(d2[u'start_time'], end)
def test_large_graph_truncate(self):
class LinearTask(luigi.Task):
idx = luigi.IntParameter()
def requires(self):
if self.idx > 0:
yield LinearTask(self.idx - 1)
def complete(self):
return False
root_task = LinearTask(100)
self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
self._build([root_task])
graph = self.scheduler.dep_graph(root_task.task_id)
self.assertEqual(10, len(graph))
expected_nodes = [LinearTask(i).task_id for i in range(100, 90, -1)]
six.assertCountEqual(self, expected_nodes, graph)
def test_large_inverse_graph_truncate(self):
class LinearTask(luigi.Task):
idx = luigi.IntParameter()
def requires(self):
if self.idx > 0:
yield LinearTask(self.idx - 1)
def complete(self):
return False
root_task = LinearTask(100)
self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
self._build([root_task])
graph = self.scheduler.inverse_dep_graph(LinearTask(0).task_id)
self.assertEqual(10, len(graph))
expected_nodes = [LinearTask(i).task_id for i in range(10)]
six.assertCountEqual(self, expected_nodes, graph)
def test_truncate_graph_with_full_levels(self):
class BinaryTreeTask(luigi.Task):
idx = luigi.IntParameter()
def requires(self):
if self.idx < 100:
return map(BinaryTreeTask, (self.idx * 2, self.idx * 2 + 1))
root_task = BinaryTreeTask(1)
self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
self._build([root_task])
graph = self.scheduler.dep_graph(root_task.task_id)
self.assertEqual(10, len(graph))
expected_nodes = [BinaryTreeTask(i).task_id for i in range(1, 11)]
six.assertCountEqual(self, expected_nodes, graph)
def test_truncate_graph_with_multiple_depths(self):
class LinearTask(luigi.Task):
idx = luigi.IntParameter()
def requires(self):
if self.idx > 0:
yield LinearTask(self.idx - 1)
yield LinearTask(0)
def complete(self):
return False
root_task = LinearTask(100)
self.scheduler = luigi.scheduler.CentralPlannerScheduler(max_graph_nodes=10)
self._build([root_task])
graph = self.scheduler.dep_graph(root_task.task_id)
self.assertEqual(10, len(graph))
expected_nodes = [LinearTask(i).task_id for i in range(100, 91, -1)] +\
[LinearTask(0).task_id]
self.maxDiff = None
six.assertCountEqual(self, expected_nodes, graph)
def _assert_all_done(self, tasks):
self._assert_all(tasks, u'DONE')
def _assert_all(self, tasks, status):
for task in tasks.values():
self.assertEqual(task[u'status'], status)
def test_dep_graph_single(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph(FactorTask(product=1).task_id)
self.assertEqual(len(dep_graph), 1)
self._assert_all_done(dep_graph)
d1 = dep_graph.get(FactorTask(product=1).task_id)
self.assertEqual(type(d1), type({}))
self.assertEqual(d1[u'deps'], [])
def test_dep_graph_not_found(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.dep_graph(FactorTask(product=5).task_id)
self.assertEqual(len(dep_graph), 0)
def test_inverse_dep_graph_not_found(self):
self._build([FactorTask(1)])
remote = self._remote()
dep_graph = remote.inverse_dep_graph('FactorTask(product=5)')
self.assertEqual(len(dep_graph), 0)
def test_dep_graph_tree(self):
self._build([FactorTask(30)])
remote = self._remote()
dep_graph = remote.dep_graph(FactorTask(product=30).task_id)
self.assertEqual(len(dep_graph), 5)
self._assert_all_done(dep_graph)
d30 = dep_graph[FactorTask(product=30).task_id]
self.assertEqual(sorted(d30[u'deps']), sorted([FactorTask(product=15).task_id, FactorTask(product=2).task_id]))
d2 = dep_graph[FactorTask(product=2).task_id]
self.assertEqual(sorted(d2[u'deps']), [])
d15 = dep_graph[FactorTask(product=15).task_id]
self.assertEqual(sorted(d15[u'deps']), sorted([FactorTask(product=3).task_id, FactorTask(product=5).task_id]))
d3 = dep_graph[FactorTask(product=3).task_id]
self.assertEqual(sorted(d3[u'deps']), [])
d5 = dep_graph[FactorTask(product=5).task_id]
self.assertEqual(sorted(d5[u'deps']), [])
def test_dep_graph_missing_deps(self):
self._build([BadReqTask(True)])
dep_graph = self._remote().dep_graph(BadReqTask(succeed=True).task_id)
self.assertEqual(len(dep_graph), 2)
suc = dep_graph[BadReqTask(succeed=True).task_id]
self.assertEqual(suc[u'deps'], [BadReqTask(succeed=False).task_id])
fail = dep_graph[BadReqTask(succeed=False).task_id]
self.assertEqual(fail[u'name'], 'BadReqTask')
self.assertEqual(fail[u'params'], {'succeed': 'False'})
self.assertEqual(fail[u'status'], 'UNKNOWN')
def test_dep_graph_diamond(self):
self._build([FactorTask(12)])
remote = self._remote()
dep_graph = remote.dep_graph(FactorTask(product=12).task_id)
self.assertEqual(len(dep_graph), 4)
self._assert_all_done(dep_graph)
d12 = dep_graph[FactorTask(product=12).task_id]
self.assertEqual(sorted(d12[u'deps']), sorted([FactorTask(product=2).task_id, FactorTask(product=6).task_id]))
d6 = dep_graph[FactorTask(product=6).task_id]
self.assertEqual(sorted(d6[u'deps']), sorted([FactorTask(product=2).task_id, FactorTask(product=3).task_id]))
d3 = dep_graph[FactorTask(product=3).task_id]
self.assertEqual(sorted(d3[u'deps']), [])
d2 = dep_graph[FactorTask(product=2).task_id]
self.assertEqual(sorted(d2[u'deps']), [])
def test_dep_graph_skip_done(self):
task = OddFibTask(9)
self._build([task])
remote = self._remote()
task_id = task.task_id
self.assertEqual(9, len(remote.dep_graph(task_id, include_done=True)))
skip_done_graph = remote.dep_graph(task_id, include_done=False)
self.assertEqual(5, len(skip_done_graph))
for task in skip_done_graph.values():
self.assertNotEqual('DONE', task['status'])
self.assertLess(len(task['deps']), 2)
def test_inverse_dep_graph_skip_done(self):
self._build([OddFibTask(9, done=False)])
self._build([OddFibTask(9, done=True)])
remote = self._remote()
task_id = OddFibTask(1).task_id
self.assertEqual(9, len(remote.inverse_dep_graph(task_id, include_done=True)))
skip_done_graph = remote.inverse_dep_graph(task_id, include_done=False)
self.assertEqual(5, len(skip_done_graph))
for task in skip_done_graph.values():
self.assertNotEqual('DONE', task['status'])
self.assertLess(len(task['deps']), 2)
def test_task_list_single(self):
self._build([FactorTask(7)])
remote = self._remote()
tasks_done = remote.task_list('DONE', '')
self.assertEqual(len(tasks_done), 1)
self._assert_all_done(tasks_done)
t7 = tasks_done.get(FactorTask(product=7).task_id)
self.assertEqual(type(t7), type({}))
self.assertEqual(remote.task_list('', ''), tasks_done)
self.assertEqual(remote.task_list('FAILED', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_dep_graph_root_has_display_name(self):
root_task = FactorTask(12)
self._build([root_task])
dep_graph = self._remote().dep_graph(root_task.task_id)
self.assertEqual('FactorTask(product=12)', dep_graph[root_task.task_id]['display_name'])
def test_dep_graph_non_root_nodes_lack_display_name(self):
root_task = FactorTask(12)
self._build([root_task])
dep_graph = self._remote().dep_graph(root_task.task_id)
for task_id, node in dep_graph.items():
if task_id != root_task.task_id:
self.assertNotIn('display_name', node)
def test_task_list_failed(self):
self._build([FailingTask(8)])
remote = self._remote()
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
f8 = failed.get(FailingTask(task_id=8).task_id)
self.assertEqual(f8[u'status'], u'FAILED')
self.assertEqual(remote.task_list('DONE', ''), {})
self.assertEqual(remote.task_list('PENDING', ''), {})
def test_task_list_upstream_status(self):
class A(luigi.ExternalTask):
pass
class B(luigi.ExternalTask):
def complete(self):
return True
class C(luigi.Task):
def requires(self):
return [A(), B()]
class F(luigi.Task):
def run(self):
raise Exception()
class D(luigi.Task):
def requires(self):
return [F()]
class E(luigi.Task):
def requires(self):
return [C(), D()]
self._build([E()])
remote = self._remote()
done = remote.task_list('DONE', '')
self.assertEqual(len(done), 1)
db = done.get(B().task_id)
self.assertEqual(db['status'], 'DONE')
missing_input = remote.task_list('PENDING', 'UPSTREAM_MISSING_INPUT')
self.assertEqual(len(missing_input), 2)
pa = missing_input.get(A().task_id)
self.assertEqual(pa['status'], 'PENDING')
self.assertEqual(remote._upstream_status(A().task_id, {}), 'UPSTREAM_MISSING_INPUT')
pc = missing_input.get(C().task_id)
self.assertEqual(pc['status'], 'PENDING')
self.assertEqual(remote._upstream_status(C().task_id, {}), 'UPSTREAM_MISSING_INPUT')
upstream_failed = remote.task_list('PENDING', 'UPSTREAM_FAILED')
self.assertEqual(len(upstream_failed), 2)
pe = upstream_failed.get(E().task_id)
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status(E().task_id, {}), 'UPSTREAM_FAILED')
pe = upstream_failed.get(D().task_id)
self.assertEqual(pe['status'], 'PENDING')
self.assertEqual(remote._upstream_status(D().task_id, {}), 'UPSTREAM_FAILED')
pending = dict(missing_input)
pending.update(upstream_failed)
self.assertEqual(remote.task_list('PENDING', ''), pending)
self.assertEqual(remote.task_list('PENDING', 'UPSTREAM_RUNNING'), {})
failed = remote.task_list('FAILED', '')
self.assertEqual(len(failed), 1)
fd = failed.get(F().task_id)
self.assertEqual(fd['status'], 'FAILED')
all = dict(pending)
all.update(done)
all.update(failed)
self.assertEqual(remote.task_list('', ''), all)
self.assertEqual(remote.task_list('RUNNING', ''), {})
def test_task_search(self):
self._build([FactorTask(8)])
self._build([FailingTask(8)])
remote = self._remote()
all_tasks = remote.task_search('Task')
self.assertEqual(len(all_tasks), 2)
self._assert_all(all_tasks['DONE'], 'DONE')
self._assert_all(all_tasks['FAILED'], 'FAILED')
def test_fetch_error(self):
self._build([FailingTask(8)])
remote = self._remote()
error = remote.fetch_error(FailingTask(task_id=8).task_id)
self.assertEqual(error["taskId"], FailingTask(task_id=8).task_id)
self.assertTrue("Error Message" in error["error"])
self.assertTrue("Runtime error" in error["error"])
self.assertTrue("Traceback" in error["error"])
def test_inverse_deps(self):
class X(luigi.Task):
pass
class Y(luigi.Task):
def requires(self):
return [X()]
class Z(luigi.Task):
id = luigi.Parameter()
def requires(self):
return [Y()]
class ZZ(luigi.Task):
def requires(self):
return [Z(1), Z(2)]
self._build([ZZ()])
dep_graph = self._remote().inverse_dep_graph(X().task_id)
def assert_has_deps(task_id, deps):
self.assertTrue(task_id in dep_graph, '%s not in dep_graph %s' % (task_id, dep_graph))
task = dep_graph[task_id]
self.assertEqual(sorted(task['deps']), sorted(deps), '%s does not have deps %s' % (task_id, deps))
assert_has_deps(X().task_id, [Y().task_id])
assert_has_deps(Y().task_id, [Z(id=1).task_id, Z(id=2).task_id])
assert_has_deps(Z(id=1).task_id, [ZZ().task_id])
assert_has_deps(Z(id=2).task_id, [ZZ().task_id])
assert_has_deps(ZZ().task_id, [])
def test_simple_worker_list(self):
class X(luigi.Task):
def run(self):
self._complete = True
def complete(self):
return getattr(self, '_complete', False)
task_x = X()
self._build([task_x])
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual(task_x.task_id, worker['first_task'])
self.assertEqual(0, worker['num_pending'])
self.assertEqual(0, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
self.assertEqual(1, worker['workers'])
def test_worker_list_pending_uniques(self):
class X(luigi.Task):
def complete(self):
return False
class Y(X):
def requires(self):
return X()
class Z(Y):
pass
w1 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w2 = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=1)
w1.add(Y())
w2.add(Z())
workers = self._remote().worker_list()
self.assertEqual(2, len(workers))
for worker in workers:
self.assertEqual(2, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
self.assertEqual(0, worker['num_running'])
def test_worker_list_running(self):
class X(luigi.Task):
n = luigi.IntParameter()
w = luigi.worker.Worker(worker_id='w', scheduler=self.scheduler, worker_processes=3)
w.add(X(0))
w.add(X(1))
w.add(X(2))
w.add(X(3))
self.scheduler.get_work(worker='w')
self.scheduler.get_work(worker='w')
self.scheduler.get_work(worker='w')
workers = self._remote().worker_list()
self.assertEqual(1, len(workers))
worker = workers[0]
self.assertEqual(3, worker['num_running'])
self.assertEqual(1, worker['num_pending'])
self.assertEqual(1, worker['num_uniques'])
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
swampbotics/new.swampbotics.org | node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py | 2710 | 5094 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
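# Illustrative note (the result depends on the machine's PATH, so this is a
# hypothetical outcome): _FindCommandInPath('python') might return
# '/usr/bin/python', while _FindCommandInPath('bin/python') is returned
# unchanged because it already contains a path separator.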
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
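def _QuoteWin32CommandLineArgsExample():
  """Example of the quoting rules above, on hypothetical inputs: plain args
  pass through, whitespace forces quoting, and embedded double-quotes are
  doubled before the whole argument is quoted."""
  expected = ['run', '"two words"', '"say ""hi"""']
  assert _QuoteWin32CommandLineArgs(['run', 'two words', 'say "hi"']) == expected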
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
    Args:
      config_name: name of the configuration to attach the settings to.
      command: command line to run.  First element in the list is the
        executable.  All elements of the command will be quoted if
        necessary.
      environment: dictionary of environment variables to set. (optional)
      working_directory: directory to run the command in. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
| mit |
peer-node/flex | test/lib/googletest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email googletestframework@googlegroups.com if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| agpl-3.0 |
zsjohny/jumpserver | apps/perms/urls/views_urls.py | 1 | 2684 | # coding:utf-8
from django.conf.urls import url
from django.urls import path
from .. import views
app_name = 'perms'
urlpatterns = [
# asset-permission
path('asset-permission/', views.AssetPermissionListView.as_view(), name='asset-permission-list'),
path('asset-permission/create/', views.AssetPermissionCreateView.as_view(), name='asset-permission-create'),
path('asset-permission/<uuid:pk>/update/', views.AssetPermissionUpdateView.as_view(), name='asset-permission-update'),
    path('asset-permission/<uuid:pk>/', views.AssetPermissionDetailView.as_view(), name='asset-permission-detail'),
path('asset-permission/<uuid:pk>/delete/', views.AssetPermissionDeleteView.as_view(), name='asset-permission-delete'),
path('asset-permission/<uuid:pk>/user/', views.AssetPermissionUserView.as_view(), name='asset-permission-user-list'),
path('asset-permission/<uuid:pk>/asset/', views.AssetPermissionAssetView.as_view(), name='asset-permission-asset-list'),
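    # Illustrative (hedged) examples of resolved routes; the uuid is hypothetical:
    #   asset-permission/3f2a.../update/ -> AssetPermissionUpdateView
    #   asset-permission/3f2a.../asset/  -> AssetPermissionAssetView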
# remote-app-permission
path('remote-app-permission/', views.RemoteAppPermissionListView.as_view(), name='remote-app-permission-list'),
path('remote-app-permission/create/', views.RemoteAppPermissionCreateView.as_view(), name='remote-app-permission-create'),
path('remote-app-permission/<uuid:pk>/update/', views.RemoteAppPermissionUpdateView.as_view(), name='remote-app-permission-update'),
path('remote-app-permission/<uuid:pk>/', views.RemoteAppPermissionDetailView.as_view(), name='remote-app-permission-detail'),
path('remote-app-permission/<uuid:pk>/user/', views.RemoteAppPermissionUserView.as_view(), name='remote-app-permission-user-list'),
path('remote-app-permission/<uuid:pk>/remote-app/', views.RemoteAppPermissionRemoteAppView.as_view(), name='remote-app-permission-remote-app-list'),
# database-app-permission
path('database-app-permission/', views.DatabaseAppPermissionListView.as_view(), name='database-app-permission-list'),
path('database-app-permission/create/', views.DatabaseAppPermissionCreateView.as_view(), name='database-app-permission-create'),
path('database-app-permission/<uuid:pk>/update/', views.DatabaseAppPermissionUpdateView.as_view(), name='database-app-permission-update'),
path('database-app-permission/<uuid:pk>/', views.DatabaseAppPermissionDetailView.as_view(), name='database-app-permission-detail'),
path('database-app-permission/<uuid:pk>/user/', views.DatabaseAppPermissionUserView.as_view(), name='database-app-permission-user-list'),
path('database-app-permission/<uuid:pk>/database-app/', views.DatabaseAppPermissionDatabaseAppView.as_view(), name='database-app-permission-database-app-list'),
]
| gpl-2.0 |
cctaylor/googleads-python-lib | examples/dfp/v201505/proposal_service/submit_proposals_for_approval.py | 3 | 2395 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example approves a single proposal.
To determine which proposals exist, run get_all_proposals.py."""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
PROPOSAL_ID = 'INSERT_PROPOSAL_ID_HERE'
def main(client, proposal_id):
# Initialize appropriate service.
proposal_service = client.GetService('ProposalService', version='v201505')
# Create query.
values = [{
'key': 'proposalId',
'value': {
'xsi_type': 'TextValue',
'value': proposal_id
}
}]
query = 'WHERE id = :proposalId'
# Create a filter statement.
statement = dfp.FilterStatement(query, values)
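  # Hedged note: FilterStatement pages through results; statement.offset is
  # advanced by dfp.SUGGESTED_PAGE_LIMIT below until a page returns no results.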
proposals_approved = 0
# Get proposals by statement.
while True:
response = proposal_service.getProposalsByStatement(statement.ToStatement())
if 'results' in response:
# Display results.
for proposal in response['results']:
        print ('Proposal with id \'%s\', name \'%s\', and status \'%s\' will be'
               ' submitted for approval.' % (proposal['id'], proposal['name'],
                                             proposal['status']))
# Perform action.
result = proposal_service.performProposalAction(
{'xsi_type': 'SubmitProposalsForApproval'}, statement.ToStatement())
if result and int(result['numChanges']) > 0:
proposals_approved += int(result['numChanges'])
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
# Display results.
if proposals_approved > 0:
print '\nNumber of proposals approved: %s' % proposals_approved
else:
print '\nNo proposals were approved.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, PROPOSAL_ID)
| apache-2.0 |
wtanaka/google-app-engine-django-openid | src/openid/consumer/html_parse.py | 167 | 7161 | """
This module implements a VERY limited parser that finds <link> tags in
the head of HTML or XHTML documents and parses out their attributes
according to the OpenID spec. It is a liberal parser, but it requires
these things from the data in order to work:
- There must be an open <html> tag
- There must be an open <head> tag inside of the <html> tag
- Only <link>s that are found inside of the <head> tag are parsed
(this is by design)
- The parser follows the OpenID specification in resolving the
attributes of the link tags. This means that the attributes DO NOT
get resolved as they would by an XML or HTML parser. In particular,
only certain entities get replaced, and href attributes do not get
resolved relative to a base URL.
From http://openid.net/specs.bml#linkrel:
- The openid.server URL MUST be an absolute URL. OpenID consumers
MUST NOT attempt to resolve relative URLs.
  - The openid.server URL MUST NOT include entities other than &amp;,
    &lt;, &gt;, and &quot;.
The parser ignores SGML comments and <![CDATA[blocks]]>. Both kinds of
quoting are allowed for attributes.
The parser deals with invalid markup in these ways:
- Tag names are not case-sensitive
- The <html> tag is accepted even when it is not at the top level
- The <head> tag is accepted even when it is not a direct child of
the <html> tag, but a <html> tag must be an ancestor of the <head>
tag
- <link> tags are accepted even when they are not direct children of
the <head> tag, but a <head> tag must be an ancestor of the <link>
tag
- If there is no closing tag for an open <html> or <head> tag, the
remainder of the document is viewed as being inside of the tag. If
there is no closing tag for a <link> tag, the link tag is treated
as a short tag. Exceptions to this rule are that <html> closes
<html> and <body> or <head> closes <head>
- Attributes of the <link> tag are not required to be quoted.
- In the case of duplicated attribute names, the attribute coming
last in the tag will be the value returned.
- Any text that does not parse as an attribute within a link tag will
be ignored. (e.g. <link pumpkin rel='openid.server' /> will ignore
pumpkin)
- If there are more than one <html> or <head> tag, the parser only
looks inside of the first one.
- The contents of <script> tags are ignored entirely, except unclosed
<script> tags. Unclosed <script> tags are ignored.
- Any other invalid markup is ignored, including unclosed SGML
comments and unclosed <![CDATA[blocks.
"""
__all__ = ['parseLinkAttrs']
import re
flags = ( re.DOTALL # Match newlines with '.'
| re.IGNORECASE
| re.VERBOSE # Allow comments and whitespace in patterns
| re.UNICODE # Make \b respect Unicode word boundaries
)
# Stuff to remove before we start looking for tags
removed_re = re.compile(r'''
# Comments
<!--.*?-->
# CDATA blocks
| <!\[CDATA\[.*?\]\]>
# script blocks
| <script\b
# make sure script is not an XML namespace
(?!:)
[^>]*>.*?</script>
''', flags)
tag_expr = r'''
# Starts with the tag name at a word boundary, where the tag name is
# not a namespace
<%(tag_name)s\b(?!:)
# All of the stuff up to a ">", hopefully attributes.
(?P<attrs>[^>]*?)
(?: # Match a short tag
/>
| # Match a full tag
>
(?P<contents>.*?)
# Closed by
(?: # One of the specified close tags
</?%(closers)s\s*>
# End of the string
| \Z
)
)
'''
def tagMatcher(tag_name, *close_tags):
if close_tags:
options = '|'.join((tag_name,) + close_tags)
closers = '(?:%s)' % (options,)
else:
closers = tag_name
expr = tag_expr % locals()
return re.compile(expr, flags)
# Must contain at least an open html and an open head tag
html_find = tagMatcher('html')
head_find = tagMatcher('head', 'body')
link_find = re.compile(r'<link\b(?!:)', flags)
attr_find = re.compile(r'''
# Must start with a sequence of word-characters, followed by an equals sign
(?P<attr_name>\w+)=
# Then either a quoted or unquoted attribute
(?:
# Match everything that\'s between matching quote marks
(?P<qopen>["\'])(?P<q_val>.*?)(?P=qopen)
|
# If the value is not quoted, match up to whitespace
(?P<unq_val>(?:[^\s<>/]|/(?!>))+)
)
|
(?P<end_link>[<>])
''', flags)
# Entity replacement:
replacements = {
'amp':'&',
'lt':'<',
'gt':'>',
'quot':'"',
}
ent_replace = re.compile(r'&(%s);' % '|'.join(replacements.keys()))
def replaceEnt(mo):
"Replace the entities that are specified by OpenID"
return replacements.get(mo.group(1), mo.group())
def parseLinkAttrs(html):
"""Find all link tags in a string representing a HTML document and
return a list of their attributes.
@param html: the text to parse
@type html: str or unicode
@return: A list of dictionaries of attributes, one for each link tag
@rtype: [[(type(html), type(html))]]
"""
stripped = removed_re.sub('', html)
html_mo = html_find.search(stripped)
if html_mo is None or html_mo.start('contents') == -1:
return []
start, end = html_mo.span('contents')
head_mo = head_find.search(stripped, start, end)
if head_mo is None or head_mo.start('contents') == -1:
return []
start, end = head_mo.span('contents')
link_mos = link_find.finditer(stripped, head_mo.start(), head_mo.end())
matches = []
for link_mo in link_mos:
start = link_mo.start() + 5
link_attrs = {}
for attr_mo in attr_find.finditer(stripped, start):
if attr_mo.lastgroup == 'end_link':
break
# Either q_val or unq_val must be present, but not both
# unq_val is a True (non-empty) value if it is present
attr_name, q_val, unq_val = attr_mo.group(
'attr_name', 'q_val', 'unq_val')
attr_val = ent_replace.sub(replaceEnt, unq_val or q_val)
link_attrs[attr_name] = attr_val
matches.append(link_attrs)
return matches
def relMatches(rel_attr, target_rel):
"""Does this target_rel appear in the rel_str?"""
# XXX: TESTME
rels = rel_attr.strip().split()
for rel in rels:
rel = rel.lower()
if rel == target_rel:
return 1
return 0
def linkHasRel(link_attrs, target_rel):
"""Does this link have target_rel as a relationship?"""
# XXX: TESTME
rel_attr = link_attrs.get('rel')
return rel_attr and relMatches(rel_attr, target_rel)
def findLinksRel(link_attrs_list, target_rel):
"""Filter the list of link attributes on whether it has target_rel
as a relationship."""
# XXX: TESTME
matchesTarget = lambda attrs: linkHasRel(attrs, target_rel)
return filter(matchesTarget, link_attrs_list)
def findFirstHref(link_attrs_list, target_rel):
"""Return the value of the href attribute for the first link tag
in the list that has target_rel as a relationship."""
# XXX: TESTME
matches = findLinksRel(link_attrs_list, target_rel)
if not matches:
return None
first = matches[0]
return first.get('href')
| gpl-3.0 |
davidharvey1986/pyRRG | unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/distlib/scripts.py | 7 | 16998 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from io import BytesIO
import logging
import os
import re
import struct
import sys
from .compat import sysconfig, detect_encoding, ZipFile
from .resources import finder
from .util import (FileOperator, get_export_entry, convert_path,
get_executable, in_venv)
logger = logging.getLogger(__name__)
_DEFAULT_MANIFEST = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity version="1.0.0.0"
processorArchitecture="X86"
name="%s"
type="win32"/>
<!-- Identify the application security requirements. -->
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"/>
</requestedPrivileges>
</security>
</trustInfo>
</assembly>'''.strip()
# check if Python is called on the first line with this expression
FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(%(func)s())
'''
def _enquote_executable(executable):
if ' ' in executable:
# make sure we quote only the executable in case of env
# for example /usr/bin/env "/dir with spaces/bin/jython"
# instead of "/usr/bin/env /dir with spaces/bin/jython"
        # otherwise the whole string would be quoted
if executable.startswith('/usr/bin/env '):
env, _executable = executable.split(' ', 1)
if ' ' in _executable and not _executable.startswith('"'):
executable = '%s "%s"' % (env, _executable)
else:
if not executable.startswith('"'):
executable = '"%s"' % executable
return executable
class ScriptMaker(object):
"""
A class to copy or create scripts from source scripts or callable
specifications.
"""
script_template = SCRIPT_TEMPLATE
executable = None # for shebangs
def __init__(self, source_dir, target_dir, add_launchers=True,
dry_run=False, fileop=None):
self.source_dir = source_dir
self.target_dir = target_dir
self.add_launchers = add_launchers
self.force = False
self.clobber = False
# It only makes sense to set mode bits on POSIX.
self.set_mode = (os.name == 'posix') or (os.name == 'java' and
os._name == 'posix')
self.variants = set(('', 'X.Y'))
self._fileop = fileop or FileOperator(dry_run)
self._is_nt = os.name == 'nt' or (
os.name == 'java' and os._name == 'nt')
def _get_alternate_executable(self, executable, options):
if options.get('gui', False) and self._is_nt: # pragma: no cover
dn, fn = os.path.split(executable)
fn = fn.replace('python', 'pythonw')
executable = os.path.join(dn, fn)
return executable
if sys.platform.startswith('java'): # pragma: no cover
def _is_shell(self, executable):
"""
Determine if the specified executable is a script
(contains a #! line)
"""
try:
with open(executable) as fp:
return fp.read(2) == '#!'
except (OSError, IOError):
logger.warning('Failed to open %s', executable)
return False
def _fix_jython_executable(self, executable):
if self._is_shell(executable):
# Workaround for Jython is not needed on Linux systems.
import java
if java.lang.System.getProperty('os.name') == 'Linux':
return executable
elif executable.lower().endswith('jython.exe'):
# Use wrapper exe for Jython on Windows
return executable
return '/usr/bin/env %s' % executable
def _build_shebang(self, executable, post_interp):
"""
Build a shebang line. In the simple case (on Windows, or a shebang line
which is not too long or contains spaces) use a simple formulation for
the shebang. Otherwise, use /bin/sh as the executable, with a contrived
shebang which allows the script to run either under Python or sh, using
suitable quoting. Thanks to Harald Nordgren for his input.
See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
https://hg.mozilla.org/mozilla-central/file/tip/mach
"""
if os.name != 'posix':
simple_shebang = True
else:
# Add 3 for '#!' prefix and newline suffix.
shebang_length = len(executable) + len(post_interp) + 3
if sys.platform == 'darwin':
max_shebang_length = 512
else:
max_shebang_length = 127
simple_shebang = ((b' ' not in executable) and
(shebang_length <= max_shebang_length))
if simple_shebang:
result = b'#!' + executable + post_interp + b'\n'
else:
result = b'#!/bin/sh\n'
result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
result += b"' '''"
return result
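    # Hedged illustration (not part of the original source): a short POSIX
    # interpreter path yields the simple form, e.g. b'#!/usr/bin/python\n',
    # while an over-long or space-containing path yields the contrived form:
    #   #!/bin/sh
    #   '''exec' /very/long/path/to/python "$0" "$@"
    #   ' '''
    # sh executes the 'exec' line, while Python parses it as a string literal.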
def _get_shebang(self, encoding, post_interp=b'', options=None):
enquote = True
if self.executable:
executable = self.executable
enquote = False # assume this will be taken care of
elif not sysconfig.is_python_build():
executable = get_executable()
elif in_venv(): # pragma: no cover
executable = os.path.join(sysconfig.get_path('scripts'),
'python%s' % sysconfig.get_config_var('EXE'))
else: # pragma: no cover
executable = os.path.join(
sysconfig.get_config_var('BINDIR'),
'python%s%s' % (sysconfig.get_config_var('VERSION'),
sysconfig.get_config_var('EXE')))
if options:
executable = self._get_alternate_executable(executable, options)
if sys.platform.startswith('java'): # pragma: no cover
executable = self._fix_jython_executable(executable)
# Normalise case for Windows - COMMENTED OUT
# executable = os.path.normcase(executable)
# N.B. The normalising operation above has been commented out: See
# issue #124. Although paths in Windows are generally case-insensitive,
# they aren't always. For example, a path containing a ẞ (which is a
# LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
        # LATIN SMALL LETTER SHARP S - U+00DF). The two are not considered by
# Windows as equivalent in path names.
# If the user didn't specify an executable, it may be necessary to
# cater for executable paths with spaces (not uncommon on Windows)
if enquote:
executable = _enquote_executable(executable)
# Issue #51: don't use fsencode, since we later try to
# check that the shebang is decodable using utf-8.
executable = executable.encode('utf-8')
# in case of IronPython, play safe and enable frames support
if (sys.platform == 'cli' and '-X:Frames' not in post_interp
and '-X:FullFrames' not in post_interp): # pragma: no cover
post_interp += b' -X:Frames'
shebang = self._build_shebang(executable, post_interp)
# Python parser starts to read a script using UTF-8 until
# it gets a #coding:xxx cookie. The shebang has to be the
# first line of a file, the #coding:xxx cookie cannot be
# written before. So the shebang has to be decodable from
# UTF-8.
try:
shebang.decode('utf-8')
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable from utf-8' % shebang)
# If the script is encoded to a custom encoding (use a
# #coding:xxx cookie), the shebang has to be decodable from
# the script encoding too.
if encoding != 'utf-8':
try:
shebang.decode(encoding)
except UnicodeDecodeError: # pragma: no cover
raise ValueError(
'The shebang (%r) is not decodable '
'from the script encoding (%r)' % (shebang, encoding))
return shebang
def _get_script_text(self, entry):
return self.script_template % dict(module=entry.prefix,
import_name=entry.suffix.split('.')[0],
func=entry.suffix)
manifest = _DEFAULT_MANIFEST
def get_manifest(self, exename):
base = os.path.basename(exename)
return self.manifest % base
def _write_script(self, names, shebang, script_bytes, filenames, ext):
use_launcher = self.add_launchers and self._is_nt
linesep = os.linesep.encode('utf-8')
if not shebang.endswith(linesep):
shebang += linesep
if not use_launcher:
script_bytes = shebang + script_bytes
else: # pragma: no cover
if ext == 'py':
launcher = self._get_launcher('t')
else:
launcher = self._get_launcher('w')
stream = BytesIO()
with ZipFile(stream, 'w') as zf:
zf.writestr('__main__.py', script_bytes)
zip_data = stream.getvalue()
script_bytes = launcher + shebang + zip_data
for name in names:
outname = os.path.join(self.target_dir, name)
if use_launcher: # pragma: no cover
n, e = os.path.splitext(outname)
if e.startswith('.py'):
outname = n
outname = '%s.exe' % outname
try:
self._fileop.write_binary_file(outname, script_bytes)
except Exception:
# Failed writing an executable - it might be in use.
logger.warning('Failed to write executable - trying to '
'use .deleteme logic')
dfname = '%s.deleteme' % outname
if os.path.exists(dfname):
os.remove(dfname) # Not allowed to fail here
os.rename(outname, dfname) # nor here
self._fileop.write_binary_file(outname, script_bytes)
logger.debug('Able to replace executable using '
'.deleteme logic')
try:
os.remove(dfname)
except Exception:
pass # still in use - ignore error
else:
if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
outname = '%s.%s' % (outname, ext)
if os.path.exists(outname) and not self.clobber:
logger.warning('Skipping existing file %s', outname)
continue
self._fileop.write_binary_file(outname, script_bytes)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
def _make_script(self, entry, filenames, options=None):
post_interp = b''
if options:
args = options.get('interpreter_args', [])
if args:
args = ' %s' % ' '.join(args)
post_interp = args.encode('utf-8')
shebang = self._get_shebang('utf-8', post_interp, options=options)
script = self._get_script_text(entry).encode('utf-8')
name = entry.name
scriptnames = set()
if '' in self.variants:
scriptnames.add(name)
if 'X' in self.variants:
scriptnames.add('%s%s' % (name, sys.version_info[0]))
if 'X.Y' in self.variants:
scriptnames.add('%s-%s.%s' % (name, sys.version_info[0],
sys.version_info[1]))
if options and options.get('gui', False):
ext = 'pyw'
else:
ext = 'py'
self._write_script(scriptnames, shebang, script, filenames, ext)
def _copy_script(self, script, filenames):
adjust = False
script = os.path.join(self.source_dir, convert_path(script))
outname = os.path.join(self.target_dir, os.path.basename(script))
if not self.force and not self._fileop.newer(script, outname):
logger.debug('not copying %s (up-to-date)', script)
return
# Always open the file, but ignore failures in dry-run mode --
# that way, we'll get accurate feedback if we can read the
# script.
try:
f = open(script, 'rb')
except IOError: # pragma: no cover
if not self.dry_run:
raise
f = None
else:
first_line = f.readline()
if not first_line: # pragma: no cover
logger.warning('%s: %s is an empty file (skipping)',
self.get_command_name(), script)
return
match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
if match:
adjust = True
post_interp = match.group(1) or b''
if not adjust:
if f:
f.close()
self._fileop.copy_file(script, outname)
if self.set_mode:
self._fileop.set_executable_mode([outname])
filenames.append(outname)
else:
logger.info('copying and adjusting %s -> %s', script,
self.target_dir)
if not self._fileop.dry_run:
encoding, lines = detect_encoding(f.readline)
f.seek(0)
shebang = self._get_shebang(encoding, post_interp)
if b'pythonw' in first_line: # pragma: no cover
ext = 'pyw'
else:
ext = 'py'
n = os.path.basename(outname)
self._write_script([n], shebang, f.read(), filenames, ext)
if f:
f.close()
@property
def dry_run(self):
return self._fileop.dry_run
@dry_run.setter
def dry_run(self, value):
self._fileop.dry_run = value
if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
# Executable launcher support.
# Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
def _get_launcher(self, kind):
if struct.calcsize('P') == 8: # 64-bit
bits = '64'
else:
bits = '32'
name = '%s%s.exe' % (kind, bits)
# Issue 31: don't hardcode an absolute package name, but
# determine it relative to the current package
distlib_package = __name__.rsplit('.', 1)[0]
resource = finder(distlib_package).find(name)
if not resource:
msg = ('Unable to find resource %s in package %s' % (name,
distlib_package))
raise ValueError(msg)
return resource.bytes
# Public API follows
def make(self, specification, options=None):
"""
Make a script.
:param specification: The specification, which is either a valid export
entry specification (to make a script from a
callable) or a filename (to make a script by
copying from a source location).
:param options: A dictionary of options controlling script generation.
:return: A list of all absolute pathnames written to.
"""
filenames = []
entry = get_export_entry(specification)
if entry is None:
self._copy_script(specification, filenames)
else:
self._make_script(entry, filenames, options=options)
return filenames
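    # Hedged usage sketch (mypkg is hypothetical):
    #   maker.make('mytool = mypkg.cli:main')  # script wrapping mypkg.cli.main
    #   maker.make('scripts/mytool.py')        # copy + shebang rewrite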
def make_multiple(self, specifications, options=None):
"""
        Take a list of specifications and make scripts from them.
        :param specifications: A list of specifications.
        :return: A list of all absolute pathnames written to.
"""
filenames = []
for specification in specifications:
filenames.extend(self.make(specification, options))
return filenames
| mit |
chengdezhi/language_model_for_typing | lm_prediction.py | 1 | 6516 | import datrie
from data_utils import Vocabulary, Dataset
import string
import re
from flask import Flask
from flask_restful import Resource, Api
import traceback
import time
import sys
#import thriftpy
import os
from flask import Flask, request, redirect, url_for
from werkzeug.utils import secure_filename
from newPyClient import computeKSR
import json
import numpy as np
import time
import tensorflow as tf
from data_utils import Vocabulary, Dataset
from language_model import LM
from common import CheckpointLoader
import heapq
UPLOAD_FOLDER = '/data/ngramTest/uploads'
UPLOAD_FOLDER = './'
top_k = 3
pattern = re.compile('[\w+]')
p_punc = re.compile('(\.|\"|,|\?|\!)')
hps = LM.get_default_hparams()
vocab = Vocabulary.from_file("1b_word_vocab.txt")
with tf.variable_scope("model"):
hps.num_sampled = 0 # Always using full softmax at evaluation. run out of memory
hps.keep_prob = 1.0
hps.num_gpus = 1
model = LM(hps,"predict_next", "/cpu:0")
if hps.average_params:
print("Averaging parameters for evaluation.")
saver = tf.train.Saver(model.avg_dict)
else:
saver = tf.train.Saver()
# Use only 4 threads for the evaluation.
config = tf.ConfigProto(allow_soft_placement=True,
intra_op_parallelism_threads=20,
inter_op_parallelism_threads=1)
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
ckpt_loader = CheckpointLoader(saver, model.global_step, "log.txt/train")
saver.restore(sess,"log.txt/train/model.ckpt-742996")
app = Flask(__name__)
api = Api(app)
'''
#build vocab trie
trie = datrie.new(string.printable)
cnt = 0
vocab_size = 140000
for i in range(vocab_size):
word = vocab.get_token(i)
trie[word] = i
for key in trie.keys(u"pre"):
print key,trie[key]
trie.save("data/vocab_trie")
'''
trie = datrie.Trie.load("data/vocab_trie")
class ngramPredict(Resource):
def get(self,input):
input = input.decode("utf-8")
#print "input:",input
input_words = input
if input_words.find('<S>')!=0:
input_words = '<S> ' + input
isCompletion = False
if input_words[-1] == ' ':
#print "Predict:"
prefix_input = [vocab.get_id(w) for w in input_words.split()]
else:
#print "Compeletion:"
isCompletion = True
prefix_input = [vocab.get_id(w) for w in input_words.split()[:-1]]
prefix = input_words.split()[-1]
#print "prefix:",prefix,type(prefix)
#print("input:",input,"pre:",prefix_input,"len:",len(prefix_input))
w = np.zeros([1, len(prefix_input)], np.uint8)
w[:] =1
inputs = np.zeros([hps.batch_size*hps.num_gpus,hps.num_steps])
weights = np.zeros([hps.batch_size*hps.num_gpus,hps.num_steps])
inputs[0,:len(prefix_input)] = prefix_input[:]
weights[0,:len(prefix_input)] = w[:]
words = []
with sess.as_default():
#ckpt_loader.load_checkpoint() # FOR ONLY ONE CHECKPOINT
sess.run(tf.local_variables_initializer())
words = []
if not isCompletion:
indexes = sess.run([model.index],{model.x:inputs, model.w:weights})
indexes = np.reshape(indexes,[hps.num_steps,hps.arg_max])
for j in range(hps.arg_max):
word = vocab.get_token(indexes[len(prefix_input)-1][j])
if not p_punc.match(word)==None:
words += [word]
continue
if pattern.match(word)==None:
continue
words += [word]
else:
prob = sess.run([model.logits],{model.x:inputs, model.w:weights})
prob = np.reshape(prob,[hps.num_steps,hps.vocab_size])
prob = prob[len(prefix_input)-1] # the last prefix_input step prob is the predict one
#print "prob:", len(prob)
#print "prefix:",trie.keys(prefix)
cand = [trie[cand_index] for cand_index in trie.keys(prefix)]
#print "cand:", cand
#print "prefix:", prefix
cand_prob = [prob[pb] for pb in cand]
ins = heapq.nlargest(top_k, range(len(cand_prob)), cand_prob.__getitem__)
for j in ins:
word = vocab.get_token(cand[j])
words += [word]
#print words
return words[:top_k]
@app.route('/ngramfile/', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
doc = request.json
if doc:
doc = doc['text']
ngramClient = ngramPredict()
res = computeKSR(ngramClient,doc)
return json.dumps(res)
if 'text' not in request.files:
return "{\"ret\":-1}"
file = request.files['text']
if file.filename == '':
return "{\"ret\":-2}"
filename = secure_filename(file.filename)
uploadFilePath = os.path.join(UPLOAD_FOLDER, filename)
file.save(uploadFilePath)
doc = ""
with open(uploadFilePath, 'rb') as textFile:
doc = textFile.read()
ngramClient = ngramPredict()
res = computeKSR(ngramClient,doc)
#print("res:",res)
#TODO
#return json.dumps(res)
api.add_resource(ngramPredict, '/ngram/<input>')
#predictClient = PredictClient()
if __name__ == '__main__':
'''
ngrampredict = ngramPredict()
ngrampredict.get("how are")
ngrampredict.get("what the")
ngrampredict.get("i am")
ngrampredict.get("how do")
'''
#print('test for grep ksr')
app.run(host = "0",port=9898)
| mit |
alsrgv/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/softplus.py | 35 | 5563 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Softplus bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.util import deprecation
__all__ = [
"Softplus",
]
class Softplus(bijector.Bijector):
"""Bijector which computes `Y = g(X) = Log[1 + exp(X)]`.
The softplus `Bijector` has the following two useful properties:
* The domain is the positive real numbers
* `softplus(x) approx x`, for large `x`, so it does not overflow as easily as
the `Exp` `Bijector`.
The optional nonzero `hinge_softness` parameter changes the transition at
zero. With `hinge_softness = c`, the bijector is:
```f_c(x) := c * g(x / c) = c * Log[1 + exp(x / c)].```
For large `x >> 1`, `c * Log[1 + exp(x / c)] approx c * Log[exp(x / c)] = x`,
so the behavior for large `x` is the same as the standard softplus.
As `c > 0` approaches 0 from the right, `f_c(x)` becomes less and less soft,
approaching `max(0, x)`.
* `c = 1` is the default.
* `c > 0` but small means `f(x) approx ReLu(x) = max(0, x)`.
* `c < 0` flips sign and reflects around the `y-axis`: `f_{-c}(x) = -f_c(-x)`.
* `c = 0` results in a non-bijective transformation and triggers an exception.
Example Use:
```python
# Create the Y=g(X)=softplus(X) transform which works only on Tensors with 1
# batch ndim and 2 event ndims (i.e., vector of matrices).
softplus = Softplus()
x = [[[1., 2],
[3, 4]],
[[5, 6],
[7, 8]]]
log(1 + exp(x)) == softplus.forward(x)
log(exp(x) - 1) == softplus.inverse(x)
```
Note: log(.) and exp(.) are applied element-wise but the Jacobian is a
reduction over the event space.
"""
@distribution_util.AppendDocstring(
kwargs_dict={
"hinge_softness": (
"Nonzero floating point `Tensor`. Controls the softness of what "
"would otherwise be a kink at the origin. Default is 1.0")})
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
hinge_softness=None,
validate_args=False,
name="softplus"):
with ops.name_scope(name, values=[hinge_softness]):
if hinge_softness is not None:
self._hinge_softness = ops.convert_to_tensor(
hinge_softness, name="hinge_softness")
else:
self._hinge_softness = None
if validate_args:
nonzero_check = check_ops.assert_none_equal(
ops.convert_to_tensor(
0, dtype=self.hinge_softness.dtype),
self.hinge_softness,
message="hinge_softness must be non-zero")
self._hinge_softness = control_flow_ops.with_dependencies(
[nonzero_check], self.hinge_softness)
super(Softplus, self).__init__(
forward_min_event_ndims=0,
validate_args=validate_args,
name=name)
def _forward(self, x):
if self.hinge_softness is None:
return nn_ops.softplus(x)
hinge_softness = math_ops.cast(self.hinge_softness, x.dtype)
return hinge_softness * nn_ops.softplus(x / hinge_softness)
def _inverse(self, y):
if self.hinge_softness is None:
return distribution_util.softplus_inverse(y)
hinge_softness = math_ops.cast(self.hinge_softness, y.dtype)
return hinge_softness * distribution_util.softplus_inverse(
y / hinge_softness)
def _inverse_log_det_jacobian(self, y):
# Could also do:
# ildj = math_ops.reduce_sum(y - distribution_util.softplus_inverse(y),
# axis=event_dims)
# but the following is more numerically stable. Ie,
# Y = Log[1 + exp{X}] ==> X = Log[exp{Y} - 1]
# ==> dX/dY = exp{Y} / (exp{Y} - 1)
# = 1 / (1 - exp{-Y}),
# which is the most stable for large Y > 0. For small Y, we use
# 1 - exp{-Y} approx Y.
if self.hinge_softness is not None:
y /= math_ops.cast(self.hinge_softness, y.dtype)
return -math_ops.log(-math_ops.expm1(-y))
def _forward_log_det_jacobian(self, x):
if self.hinge_softness is not None:
x /= math_ops.cast(self.hinge_softness, x.dtype)
return -nn_ops.softplus(-x)
@property
def hinge_softness(self):
return self._hinge_softness
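# A hedged usage sketch (graph-mode TF; numbers approximate):
#
#   bijector = Softplus(hinge_softness=0.5)
#   y = bijector.forward(2.0)  # ~= 0.5 * log(1 + exp(2.0 / 0.5)) ~= 2.009
#   x = bijector.inverse(y)    # recovers ~2.0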
| apache-2.0 |
adybbroe/mesan_compositer | mesan_compositer/ctth_quicklooks.py | 1 | 4733 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014, 2015, 2019 Adam.Dybbroe
# Author(s):
# Adam.Dybbroe <a000680@c14526.ad.smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Make quick look images of the ctth composite
"""
import argparse
from datetime import datetime
import numpy as np
import xarray as xr
from trollimage.xrimage import XRImage
from mesan_compositer import ctth_height
from mesan_compositer.netcdf_io import ncCTTHComposite
from mesan_compositer import get_config
from satpy.composites import ColormapCompositor
import sys
import os
from logging import handlers
import logging
import logging.config
LOG = logging.getLogger(__name__)
#: Default time format
_DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S'
#: Default log format
_DEFAULT_LOG_FORMAT = '[%(levelname)s: %(asctime)s : %(name)s] %(message)s'
def get_arguments():
"""
Get command line arguments
args.logging_conf_file, args.config_file, obs_time, area_id, wsize
Return
File path of the logging.ini file
File path of the application configuration file
Observation/Analysis time
Area id
Window size
"""
parser = argparse.ArgumentParser()
parser.add_argument('--datetime', '-d', help='Date and time of observation - yyyymmddhh',
required=True)
parser.add_argument('--area_id', '-a', help='Area id',
required=True)
parser.add_argument('-c', '--config_file',
type=str,
dest='config_file',
required=True,
help="The file containing configuration parameters e.g. mesan_sat_config.yaml")
parser.add_argument("-l", "--logging",
help="The path to the log-configuration file (e.g. './logging.ini')",
dest="logging_conf_file",
type=str,
required=False)
parser.add_argument("-v", "--verbose",
help="print debug messages too",
action="store_true")
args = parser.parse_args()
tanalysis = datetime.strptime(args.datetime, '%Y%m%d%H')
area_id = args.area_id
if 'template' in args.config_file:
print("Template file given as master config, aborting!")
sys.exit()
return args.logging_conf_file, args.config_file, tanalysis, area_id
if __name__ == "__main__":
(logfile, config_filename, time_of_analysis, areaid) = get_arguments()
if logfile:
logging.config.fileConfig(logfile)
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(fmt=_DEFAULT_LOG_FORMAT,
datefmt=_DEFAULT_TIME_FORMAT)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logging.getLogger('').addHandler(handler)
logging.getLogger('').setLevel(logging.DEBUG)
logging.getLogger('satpy').setLevel(logging.INFO)
LOG = logging.getLogger('ctth_quicklooks')
log_handlers = logging.getLogger('').handlers
for log_handle in log_handlers:
if type(log_handle) is handlers.SMTPHandler:
LOG.debug("Mail notifications to: %s", str(log_handle.toaddrs))
OPTIONS = get_config(config_filename)
values = {"area": areaid, }
bname = time_of_analysis.strftime(OPTIONS['ctth_composite_filename']) % values
path = OPTIONS['composite_output_dir']
filename = os.path.join(path, bname) + '.nc'
if not os.path.exists(filename):
LOG.error("File " + str(filename) + " does not exist!")
sys.exit(-1)
comp = ncCTTHComposite()
comp.load(filename)
palette = ctth_height()
ctth_data = comp.height.data
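    # Scale heights to palette indices: presumably one palette step per 500 m,
    # offset by 1 so that index 0 stays the fill value declared below.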
ctth_data = ctth_data / 500.0 + 1
ctth_data = ctth_data.astype(np.uint8)
cmap = ColormapCompositor('mesan_cloudheight_composite')
colors, sqpal = cmap.build_colormap(palette, np.uint8, {})
attrs = {'_FillValue': 0}
xdata = xr.DataArray(ctth_data, dims=['y', 'x'], attrs=attrs).astype('uint8')
pimage = XRImage(xdata)
pimage.palettize(colors)
pimage.save(filename.strip('.nc') + '_height.png')
| gpl-3.0 |