repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---
string (lengths 5-92) | string (lengths 4-232) | string (18 classes) | string (lengths 4-7) | string (lengths 736-1.04M) | string (15 classes) | int64 (-9,222,983,980,000,580,000 to 9,223,102,107B) | float64 (6.51-99.9) | int64 (15-997) | float64 (0.25-0.97) | bool (1 class)
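Each record below gives repo_name | path | copies | size, then the full file content, and ends with a trailing row of license | hash | line_mean | line_max | alpha_frac | autogenerated.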
the-fascinator/fascinator-portal | src/main/config/portal/default/default/scripts/download.py | 1 | 7607 |
import os
from com.googlecode.fascinator.api.indexer import SearchRequest
from com.googlecode.fascinator.api.storage import StorageException
from com.googlecode.fascinator.common.solr import SolrDoc, SolrResult
from java.io import ByteArrayInputStream, ByteArrayOutputStream
from java.lang import Boolean
from java.net import URLDecoder
from org.apache.commons.io import IOUtils
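# Illustrative request flow (the ids below are made-up examples, not from a real deployment):
#   full URI : /<portalId>/<pageName>/<oid>/<pid>
#   e.g.     : /default/download/uuid-1234/document.pdf
#              -> oid "uuid-1234" is resolved to a storage object via Solr/storage,
#                 pid "document.pdf" is the payload streamed back to the client;
#                 an empty pid falls back to object.getSourceId() (see __resolve below).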
class DownloadData:
def __init__(self):
pass
def __activate__(self, context):
self.services = context["Services"]
self.contextPath = context["contextPath"]
self.pageName = context["pageName"]
self.portalId = context["portalId"]
self.request = context["request"]
self.response = context["response"]
self.formData = context["formData"]
self.page = context["page"]
self.log = context["log"]
self.__metadata = SolrDoc(None)
object = None
payload = None
# URL basics
basePath = self.portalId + "/" + self.pageName
fullUri = URLDecoder.decode(self.request.getAttribute("RequestURI"))
uri = fullUri[len(basePath)+1:]
# Turn our URL into objects
object, payload = self.__resolve(uri)
if object is None:
if uri.endswith("/"):
self.log.error("Object 404: '{}'", uri)
self.response.setStatus(404);
writer = self.response.getPrintWriter("text/plain; charset=UTF-8")
writer.println("Object not found")
writer.close()
return
else:
# Sometimes adding a slash to the end will resolve the problem
self.log.error("Redirecting, object 404: '{}'", uri)
self.response.sendRedirect(context["urlBase"] + fullUri + "/")
return
# Ensure solr metadata is useable
oid = object.getId()
if self.isIndexed():
self.__metadata = self.__solrData.getResults().get(0)
else:
self.__metadata.getJsonObject().put("id", oid)
#print "URI='%s' OID='%s' PID='%s'" % (uri, object.getId(), payload.getId())
# Security check
if self.isAccessDenied(uri):
# Redirect to the object page for standard access denied error
self.response.sendRedirect(context["portalPath"] + "/detail/" + object.getId())
return
## The byte range cache will check for byte range requests first
self.cache = self.services.getByteRangeCache()
processed = self.cache.processRequest(self.request, self.response, payload)
if processed:
# We don't need to return data, the cache took care of it.
return
# Now the 'real' work of payload retrieval
if payload is not None:
filename = os.path.split(payload.getId())[1]
mimeType = payload.getContentType()
if mimeType == "application/octet-stream":
self.response.setHeader("Content-Disposition", "attachment; filename=%s" % filename)
type = payload.getContentType()
            # Encode textual responses before sending
if type is not None and type.startswith("text/"):
out = ByteArrayOutputStream()
IOUtils.copy(payload.open(), out)
payload.close()
writer = self.response.getPrintWriter(type + "; charset=UTF-8")
writer.println(out.toString("UTF-8"))
writer.close()
# Other data can just be streamed out
else:
if type is None:
# Send as raw data
out = self.response.getOutputStream("application/octet-stream")
else:
out = self.response.getOutputStream(type)
IOUtils.copy(payload.open(), out)
payload.close()
object.close()
out.close()
else:
self.response.setStatus(404)
writer = self.response.getPrintWriter("text/plain; charset=UTF-8")
writer.println("Resource not found: uri='%s'" % uri)
writer.close()
def getAllowedRoles(self):
metadata = self.getMetadata()
if metadata is not None:
return metadata.getList("security_filter")
else:
return []
def getMetadata(self):
return self.__metadata
def isAccessDenied(self,uri):
# Admins always have access
if self.page.authentication.is_admin():
return False
slash = uri.find("/")
        if slash == -1:
            # Malformed URI: deny access
            return True
oid = uri[:slash]
objectMetadata = self.services.getStorage().getObject(oid).getMetadata()
if objectMetadata is not None:
current_user = self.page.authentication.get_username()
owner = objectMetadata.getProperty("owner")
if current_user == owner:
return False
# Check for normal access
myRoles = self.page.authentication.get_roles_list()
allowedRoles = self.getAllowedRoles()
if myRoles is None or allowedRoles is None:
return True
for role in myRoles:
if role in allowedRoles:
return False
return True
def isDetail(self):
preview = Boolean.parseBoolean(self.formData.get("preview", "false"))
return not (self.request.isXHR() or preview)
def isIndexed(self):
found = self.__solrData.getNumFound()
return (found is not None) and (found == 1)
def __resolve(self, uri):
# Grab OID from the URL
slash = uri.find("/")
if slash == -1:
return None, None
oid = uri[:slash]
# Query solr for this object
self.__loadSolrData(oid)
if not self.isIndexed():
print "WARNING: Object '%s' not found in index" % oid
sid = None
else:
# Query storage for this object
sid = self.__solrData.getResults().get(0).getFirst("storage_id")
try:
if sid is None:
# Use the URL OID
object = self.services.getStorage().getObject(oid)
else:
# We have a special storage ID from the index
object = self.services.getStorage().getObject(sid)
except StorageException, e:
#print "Failed to access object: %s" % (str(e))
return None, None
# Grab the payload from the rest of the URL
pid = uri[slash+1:]
if pid == "":
# We want the source
pid = object.getSourceId()
# Now get the payload from storage
try:
payload = object.getPayload(pid)
except StorageException, e:
#print "Failed to access payload: %s" % (str(e))
return None, None
# We're done
return object, payload
def __loadSolrData(self, oid):
portal = self.page.getPortal()
query = 'id:"%s"' % oid
if self.isDetail() and portal.getSearchQuery():
query += " AND " + portal.getSearchQuery()
req = SearchRequest(query)
req.addParam("fq", 'item_type:"object"')
if self.isDetail():
req.addParam("fq", portal.getQuery())
out = ByteArrayOutputStream()
self.services.getIndexer().search(req, out)
self.__solrData = SolrResult(ByteArrayInputStream(out.toByteArray()))
| gpl-2.0 | 963,536,829,417,706,400 | 35.927184 | 100 | 0.567767 | false |
GraveRaven/hivemind | hivemindsrc/ants.py | 1 | 15536 |
#!/bin/env python
"""
The MIT License
Copyright (c) 2010 The Chicago Tribune & Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from multiprocessing import Pool
import os
import re
import socket
import time
import sys
IS_PY2 = sys.version_info.major == 2
if IS_PY2:
from urllib2 import urlopen, Request
from StringIO import StringIO
else:
from urllib.request import urlopen, Request
from io import StringIO
import base64
import csv
import random
import ssl
from contextlib import contextmanager
import traceback
import boto.ec2
import boto.exception
import paramiko
STATE_FILENAME = os.path.expanduser('~/.ants')
# Utilities
@contextmanager
def _redirect_stdout(outfile=None):
save_stdout = sys.stdout
sys.stdout = outfile or StringIO()
yield
sys.stdout = save_stdout
def _read_server_list():
instance_ids = []
if not os.path.isfile(STATE_FILENAME):
return (None, None, None, None)
with open(STATE_FILENAME, 'r') as f:
username = f.readline().strip()
key_name = f.readline().strip()
zone = f.readline().strip()
text = f.read()
instance_ids = [i for i in text.split('\n') if i != '']
    print('Read %i ants from the roster.' % len(instance_ids))
return (username, key_name, zone, instance_ids)
def _write_server_list(username, key_name, zone, instances):
with open(STATE_FILENAME, 'w') as f:
f.write('%s\n' % username)
f.write('%s\n' % key_name)
f.write('%s\n' % zone)
f.write('\n'.join([instance.id for instance in instances]))
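# For reference, the state file written above is plain text; a hypothetical ~/.ants
# might look like:
#
#   ec2-user
#   my-keypair
#   us-east-1d
#   i-0123456789abcdef0
#   i-0fedcba9876543210
#
# i.e. username, key name, zone, then one EC2 instance id per line, which is exactly
# what _read_server_list() parses back.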
def _delete_server_list():
os.remove(STATE_FILENAME)
def _get_pem_path(key):
return os.path.expanduser('~/.ssh/%s.pem' % key)
def _get_region(zone):
return zone if 'gov' in zone else zone[:-1] # chop off the "d" in the "us-east-1d" to get the "Region"
def _get_security_group_id(connection, security_group_name, subnet):
if not security_group_name:
print('The bees need a security group to run under. Need to open a port from where you are to the target subnet.')
return
security_groups = connection.get_all_security_groups(filters={'group-name': [security_group_name]})
if not security_groups:
print('The bees need a security group to run under. The one specified was not found.')
return
group = security_groups[0] if security_groups else None
return group.id
# Methods
def up(count, group, zone, image_id, instance_type, username, key_name, subnet, bid = None):
"""
Startup the load testing server.
"""
existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()
count = int(count)
if existing_username == username and existing_key_name == key_name and existing_zone == zone:
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
        existing_instances = [r.instances[0] for r in existing_reservations if r.instances[0].state == 'running']
# User, key and zone match existing values and instance ids are found on state file
if count <= len(existing_instances):
# Count is less than the amount of existing instances. No need to create new ones.
print('Ants are already assembled and awaiting orders.')
return
else:
# Count is greater than the amount of existing instances. Need to create the only the extra instances.
count -= len(existing_instances)
elif instance_ids:
# Instances found on state file but user, key and/or zone not matching existing value.
# State file only stores one user/key/zone config combination so instances are unusable.
print('Taking down {} unusable ants.'.format(len(instance_ids)))
# Redirect prints in down() to devnull to avoid duplicate messages
with _redirect_stdout():
down()
# down() deletes existing state file so _read_server_list() returns a blank state
existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list()
pem_path = _get_pem_path(key_name)
if not os.path.isfile(pem_path):
print('Warning. No key file found for %s. You will need to add this key to your SSH agent to connect.' % pem_path)
print('Connecting to the hive.')
try:
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
except boto.exception.NoAuthHandlerFound as e:
print("Authenciation config error, perhaps you do not have a ~/.boto file with correct permissions?")
print(e.message)
return e
except Exception as e:
print("Unknown error occured:")
print(e.message)
return e
if ec2_connection == None:
raise Exception("Invalid zone specified? Unable to connect to region using zone name")
groupId = group if subnet is None else _get_security_group_id(ec2_connection, group, subnet)
print("GroupId found: %s" % groupId)
placement = None if 'gov' in zone else zone
print("Placement: %s" % placement)
if bid:
print('Attempting to call up %i spot ants, this can take a while...' % count)
spot_requests = ec2_connection.request_spot_instances(
image_id=image_id,
price=bid,
count=count,
key_name=key_name,
security_group_ids=[groupId],
instance_type=instance_type,
placement=placement,
subnet_id=subnet)
# it can take a few seconds before the spot requests are fully processed
time.sleep(5)
instances = _wait_for_spot_request_fulfillment(ec2_connection, spot_requests)
else:
print('Attempting to call up %i ants.' % count)
try:
reservation = ec2_connection.run_instances(
image_id=image_id,
min_count=count,
max_count=count,
key_name=key_name,
security_group_ids=[groupId],
instance_type=instance_type,
placement=placement,
subnet_id=subnet)
except boto.exception.EC2ResponseError as e:
print("Unable to call ants:", e.message)
return e
instances = reservation.instances
if instance_ids:
existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
        existing_instances = [r.instances[0] for r in existing_reservations if r.instances[0].state == 'running']
        # Keep the still-running instances and drop state-file ids whose instance is gone
        # (explicit loops/comprehensions so this also behaves on Python 3, where filter/map are lazy).
        instances.extend(existing_instances)
        existing_ids = [j.id for j in existing_instances]
        dead_instances = [i for i in instance_ids if i not in existing_ids]
        for i in dead_instances:
            instance_ids.remove(i)
print('Waiting for ants to spawn...')
instance_ids = instance_ids or []
for instance in [i for i in instances if i.state == 'pending']:
instance.update()
while instance.state != 'running':
print('.')
time.sleep(5)
instance.update()
instance_ids.append(instance.id)
print('Ant %s is ready.' % instance.id)
ec2_connection.create_tags(instance_ids, { "Name": "an ant!" })
_write_server_list(username, key_name, zone, instances)
print('The hive has assembled %i ants.' % len(instances))
def report():
"""
Report the status of the load testing servers.
"""
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants have been mobilized.')
return
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
instances = []
for reservation in reservations:
instances.extend(reservation.instances)
for instance in instances:
print('Ant %s: %s @ %s' % (instance.id, instance.state, instance.ip_address))
def down():
"""
Shutdown the load testing server.
"""
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants have been mobilized.')
return
print('Connecting to the hive.')
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
print('Calling off the hive.')
terminated_instance_ids = ec2_connection.terminate_instances(
instance_ids=instance_ids)
print('Stood down %i ants.' % len(terminated_instance_ids))
_delete_server_list()
def _wait_for_spot_request_fulfillment(conn, requests, fulfilled_requests=None):
    """
    Wait until all spot requests are fulfilled.
    Once all spot requests are fulfilled, return a list of corresponding spot instances.
    """
    # Avoid a shared mutable default argument: start each call chain with a fresh list.
    if fulfilled_requests is None:
        fulfilled_requests = []
    if len(requests) == 0:
reservations = conn.get_all_instances(instance_ids = [r.instance_id for r in fulfilled_requests])
return [r.instances[0] for r in reservations]
else:
time.sleep(10)
print('.')
requests = conn.get_all_spot_instance_requests(request_ids=[req.id for req in requests])
for req in requests:
if req.status.code == 'fulfilled':
fulfilled_requests.append(req)
print("spot ant `{}` joined the hive.".format(req.instance_id))
return _wait_for_spot_request_fulfillment(conn, [r for r in requests if r not in fulfilled_requests], fulfilled_requests)
def _execute_order(params):
print('Ant %i is joining the hive.' % params['i'])
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None
if not os.path.isfile(pem_path):
client.load_system_host_keys()
client.connect(params['instance_name'], username=params['username'])
else:
client.connect(
params['instance_name'],
username=params['username'],
key_filename=pem_path)
print('Ant %i is executing order' % params['i'])
stdin, stdout, stderr = client.exec_command(params['order'])
#response = {}
# paramiko's read() returns bytes which need to be converted back to a str
#ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8')
print(stdout.read().decode('utf-8'))
client.close()
except socket.error as e:
return e
except Exception as e:
traceback.print_exc()
print()
raise e
def _execute_order_file(params):
upload_path = "/tmp/"
print('Ant %i is joining the hive.' % params['i'])
try:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None
if not os.path.isfile(pem_path):
client.load_system_host_keys()
client.connect(params['instance_name'], username=params['username'])
else:
client.connect(
params['instance_name'],
username=params['username'],
key_filename=pem_path)
order_file = params['order_file']
filename = os.path.basename(order_file)
print('Ant %s uploading file %s to %s' % (params['i'], order_file, upload_path + filename))
command = 'scp -i %s -o StrictHostKeyChecking=no %s %s@%s:%s' % (_get_pem_path(params['key_name']), order_file, params['username'], params['instance_name'], upload_path)
os.system(command)
print('Ant %s executing file %s' % (params['i'], upload_path + filename))
stdin, stdout, stderr = client.exec_command('chmod +x %s'% upload_path + filename)
stdin, stdout, stderr = client.exec_command(upload_path + filename)
#response = {}
# paramiko's read() returns bytes which need to be converted back to a str
#ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8')
print(stdout.read().decode('utf-8'))
client.close()
except socket.error as e:
return e
except Exception as e:
traceback.print_exc()
print()
raise e
def order(orders, order_files):
username, key_name, zone, instance_ids = _read_server_list()
if not instance_ids:
print('No ants are ready for orders.')
return
print('Connecting to the hive.')
ec2_connection = boto.ec2.connect_to_region(_get_region(zone))
print('Assembling ants.')
reservations = ec2_connection.get_all_instances(instance_ids=instance_ids)
instances = []
for reservation in reservations:
instances.extend(reservation.instances)
instance_count = len(instances)
params = []
#Start with executing order
if not orders == None:
for order in orders:
del params[:]
for i, instance in enumerate(instances):
params.append({
'i': i,
'instance_id': instance.id,
'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name,
'username': username,
'key_name': key_name,
'order': order
})
print('Organizing the hive.')
# Spin up processes for connecting to EC2 instances
pool = Pool(len(params))
results = pool.map(_execute_order, params)
#Now run order files
if not order_files == None:
for order_file in order_files:
print('Filename: %s' % order_file)
del params[:]
for i, instance in enumerate(instances):
params.append({
'i': i,
'instance_id': instance.id,
'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name,
'username': username,
'key_name': key_name,
'order_file': order_file
})
#print('Running order file %s' % order_file)
print('Organizing the hive.')
# Spin up processes for connecting to EC2 instances
pool = Pool(len(params))
results = pool.map(_execute_order_file, params)
print('The hive is awaiting new orders.')
sys.exit(0)
| mit | 2,145,380,531,466,934,800 | 33.678571 | 177 | 0.630021 | false |
nojhan/ereshkigal | tunnelmon.py | 1 | 26267 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Ereshkigal is an AutoSSH tunnel monitor
# It gives a curses user interface to monitor existing SSH tunnels that are managed with autossh.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author : nojhan <nojhan@nojhan.net>
#
#################################################################################################
# CORE
#################################################################################################
import os
import subprocess
import logging
import psutil
import socket
import re
import collections
class Tunnel:
def __init__(self, ssh_pid = None, in_port = None, via_host = None, target_host = None, out_port = None):
# assert(ssh_pid != None)
self.ssh_pid = ssh_pid
assert(in_port!=None)
self.in_port = in_port
assert(via_host!=None)
self.via_host = via_host
assert(target_host!=None)
self.target_host = target_host
assert(out_port!=None)
self.out_port = out_port
self.connections = []
def repr_tunnel(self):
return "%i\t%i\t%s\t%s\t%i" % (
self.ssh_pid,
self.in_port,
self.via_host,
self.target_host,
self.out_port)
def repr_connections(self):
        # list of connections attached to this tunnel
rep = ""
for c in self.connections:
rep += "\n\t↳ %s" % c
return rep
def __repr__(self):
return self.repr_tunnel() + self.repr_connections()
class AutoTunnel(Tunnel):
def __init__(self, autossh_pid = None, *args, **kwargs):
super().__init__(*args, **kwargs)
assert(autossh_pid!=None)
self.autossh_pid = autossh_pid
def repr_tunnel(self):
rep = super().repr_tunnel()
return "auto\t" + rep
class RawTunnel(Tunnel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def repr_tunnel(self):
rep = super().repr_tunnel()
return "ssh\t" + rep
class Connection:
"""A dictionary that stores an SSH connection related to a tunnel"""
def __init__(self, local_address = None, in_port = None, foreign_address = None, out_port = None,
status = None, family = None ):
# informations available with netstat
assert(local_address!=None)
self.local_address = local_address
assert(in_port!=None)
self.in_port = in_port
self.foreign_address = foreign_address
self.out_port = out_port
assert(status!=None)
self.status = status
assert(family!=None)
self.family = family
self.family_rep = {socket.AddressFamily.AF_INET:"INET", socket.AddressFamily.AF_INET6:"INET6", socket.AddressFamily.AF_UNIX:"UNIX"}
# FIXME would be nice to have an estimation of the connections latency
#self.latency = 0
def __repr__(self):
# do not logging.debug all the informations by default
if self.foreign_address and self.out_port:
return "%s\t%s\t%s:%i → %s:%i" % (
self.family_rep[self.family],
self.status,
self.local_address,
self.in_port,
self.foreign_address,
self.out_port,
)
else:
return "%s\t%s\t%s:%i" % (
self.family_rep[self.family],
self.status,
self.local_address,
self.in_port,
)
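# For illustration (made-up addresses), __repr__ renders a forwarded connection as
#   INET    ESTABLISHED    127.0.0.1:5000 → 192.0.2.10:80
# (tab-separated in the real output) and a listening socket, which has no foreign
# endpoint, as
#   INET    LISTEN    127.0.0.1:5000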
class TunnelsParser:
def __init__(self):
"""Warning: the initialization does not gather tunnels informations, use update() to do so"""
# { ssh_pid : Tunnel }
self.tunnels = collections.OrderedDict()
# do not perform update by default
# this is necessary because one may want
# only a list of connections OR autossh processes
#self.update()
self.re_forwarding = re.compile(r"-L(\d+):(.+):(\d+)")
self.header = 'TYPE\tSSH_PID\tIN_PORT\tVIA_HOST\tTARGET_HOST\tOUT_PORT'
def get_tunnel(self, pos):
pid = list(self.tunnels.keys())[pos]
return self.tunnels[pid]
def parse(self, cmd):
cmdline = " ".join(cmd)
        logging.debug('autossh cmd line: %s', cmdline)
        logging.debug('forwarding regexp: %s', self.re_forwarding)
match = self.re_forwarding.findall(cmdline)
logging.debug(match)
if match:
assert(len(match)==1)
in_port, target_host, out_port = match[0]
logging.debug("matches: ", match)
# Find the hostname on wich the tunnel is built.
via_host = "unknown"
# Search backward and take the first parameter argument.
# FIXME this is an ugly hack
for i in range( len(cmd)-1,0,-1 ):
if cmd[i][0] != '-':
via_host = cmd[i]
break
return (int(in_port), via_host, target_host, int(out_port))
def update(self):
"""Gather and parse informations from the operating system"""
self.tunnels.clear()
# Browse the SSH processes handling a tunnel.
for proc in psutil.process_iter():
try:
process = proc.as_dict(attrs=['pid','ppid','name','cmdline','connections'])
cmd = process['cmdline']
except psutil.NoSuchProcess:
pass
else:
if process['name'] == 'ssh':
logging.debug(process)
in_port, via_host, target_host, out_port = self.parse(cmd)
                    logging.debug("in_port=%s via_host=%s target_host=%s out_port=%s", in_port, via_host, target_host, out_port)
# Check if this ssh tunnel is managed by autossh.
parent = psutil.Process(process['ppid'])
if parent.name() == 'autossh':
# Add an autossh tunnel.
pid = parent.pid # autossh pid
self.tunnels[pid] = AutoTunnel(pid, process['pid'], in_port, via_host, target_host, out_port )
else:
# Add a raw tunnel.
pid = process['pid']
self.tunnels[pid] = RawTunnel(pid, in_port, via_host, target_host, out_port )
for c in process['connections']:
logging.debug(c)
laddr,lport = c.laddr
if c.raddr:
raddr,rport = c.raddr
else:
raddr,rport = (None,None)
connection = Connection(laddr,lport,raddr,rport,c.status,c.family)
logging.debug(connection)
self.tunnels[pid].connections.append(connection)
logging.debug(self.tunnels)
def __repr__(self):
reps = [self.header]
for t in self.tunnels:
reps.append(str(self.tunnels[t]))
return "\n".join(reps)
#################################################################################################
# INTERFACES
#################################################################################################
import curses
import time
import signal
class CursesMonitor:
"""Textual user interface to display up-to-date informations about current tunnels"""
def __init__(self, scr):
# curses screen
self.scr = scr
# tunnels monitor
self.tp = TunnelsParser()
# selected line
self.cur_line = -1
# selected pid
self.cur_pid = -1
# switch to show only autoss processes (False) or ssh connections also (True)
self.show_connections = False
# FIXME pass as parameters+options
self.update_delay = 1 # seconds of delay between two data updates
self.ui_delay = 0.05 # seconds between two screen update
# colors
# FIXME different colors for different types of tunnels (auto or raw)
self.colors_tunnel = {'kind_auto':4, 'kind_raw':5, 'ssh_pid':0, 'in_port':3, 'via_host':2, 'target_host':2, 'out_port':3, 'tunnels_nb':4, 'tunnels_nb_none':1}
self.colors_highlight = {'kind_auto':9, 'kind_raw':9, 'ssh_pid':9, 'in_port':9, 'via_host':9, 'target_host':9, 'out_port':9, 'tunnels_nb':9, 'tunnels_nb_none':9}
self.colors_connection = {'ssh_pid':0, 'autossh_pid':0, 'status':4, 'status_out':1, 'local_address':2, 'in_port':3, 'foreign_address':2, 'out_port':3}
self.header = ("TYPE","SSHPID","INPORT","VIA","TARGET","OUTPORT")
def do_Q(self):
"""Quit"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: Q")
return False
def do_R(self):
"""Reload autossh tunnel"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: R")
# if a pid is selected
if self.cur_pid != -1:
# send the SIGUSR1 signal
if type(self.tp.get_tunnel(self.cur_line)) == AutoTunnel:
# autossh performs a reload of existing tunnels that it manages
logging.debug("SIGUSR1 on PID: %i" % self.cur_pid)
os.kill( self.cur_pid, signal.SIGUSR1 )
else:
logging.debug("Cannot reload a RAW tunnel")
return True
def do_C(self):
"""Close tunnel"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: C")
if self.cur_pid != -1:
# send a SIGKILL
# the related process is stopped
# FIXME SIGTERM or SIGKILL ?
tunnel = self.tp.get_tunnel(self.cur_line)
if type(tunnel) == AutoTunnel:
logging.debug("SIGKILL on autossh PID: %i" % self.cur_pid)
try:
os.kill( self.cur_pid, signal.SIGKILL )
except OSError:
logging.error("No such process: %i" % self.cur_pid)
logging.debug("SIGKILL on ssh PID: %i" % tunnel.ssh_pid)
try:
os.kill( tunnel.ssh_pid, signal.SIGKILL )
except OSError:
logging.error("No such process: %i" % tunnel.ssh_pid)
self.cur_line = -1
self.cur_pid = -1
# FIXME update cur_pid or get rid of it everywhere
return True
def do_N(self):
"""Show connections"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: N")
self.show_connections = not self.show_connections
return True
def do_258(self):
"""Move down"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: down")
# if not the end of the list
if self.cur_line < len(self.tp.tunnels)-1:
self.cur_line += 1
# get the pid
if type(self.tp.get_tunnel(self.cur_line)) == AutoTunnel:
self.cur_pid = self.tp.get_tunnel(self.cur_line).autossh_pid
else:
self.cur_pid = self.tp.get_tunnel(self.cur_line).ssh_pid
return True
def do_259(self):
"""Move up"""
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("Key pushed: up")
if self.cur_line > -1:
self.cur_line -= 1
            if self.cur_line >= 0:
                tunnel = self.tp.get_tunnel(self.cur_line)
                self.cur_pid = tunnel.autossh_pid if isinstance(tunnel, AutoTunnel) else tunnel.ssh_pid
        return True
def __call__(self):
"""Start the interface"""
self.scr.clear() # clear all
self.scr.nodelay(1) # non-bloking getch
# first display
self.display()
# first update counter
        self.last_update = time.time()
self.last_state = None
self.log_ticks = ""
# infinite loop
notquit = True
while(notquit):
# wait some time
# necessary to not overload the system with unnecessary calls
time.sleep( self.ui_delay )
# if its time to update
if time.time() > self.last_update + self.update_delay:
self.tp.update()
# reset the counter
self.last_update = time.time()
state = "%s" % self.tp
if state != self.last_state:
logging.debug("Waited: %s" % self.log_ticks)
self.log_ticks = ""
logging.debug("----- Time of screen update: %s -----" % time.time())
logging.debug("State of tunnels:\n%s" % self.tp)
self.last_state = state
else:
self.log_ticks += "."
kc = self.scr.getch() # keycode
if kc != -1: # if keypress
pass
ch = chr(0)
if 0 < kc < 256: # if ascii key
# ascii character from the keycode
ch = chr(kc)
# Call the do_* handler.
fch = "do_%s" % ch.capitalize()
fkc = "do_%i" % kc
logging.debug("key func: %s / %s" % (fch,fkc))
if fch in dir(self):
notquit = eval("self."+fch+"()")
elif fkc in dir(self):
notquit = eval("self."+fkc+"()")
logging.debug("notquit = %s" % notquit)
# update the display
self.display()
# force a screen refresh
self.scr.refresh()
# end of the loop
def format(self):
reps = [self.tp.tunnels[t].repr_tunnel() for t in self.tp.tunnels]
tuns = [t.split() for t in reps]
tuns.append(self.header)
logging.debug(tuns)
cols = zip(*tuns)
widths = [max(len(s) for s in col) for col in cols]
logging.debug(widths)
fmt = ['{{: <{}}}'.format(w) for w in widths]
logging.debug(fmt)
return fmt
def display(self):
"""Generate the interface screen"""
# Automagically format help line with available do_* handlers.
h = []
for f in dir(self):
if "do_" in f:
key = f.replace("do_","")
if key.isalpha(): # We do not want arrows.
msg = "[%s] %s" % (key,eval("self.%s.__doc__" % f))
h.append(msg)
help_msg = ", ".join(h)
help_msg += "\n"
self.scr.addstr(0,0, help_msg, curses.color_pair(4) )
self.scr.clrtoeol()
# Second line
self.scr.addstr( "Active tunnels: ", curses.color_pair(6) )
self.scr.addstr( str( len(self.tp.tunnels) ), curses.color_pair(1) )
self.scr.addstr( " / Active connections: ", curses.color_pair(6) )
self.scr.addstr( str( sum([len(self.tp.tunnels[t].connections) for t in self.tp.tunnels]) ), curses.color_pair(1) )
self.scr.addstr( '\n', curses.color_pair(1) )
self.scr.clrtoeol()
# if no line is selected
color = 0
if self.cur_line==-1:
# selected color for the header
color = 9
self.cur_pid = -1
# header line
# header_msg = "TYPE\tINPORT\tVIA \tTARGET \tOUTPORT"
# if os.geteuid() == 0:
header_msg = " ".join(self.format()).format(*self.header)
header_msg += " CONNECTIONS"
self.scr.addstr( header_msg, curses.color_pair(color) )
self.scr.clrtoeol()
# for each tunnel processes available in the monitor
for l in range(len(self.tp.tunnels)):
# add a line for the l-th autossh process
self.add_tunnel( l )
# if one want to show connections
if self.show_connections:# and os.getuid() == 0:
self.add_connection( l )
self.scr.clrtobot()
def add_connection(self, line ):
"""Add lines for each connections related to the l-th autossh process"""
colors = self.colors_connection
# for each connections related to te line-th autossh process
for t in sorted(self.tp.get_tunnel(line).connections, key=lambda c:c.status):
# FIXME fail if the screen's height is too small.
self.scr.addstr( '\n\t+ ' )
color = self.colors_connection['status']
# if the connections is established
# TODO avoid hard-coded constants
if t.status != 'ESTABLISHED' and t.status != 'LISTEN':
color = self.colors_connection['status_out']
self.scr.addstr( t.status, curses.color_pair( color ) )
self.scr.addstr( '\t' )
# self.scr.addstr( str( t['ssh_pid'] ), curses.color_pair(colors['ssh_pid'] ) )
# self.scr.addstr( '\t' )
self.scr.addstr( str( t.local_address ) , curses.color_pair(colors['local_address'] ))
self.scr.addstr( ':' )
self.scr.addstr( str( t.in_port ) , curses.color_pair(colors['in_port'] ))
if t.foreign_address and t.out_port:
self.scr.addstr( ' -> ' )
self.scr.addstr( str( t.foreign_address ) , curses.color_pair(colors['foreign_address'] ))
self.scr.addstr( ':' )
self.scr.addstr( str( t.out_port ) , curses.color_pair(colors['out_port'] ))
self.scr.clrtoeol()
def add_tunnel(self, line):
"""Add line corresponding to the line-th autossh process"""
self.scr.addstr( '\n' )
colors = self.colors_tunnel
if self.cur_line == line:
colors = self.colors_highlight
if type(self.tp.get_tunnel(line)) == AutoTunnel:
self.scr.addstr( self.format()[0].format('auto'), curses.color_pair(colors['kind_auto']) )
self.scr.addstr( ' ', curses.color_pair(colors['kind_auto']) )
else:
self.scr.addstr( self.format()[0].format('ssh'), curses.color_pair(colors['kind_raw']) )
self.scr.addstr( ' ', curses.color_pair(colors['kind_raw']) )
# self.add_tunnel_info('ssh_pid', line)
self.add_tunnel_info('ssh_pid', line, 1)
self.add_tunnel_info('in_port', line, 2)
self.add_tunnel_info('via_host', line, 3)
self.add_tunnel_info('target_host', line, 4)
self.add_tunnel_info('out_port', line, 5)
nb = len(self.tp.get_tunnel(line).connections )
if nb > 0:
# for each connection related to this process
for i in self.tp.get_tunnel(line).connections:
# add a vertical bar |
# the color change according to the status of the connection
if i.status == 'ESTABLISHED' or i.status == 'LISTEN':
self.scr.addstr( '|', curses.color_pair(self.colors_connection['status']) )
else:
self.scr.addstr( '|', curses.color_pair(self.colors_connection['status_out']) )
else:
# if os.geteuid() == 0:
# if there is no connection, display a "None"
self.scr.addstr( 'None', curses.color_pair(self.colors_tunnel['tunnels_nb_none']) )
self.scr.clrtoeol()
def add_tunnel_info( self, key, line, col ):
"""Add an information of an autossh process, in the configured color"""
colors = self.colors_tunnel
# if the line is selected
if self.cur_line == line:
# set the color to the highlight one
colors = self.colors_highlight
txt = eval("str(self.tp.get_tunnel(line).%s)" % key)
if key == 'target_host' or key == 'via_host':
txt = eval("str(self.tp.get_tunnel(line).%s)" % key)
self.scr.addstr(self.format()[col].format(txt), curses.color_pair(colors[key]) )
self.scr.addstr( ' ', curses.color_pair(colors[key]) )
if __name__ == "__main__":
import sys
from optparse import OptionParser
import configparser
usage = """%prog [options]
A user interface to monitor existing SSH tunnels that are managed with autossh.
Called without options, ereshkigal displays a list of tunnels on the standard output.
Note: Users other than root will not see tunnels connections.
Version 0.3"""
parser = OptionParser(usage=usage)
parser.add_option("-c", "--curses",
action="store_true", default=False,
help="Start the user interface in text mode.")
parser.add_option("-n", "--connections",
action="store_true", default=False,
help="Display only SSH connections related to a tunnel.")
parser.add_option("-u", "--tunnels",
action="store_true", default=False,
help="Display only the list of tunnels processes.")
LOG_LEVELS = {'error' : logging.ERROR,
'warning' : logging.WARNING,
'debug' : logging.DEBUG}
parser.add_option('-l', '--log-level', choices=list(LOG_LEVELS), default='error', metavar='LEVEL',
help='Log level (%s), default: %s.' % (", ".join(LOG_LEVELS), 'error') )
parser.add_option('-g', '--log-file', default=None, metavar='FILE',
help="Log to this file, default to standard output. \
If you use the curses interface, you may want to set this to actually see logs.")
parser.add_option('-f', '--config-file', default=None, metavar='FILE',
help="Use this configuration file (default: '~/.ereshkigal.conf')")
(asked_for, args) = parser.parse_args()
logmsg = "----- Started Ereshkigal -----"
if asked_for.log_file:
logfile = asked_for.log_file
logging.basicConfig(filename=logfile, level=LOG_LEVELS[asked_for.log_level])
logging.debug(logmsg)
logging.debug("Log in %s" % logfile)
else:
if asked_for.curses:
logging.warning("It's a bad idea to log to stdout while in the curses interface.")
logging.basicConfig(level=LOG_LEVELS[asked_for.log_level])
logging.debug(logmsg)
logging.debug("Log to stdout")
logging.debug("Asked for: %s" % asked_for)
# unfortunately, asked_for class has no __len__ method in python 2.4.3 (bug?)
#if len(asked_for) > 1:
# parser.error("asked_for are mutually exclusive")
config = configparser.ConfigParser()
if asked_for.config_file:
try:
config.read(asked_for.config_file)
except configparser.MissingSectionHeaderError:
logging.error("'%s' contains no known configuration" % asked_for.config_file)
else:
        try:
            config.read(os.path.expanduser('~/.ereshkigal.conf'))
        except configparser.MissingSectionHeaderError:
            logging.error("'~/.ereshkigal.conf' contains no known configuration")
# Load autossh instances by sections: [expected]
# if config['expected']:
if asked_for.curses:
logging.debug("Entering curses mode")
import curses
import traceback
try:
scr = curses.initscr()
curses.start_color()
# 0:black, 1:red, 2:green, 3:yellow, 4:blue, 5:magenta, 6:cyan, 7:white
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_CYAN, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_GREEN)
curses.init_pair(9, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.noecho()
curses.cbreak()
scr.keypad(1)
# create the monitor
mc = CursesMonitor( scr )
# call the monitor
mc()
scr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
except:
# end cleanly
scr.keypad(0)
curses.echo()
curses.nocbreak()
curses.endwin()
# print the traceback
traceback.print_exc()
elif asked_for.connections:
logging.debug("Entering connections mode")
tp = TunnelsParser()
tp.update()
        # update() above already gathered the data; just print the connections
logging.debug("UID: %i." % os.geteuid())
# if os.geteuid() == 0:
for t in tp.tunnels:
for c in tp.tunnels[t].connections:
print(tp.tunnels[t].ssh_pid, c)
# else:
# logging.error("Only root can see SSH tunnels connections.")
elif asked_for.tunnels:
logging.debug("Entering tunnel mode")
tp = TunnelsParser()
tp.update()
        # update() above already gathered the data; just print the tunnel processes
print(tp.header)
for t in tp.tunnels:
print(tp.tunnels[t].repr_tunnel())
else:
logging.debug("Entering default mode")
tp = TunnelsParser()
# call update
tp.update()
# call the default __repr__
print(tp)
#
# In Mesopotamian mythology, Ereshkigal (lit. "great lady under earth")
# was the goddess of Irkalla, the land of the dead or underworld.
#
# Thus, she knows a lot about tunnels...
#
# http://en.wikipedia.org/wiki/Ereshkigal
#
| gpl-3.0 | -8,461,380,079,873,353,000 | 33.924202 | 169 | 0.545025 | false |
yilei0620/3D_Conditional_Gan | GenSample_obj.py | 1 | 4544 |
import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from sklearn.externals import joblib
import scipy
from scipy import io
# from matplotlib import pyplot as plt
# from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, conv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.metrics import nnc_score, nnd_score
from load import load_shapenet_train, load_shapenet_test
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy
parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150}
for p in parameters:
tmp = p + " = parameters[p]"
exec(tmp)
# print conditional,type(batchsize),Channel[-1],kernal
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
## filter_shape: (output channels, input channels, filter height, filter width, filter depth)
## load the parameters
# gen_params = [gw1, gw2, gw3, gw4, gw5, gwx]
# discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy]
temp = joblib.load('models%d/50_gen_params.jl'%objectNumber)
gw1 = sharedX(temp[0])
gg1 = sharedX(temp[1])
gb1 = sharedX(temp[2])
gw2 = sharedX(temp[3])
gg2 = sharedX(temp[4])
gb2 = sharedX(temp[5])
gw3 = sharedX(temp[6])
gg3 = sharedX(temp[7])
gb3 = sharedX(temp[8])
gw4 = sharedX(temp[9])
gg4 = sharedX(temp[10])
gb4 = sharedX(temp[11])
gwx = sharedX(temp[12])
gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4 ,gg4, gb4, gwx]
##
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]))
input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])
filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1])
Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2))
input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2])
filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2])
Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3))
input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3])
filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3])
Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4))
input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4])
filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4])
GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'))
return GlX
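# Shape flow through gen() for the parameter values defined above (Nz=200,
# Channel=(1,64,128,256,512), Convlayersize=(64,32,16,8,4), kernal=(4,4,4,4)):
#   Z                : (batchsize, 200)
#   dense + reshape  : (batchsize, 512, 4, 4, 4)
#   deconv 1 (Gl2)   : (batchsize, 256, 8, 8, 8)
#   deconv 2 (Gl3)   : (batchsize, 128, 16, 16, 16)
#   deconv 3 (Gl4)   : (batchsize, 64, 32, 32, 32)
#   deconv 4 (GlX)   : (batchsize, 1, 64, 64, 64), sigmoid output
# (sizes inferred from the tuples above; batchsize is whatever the Z batch provides)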
X = T.tensor5()
Z = T.matrix()
gX = gen(Z, *gen_params)
print 'COMPILING'
t = time()
# _train_g = theano.function([X, Z, Y], cost, updates=g_updates)
# _train_d = theano.function([X, Z, Y], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)
# trX, trY, ntrain = load_shapenet_train()
n = 10
nbatch = 10
rng = np.random.RandomState(int(time()))
# sample_ymb = floatX(np.asarray(np.eye(3)))
z_dist = scipy.io.loadmat('Z_dist_class2.mat')
z_mean = z_dist['mean']
z_mean = np.reshape(z_mean,(Nz,1))
z_std = z_dist['std']
z_std = np.reshape(z_std,(Nz,1))
def gen_z(z_dist,nbatch):
ret = np.zeros((nbatch,Nz))
for j in xrange(Nz):
z_tmp = np_rng.normal(z_mean[j],z_std[j],nbatch)
ret[:,j] = z_tmp
# print ret
return ret
try:
os.mkdir('Gen_models%d'%objectNumber)
except:
pass
for j in xrange(n/nbatch):
sample_zmb = floatX(gen_z(z_dist,nbatch))
samples = np.asarray(_gen(sample_zmb))
for i in xrange(nbatch):
io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber,nbatch*j+i),{'instance':samples[i,:,:,:],'Z':sample_zmb[i,:]})
# niter = 1
# niter_decay = 1
| mit | 4,118,196,402,505,532,400 | 27.942675 | 261 | 0.659991 | false |
mesocentrefc/Janua-SMS | janua/actions/sms_usage.py | 1 | 2426 |
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
#
# Copyright (c) 2016 Cédric Clerget - HPC Center of Franche-Comté University
#
# This file is part of Janua-SMS
#
# http://github.com/mesocentrefc/Janua-SMS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from janua import jdb
from janua.actions.action import Action
from janua.utils.utilities import get_role
from janua.ws.services import urlconfig, jsonify
class SmsUsage(Action):
"""
Get SMS usage based on administrator quota
* Sample request with administrator level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
JanuaAuthToken: abcdef123456789
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"global": 18,
"quota": "100 M",
"sent": 18
}
}
* Sample request with supervisor level:
.. code-block:: javascript
GET /sms-usage HTTP/1.1
Host: janua.mydomain.com
Content-Type: application/json
Sample response:
.. code-block:: javascript
HTTP/1.1 200
{
"smsusage": {
"quota": "200 D",
"sent": 4
}
}
"""
category = '__INTERNAL__'
@urlconfig('/sms-usage')
def web(self):
admin = jdb.admin.get_by_phone(self.phone_number)
data = {
'success': True,
'params': [],
'num_params': 0
}
reached, numsms = jdb.sms.is_admin_quota_reached(admin)
quota = admin.sms_quota
data = {'sent': int(numsms), 'quota': quota}
if get_role(admin) == 'admin':
data.update({'global': int(jdb.sms.month_usage())})
return jsonify(smsusage=data)
| gpl-2.0 | 3,349,284,974,751,728,000 | 25.347826 | 76 | 0.60066 | false |
mhogg/BMDanalyse | BMDanalyse/MainWindow.py | 1 | 30163 |
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Michael Hogg
# This file is part of BMDanalyse - See LICENSE.txt for information on usage and redistribution
import os, matplotlib, matplotlib.pyplot, types
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph import ImageItem
from pyqtgraph.widgets.GraphicsLayoutWidget import GraphicsLayoutWidget
from PIL import Image
from ViewBoxCustom import MultiRoiViewBox, ImageAnalysisViewBox
from MatplotlibWidget import MatplotlibWidget
from SidePanel import SidePanel
from TableWidget import TableWidget
from version import __version__
absDirPath = os.path.dirname(__file__)
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
QtGui.QMainWindow.__init__(self, parent)
self.loadIcons()
self.setupUserInterface()
self.setupSignals()
self.__version__ = __version__
# Initialise variables
self.imageFiles = {}
self.timeData = None
self.plotWin = None
self.imageWin = None
self.BMDchange = None
self.roiNames = None
def loadIcons(self):
""" Load icons """
self.icons = dict([
('BMDanalyseIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","logo.png"))),
('imageAddIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_add.png"))),
('imageRemIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_delete2.png"))),
('imageDownIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-up-2.png"))),
('imageUpIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-down-2.png"))),
('imagePrevIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-left.png"))),
('imageNextIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","arrow-right.png"))),
('roiAddIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","green-add3.png"))),
('roiRectIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","rectangularIcon.png"))),
('roiPolyIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","polygonIcon.png"))),
('roiRemIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","red_delete.png"))),
('roiSaveIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","filesave.png"))),
('roiCopyIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","file_copy.png"))),
('roiLoadIcon', QtGui.QIcon(os.path.join(absDirPath,"icons","opened-folder.png")))])
def setupUserInterface(self):
""" Initialise the User Interface """
# Left frame
leftFrame = QtGui.QFrame()
leftFrameLayout = QtGui.QHBoxLayout()
leftFrame.setLayout(leftFrameLayout)
leftFrame.setLineWidth(0)
leftFrame.setFrameStyle(QtGui.QFrame.Panel)
leftFrameLayout.setContentsMargins(0,0,5,0)
# Left frame contents
self.viewMain = GraphicsLayoutWidget() # A GraphicsLayout within a GraphicsView
leftFrameLayout.addWidget(self.viewMain)
self.viewMain.setMinimumSize(200,200)
self.vb = MultiRoiViewBox(lockAspect=True,enableMenu=True)
self.viewMain.addItem(self.vb)
self.vb.disableAutoRange()
# Right frame
self.sidePanel = SidePanel(self)
# UI window (containing left and right frames)
UIwindow = QtGui.QWidget(self)
UIwindowLayout = QtGui.QHBoxLayout()
UIwindowSplitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
UIwindowLayout.addWidget(UIwindowSplitter)
UIwindow.setLayout(UIwindowLayout)
self.setCentralWidget(UIwindow)
UIwindowSplitter.addWidget(leftFrame)
UIwindowSplitter.addWidget(self.sidePanel)
# Application window
self.setWindowTitle('BMDanalyse')
self.setWindowIcon(self.icons['BMDanalyseIcon'])
self.setMinimumSize(600,500)
self.resize(self.minimumSize())
# Window menus
self.createMenus()
self.createActions()
def createMenus(self):
# Menus
menubar = self.menuBar()
self.fileMenu = menubar.addMenu('&File')
self.imageMenu = menubar.addMenu('&Images')
self.roiMenu = menubar.addMenu('&ROIs')
self.submenu = self.roiMenu.addMenu(self.icons['roiAddIcon'],"Add ROI")
self.analyseMenu = menubar.addMenu('&Analysis')
self.aboutMenu = menubar.addMenu('A&bout')
def createActions(self):
# Actions for File menu
self.exitAct = QtGui.QAction("&Quit", self, shortcut="Ctrl+Q",statusTip="Exit the application")
self.exitAct.triggered[()].connect(self.close)
self.fileMenu.addAction(self.exitAct)
# Actions for Images menu
self.loadImageAct = QtGui.QAction(self.icons['imageAddIcon'], "&Load image(s)", self, shortcut="Ctrl+L")
self.removeImageAct = QtGui.QAction(self.icons['imageRemIcon'], "&Remove current image", self, shortcut="Ctrl+X")
imageMenuActions = [self.loadImageAct,self.removeImageAct]
imageMenuActFuncs = [self.loadImages,self.removeImage]
for i in xrange(len(imageMenuActions)):
action = imageMenuActions[i]
function = imageMenuActFuncs[i]
action.triggered[()].connect(function)
self.imageMenu.addAction(self.loadImageAct)
self.imageMenu.addAction(self.removeImageAct)
# Actions for ROI menu
self.addROIRectAct = QtGui.QAction("Rectangular",self.submenu)
self.addROIPolyAct = QtGui.QAction("Polygon",self.submenu)
self.addROIRectAct.triggered[()].connect(self.vb.addROI)
self.addROIPolyAct.triggered[()].connect(self.vb.addPolyRoiRequest)
self.submenu.addAction(self.addROIRectAct)
self.submenu.addAction(self.addROIPolyAct)
self.addROIRectAct.setIcon(self.icons['roiRectIcon'])
self.addROIPolyAct.setIcon(self.icons['roiPolyIcon'])
self.addROIRectAct.setShortcut("Ctrl+Shift+R")
self.addROIPolyAct.setShortcut("Ctrl+Shift+P")
self.loadRoiAct = QtGui.QAction(self.icons['roiLoadIcon'], "L&oad ROI", self, shortcut="Ctrl+O")
self.copyRoiAct = QtGui.QAction(self.icons['roiCopyIcon'], "&Copy ROI", self, shortcut="Ctrl+C")
self.saveRoiAct = QtGui.QAction(self.icons['roiSaveIcon'], "&Save ROI", self, shortcut="Ctrl+S")
self.remRoiAct = QtGui.QAction(self.icons['roiRemIcon'] , "&Remove ROI", self, shortcut="Ctrl+D")
roiMenuActions = [self.loadRoiAct,self.copyRoiAct,self.saveRoiAct,self.remRoiAct]
roiMenuActFuncs = [self.vb.loadROI,self.vb.copyROI,self.vb.saveROI,self.vb.removeROI]
for i in xrange(len(roiMenuActions)):
action = roiMenuActions[i]
function = roiMenuActFuncs[i]
action.triggered[()].connect(function)
self.roiMenu.addAction(action)
# Actions for Analyse menu
self.roiAnalysisAct = QtGui.QAction("&ROI analysis", self.viewMain, shortcut="Ctrl+R",triggered=self.getBMD)
self.imgAnalysisAct = QtGui.QAction("&Image analysis", self.viewMain, shortcut="Ctrl+I",triggered=self.imageAnalysis)
self.analyseMenu.addAction(self.roiAnalysisAct)
self.analyseMenu.addAction(self.imgAnalysisAct)
# Actions for
self.aboutAct = QtGui.QAction("&About", self.viewMain, shortcut='F1', triggered=self.onAbout)
self.aboutMenu.addAction(self.aboutAct)
def setupSignals(self):
""" Setup signals """
self.sidePanel.imageFileList.itemSelectionChanged.connect(self.getImageToDisplay)
self.sidePanel.buttImageAdd.clicked.connect(self.loadImages)
self.sidePanel.buttImageRem.clicked.connect(self.removeImage)
self.sidePanel.buttImageUp.clicked.connect(self.sidePanel.moveImageUp)
self.sidePanel.buttImageDown.clicked.connect(self.sidePanel.moveImageDown)
self.sidePanel.roiMenu.button1.clicked[()].connect(self.vb.addROI)
self.sidePanel.roiMenu.button2.clicked[()].connect(self.vb.addPolyRoiRequest)
self.sidePanel.buttRoiCopy.clicked[()].connect(self.vb.copyROI)
self.sidePanel.buttRoiRem.clicked.connect(self.vb.removeROI)
self.sidePanel.buttRoiLoad.clicked.connect(self.vb.loadROI)
self.sidePanel.buttRoiSave.clicked.connect(self.vb.saveROI)
self.sidePanel.buttRoiAnalysis.clicked.connect(self.getBMD)
self.sidePanel.buttImgAnalysis.clicked.connect(self.imageAnalysis)
def onAbout(self):
""" About BMDanalyse message"""
author ='Michael Hogg'
date ='2016'
version = self.__version__
QtGui.QMessageBox.about(self, 'About BMDanalyse',
"""
<b>BMDanalyse</b>
<p>A simple program for the analysis of a time series of Bone Mineral Density (BMD) images.</p>
<p>Used to evaluate the bone gain / loss in a number of regions of interest (ROIs) over time,
typically due to bone remodelling as a result of stress shielding around an orthopaedic implant.</p>
<p><table border="0" width="150">
<tr>
<td>Author:</td>
<td>%s</td>
</tr>
<tr>
<td>Version:</td>
<td>%s</td>
</tr>
<tr>
<td>Date:</td>
<td>%s</td>
</tr>
</table></p>
""" % (author,version,date))
def loadImages(self):
""" Load an image to be analysed """
newImages = {}
fileNames = QtGui.QFileDialog.getOpenFileNames(self, self.tr("Load images"),QtCore.QDir.currentPath())
# Fix for PySide. PySide doesn't support QStringList types. PyQt4 getOpenFileNames returns a QStringList, whereas PySide
# returns a type (the first entry being the list of filenames).
if isinstance(fileNames,types.TupleType): fileNames = fileNames[0]
if hasattr(QtCore,'QStringList') and isinstance(fileNames, QtCore.QStringList): fileNames = [str(i) for i in fileNames]
if len(fileNames)>0:
for fileName in fileNames:
if fileName!='':
img = Image.open(str(fileName))
imgarr = np.array(img.convert('L')) # Convert to 8-bit
imgarr = imgarr.swapaxes(0,1)
imgarr = imgarr[:,::-1]
newImages[fileName] = imgarr
# Add filenames to list widget. Only add new filenames. If filename exists aready, then
# it will not be added, but data will be updated
for fileName in sorted(newImages.keys()):
if not self.imageFiles.has_key(fileName):
self.sidePanel.addImageToList(fileName)
self.imageFiles[fileName] = newImages[fileName]
# Show image in Main window
self.vb.enableAutoRange()
if self.sidePanel.imageFileList.currentRow()==-1:
self.sidePanel.imageFileList.setCurrentRow(0)
self.showImage(str(self.sidePanel.imageFileList.currentItem().text()))
self.vb.disableAutoRange()
def removeImage(self):
""" Remove image from sidePanel imageFileList """
# Return if there is no image to remove
if self.vb.img is None: return
# Get current image in sidePanel imageFileList and remove from list
currentRow = self.sidePanel.imageFileList.currentRow()
image = self.sidePanel.imageFileList.takeItem(currentRow)
imageName = str(image.text())
# Delete key and value from dictionary
if imageName!='': del self.imageFiles[imageName]
# Get image item in imageFileList to replace deleted image
if self.sidePanel.imageFileList.count()==0:
self.vb.enableAutoRange()
self.vb.removeItem(self.vb.img)
self.vb.showImage(None)
self.vb.disableAutoRange()
else:
currentRow = self.sidePanel.imageFileList.currentRow()
imageName = str(self.sidePanel.imageFileList.item(currentRow).text())
self.showImage(imageName)
def showImage(self,imageFilename):
""" Shows image in main view """
self.arr = self.imageFiles[imageFilename]
self.vb.showImage(self.arr)
def getImageToDisplay(self):
""" Get current item in file list and display in main view"""
try: imageFilename = str(self.sidePanel.imageFileList.currentItem().text())
except: pass
else: self.showImage(imageFilename)
def getBMD(self):
""" Get change in BMD over time (e.g. for each image) for all ROIs.
Revised function that converts the list of images into a 3D array
and then uses the relative position of the ROIs to the current
        image, self.vb.img, to get the average BMD value, i.e. it doesn't use
setImage to change the image in the view. This requires that all
images are the same size and in the same position.
"""
# Return if there is no image or rois in view
if self.vb.img is None or len(self.vb.rois)==0: return
# Collect all images into a 3D array
imageFilenames = self.sidePanel.getListOfImages()
images = [self.imageFiles[str(name.text())] for name in imageFilenames]
imageData = np.dstack(images) # Doesn't work correctly if images are not all the same shape
numImages = len(images)
# Get BMD across image stack for each ROI
numROIs = len(self.vb.rois)
BMD = np.zeros((numImages,numROIs),dtype=float)
self.roiNames = []
for i in xrange(numROIs):
roi = self.vb.rois[i]
self.roiNames.append(roi.name)
arrRegion = roi.getArrayRegion(imageData,self.vb.img, axes=(0,1))
avgROIvalue = arrRegion.mean(axis=0).mean(axis=0)
BMD[:,i] = avgROIvalue
# Calculate the BMD change (percentage of original)
tol = 1.0e-06
for i in xrange(numROIs):
if abs(BMD[0,i])<tol:
BMD[:,i] = 100.
else:
BMD[:,i] = BMD[:,i] / BMD[0,i] * 100.
self.BMDchange = BMD-100.
if self.timeData is None or self.timeData.size!=numImages:
self.timeData = np.arange(numImages,dtype=float)
# Plot results
self.showResults()
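    # Illustration of the normalisation above (hypothetical values): if an ROI
    # averages 50.0 in the first image and 40.0 in a later one, BMD for that
    # ROI becomes [100.0, 80.0] and self.BMDchange becomes [0.0, -20.0], i.e.
    # a 20% loss relative to the baseline image.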
def imageAnalysis(self):
# Generate images of BMD change
if self.vb.img is None: return
self.showImageWin()
def sliderValueChanged(self,value):
self.imageWin.sliderLabel.setText('BMD change: >= %d %s' % (value,'%'))
self.setLookupTable(value)
self.imageWin.vb.img2.setLookupTable(self.lut)
self.imageWin.vb.img2.setLevels([0,255])
def setLookupTable(self,val):
lut = []
for i in range(256):
if i > 127+val:
lut.append(matplotlib.cm.jet(255))
elif i < 127-val:
lut.append(matplotlib.cm.jet(0))
else:
lut.append((0.0,0.0,0.0,0.0))
lut = np.array(lut)*255
self.lut = np.array(lut,dtype=np.ubyte)
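    # Behaviour of the lookup table built above, e.g. with val=10: indices
    # 138-255 map to the top of the jet colormap (strong gain), indices 0-116
    # map to the bottom (strong loss), and the band 117-137 around the
    # midpoint 127 is fully transparent, so only changes beyond the slider
    # threshold are overlaid on the greyscale image.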
def createImageWin(self):
self.buttMinimumSize = QtCore.QSize(70,36)
self.iconSize = QtCore.QSize(24,24)
        if self.imageWin is None:
self.imageWin = QtGui.QDialog(self, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | \
QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
self.imageWin.setWindowTitle('BMDanalyse')
self.imageWin.setWindowIcon(self.icons['BMDanalyseIcon'])
self.imageWin.setMinimumSize(250,500)
self.imageWin.resize(self.imageWin.minimumSize())
# Create viewBox
self.imageWin.glw = GraphicsLayoutWidget() # A GraphicsLayout within a GraphicsView
self.imageWin.vb = ImageAnalysisViewBox(lockAspect=True,enableMenu=True)
self.imageWin.vb.disableAutoRange()
self.imageWin.glw.addItem(self.imageWin.vb)
arr = self.imageFiles.values()[0]
self.imageWin.vb.img1 = ImageItem(arr,autoRange=False,autoLevels=False)
self.imageWin.vb.addItem(self.imageWin.vb.img1)
self.imageWin.vb.img2 = ImageItem(None,autoRange=False,autoLevels=False)
self.imageWin.vb.addItem(self.imageWin.vb.img2)
self.imageWin.vb.autoRange()
lut = [ [ int(255*val) for val in matplotlib.cm.gray(i)[:3] ] for i in xrange(256) ]
lut = np.array(lut,dtype=np.ubyte)
self.imageWin.vb.img1.setLookupTable(lut)
# Label to show index of current image label
self.imageCurrCont = QtGui.QFrame()
self.imageCurrCont.setLineWidth(2)
self.imageCurrCont.setFrameStyle(QtGui.QFrame.Panel | QtGui.QFrame.Raised)
self.imageCurrCont.setMinimumWidth(70)
self.imageWin.currLabel = QtGui.QLabel("")
self.imageWin.currLabel.setAlignment(QtCore.Qt.AlignHCenter)
imageCurrContLayout = QtGui.QHBoxLayout()
imageCurrContLayout.addWidget(self.imageWin.currLabel)
self.imageCurrCont.setLayout(imageCurrContLayout)
# Create buttons to select images
self.imageWin.buttCont = QtGui.QWidget()
self.imageWin.buttPrev = QtGui.QPushButton(self.icons['imagePrevIcon'],"")
self.imageWin.buttNext = QtGui.QPushButton(self.icons['imageNextIcon'],"")
self.buttLayout = QtGui.QHBoxLayout()
self.buttLayout.addStretch(1)
self.buttLayout.addWidget(self.imageWin.buttPrev)
self.buttLayout.addWidget(self.imageCurrCont)
self.buttLayout.addWidget(self.imageWin.buttNext)
self.buttLayout.addStretch(1)
self.imageWin.buttCont.setLayout(self.buttLayout)
self.imageWin.buttPrev.setMinimumSize(self.buttMinimumSize)
self.imageWin.buttNext.setMinimumSize(self.buttMinimumSize)
self.imageWin.buttPrev.setIconSize(self.iconSize)
self.imageWin.buttNext.setIconSize(self.iconSize)
self.buttLayout.setContentsMargins(0,5,0,5)
self.imageWin.buttPrev.clicked.connect(self.prevImage)
self.imageWin.buttNext.clicked.connect(self.nextImage)
# Create slider
self.imageWin.sliderCon = QtGui.QWidget()
self.imageWin.slider = QtGui.QSlider(self)
self.imageWin.slider.setOrientation(QtCore.Qt.Horizontal)
self.imageWin.slider.setMinimum(1)
self.imageWin.slider.setMaximum(100)
self.imageWin.slider.setMinimumWidth(100)
self.imageWin.slider.valueChanged.connect(self.sliderValueChanged)
self.imageWin.sliderLabel = QtGui.QLabel('1')
self.imageWin.sliderLabel.setMinimumWidth(120)
self.sliderLayout = QtGui.QHBoxLayout()
self.sliderLayout.addStretch(1)
self.sliderLayout.addWidget(self.imageWin.sliderLabel)
self.sliderLayout.addWidget(self.imageWin.slider)
self.sliderLayout.addStretch(1)
self.imageWin.sliderCon.setLayout(self.sliderLayout)
self.sliderLayout.setContentsMargins(0,0,0,5)
# Format image window
self.imageWinLayout = QtGui.QVBoxLayout()
self.imageWinLayout.addWidget(self.imageWin.glw)
self.imageWinLayout.addWidget(self.imageWin.buttCont)
self.imageWinLayout.addWidget(self.imageWin.sliderCon)
self.imageWin.setLayout(self.imageWinLayout)
self.imageWin.imagesRGB = None
# Show
self.imageWin.show()
self.imageWin.slider.setValue(10)
self.sliderValueChanged(10)
self.imageWinIndex = 0
def prevImage(self):
minIndex = 0
currIndex = self.imageWinIndex
prevIndex = currIndex - 1
self.imageWinIndex = max(prevIndex,minIndex)
self.updateImageWin()
def nextImage(self):
numImages = len(self.imageFiles)
maxIndex = numImages - 1
currIndex = self.imageWinIndex
nextIndex = currIndex + 1
self.imageWinIndex = min(nextIndex,maxIndex)
self.updateImageWin()
def updateImageWin(self):
imageFilenames = self.sidePanel.getListOfImages()
imageName = imageFilenames[self.imageWinIndex]
self.imageWin.vb.img1.setImage(self.imageFiles[str(imageName.text())],autoLevels=False)
self.imageWin.vb.img2.setImage(self.imageWin.imagesRGB[self.imageWinIndex],autoLevels=False)
self.imageWin.currLabel.setText("%i / %i" % (self.imageWinIndex+1,len(imageFilenames)))
def showImageWin(self):
self.createImageWin()
self.imagesBMDpercentChange()
self.updateImageWin()
def imagesBMDpercentChange(self):
# Get image arrays and convert to an array of floats
imageFilenames = self.sidePanel.getListOfImages()
images = [ self.imageFiles[str(name.text())] for name in imageFilenames ]
imagesConv = []
for img in images:
image = img.copy()
image[np.where(image==0)] = 1
image = image.astype(np.float)
imagesConv.append(image)
# Calculate percentage change and set with limits -100% to +100%
imagesPercCh = []
imageInitial = imagesConv[0]
for image in imagesConv:
imagePercCh = (image-imageInitial)/imageInitial*100.
imagePercCh[np.where(imagePercCh> 100.)] = 100.
imagePercCh[np.where(imagePercCh<-100.)] = -100.
imagesPercCh.append(imagePercCh)
numImages = len(imagesPercCh)
self.imageWin.imagesRGB = []
for i in xrange(numImages):
image = imagesPercCh[i]
sx,sy = image.shape
imageRGB = image*(255/200.)+(255/2.)
self.imageWin.imagesRGB.append(imageRGB)
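    # The scaling above maps the clipped percentage change [-100, +100] onto
    # the 0-255 range used by the lookup table: -100% -> 0, 0% -> 127.5 and
    # +100% -> 255, which is why setLookupTable treats 127 as "no change".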
def BMDtoCSVfile(self):
""" Write BMD change to csv file """
fileName = QtGui.QFileDialog.getSaveFileName(None,self.tr("Export to CSV"),QtCore.QDir.currentPath(),self.tr("CSV (*.csv)"))
# Fix for PyQt/PySide compatibility. PyQt returns a QString, whereas PySide returns a tuple (first entry is filename as string)
if isinstance(fileName,types.TupleType): fileName = fileName[0]
if hasattr(QtCore,'QString') and isinstance(fileName, QtCore.QString): fileName = str(fileName)
if not fileName=='':
textFile = open(fileName,'w')
numFrames, numROIs = self.BMDchange.shape
roiNames = self.roiNames
header = "%10s," % 'Time'
header += ((numROIs-1)*'%10s,'+'%10s\n') % tuple(roiNames)
textFile.write(header)
for i in xrange(numFrames):
textFile.write('%10.1f,' % self.timeData[i])
for j in xrange(numROIs):
if j<numROIs-1: fmt = '%10.3f,'
else: fmt = '%10.3f\n'
textFile.write(fmt % self.BMDchange[i,j])
textFile.close()
def showResults(self,):
""" Plots BMD change using matplotlib """
# Create plot window
        if self.plotWin is None:
self.plotWin = QtGui.QDialog(self, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint | \
QtCore.Qt.WindowMinimizeButtonHint | QtCore.Qt.WindowMaximizeButtonHint)
self.plotWin.setWindowTitle('BMDanalyse')
self.plotWin.setWindowIcon(self.icons['BMDanalyseIcon'])
self.plotWin.setMinimumSize(600,500)
self.plotWin.resize(self.minimumSize())
# Create Matplotlib widget
self.mplw = MatplotlibWidget(size=(5,6))
self.fig = self.mplw.getFigure()
self.editDataButton = QtGui.QPushButton('Edit plot')
self.exportCSVButton = QtGui.QPushButton('Export data')
self.mplw.toolbar.addWidget(self.editDataButton)
self.mplw.toolbar.addWidget(self.exportCSVButton)
self.editDataButton.clicked.connect(self.showEditBox)
self.exportCSVButton.clicked.connect(self.BMDtoCSVfile)
# Format plot window
self.plotWinLayout = QtGui.QVBoxLayout()
self.plotWinLayout.addWidget(self.mplw)
self.plotWin.setLayout(self.plotWinLayout)
self.createFigure()
self.plotWin.show()
self.mplw.draw()
def createFigure(self):
""" Creates plot of results """
self.ax1 = self.fig.add_subplot(111)
self.ax1.clear()
self.fig.subplots_adjust(bottom=0.15,top=0.85,left=0.15,right=0.925)
numFrames, numROIs = self.BMDchange.shape
t = self.timeData
# Plot data
for i in xrange(numROIs):
roiname = self.roiNames[i]
self.ax1.plot(t,self.BMDchange[:,i],'-o',label=roiname,linewidth=2.0)
kwargs = dict(y=1.05) # Or kwargs = {'y':1.05}
self.ax1.set_title('Change in Bone Mineral Density over time',fontsize=14,fontweight='roman',**kwargs)
self.ax1.set_xlabel('Time',fontsize=10)
self.ax1.set_ylabel('Change in BMD (%)',fontsize=10)
self.ax1.legend(loc=0)
matplotlib.pyplot.setp(self.ax1.get_xmajorticklabels(), fontsize=10)
matplotlib.pyplot.setp(self.ax1.get_ymajorticklabels(), fontsize=10)
matplotlib.pyplot.setp(self.ax1.get_legend().get_texts(),fontsize=10)
self.ax1.grid()
def fillEditBox(self):
rows,cols = self.BMDchange.shape
for i in xrange(rows):
itmValue = '%.2f' % self.timeData[i]
itm = QtGui.QTableWidgetItem(itmValue)
self.tableResults.setItem(i,0,itm)
for j in xrange(cols):
itmValue = '%.2f' % self.BMDchange[i,j]
itm = QtGui.QTableWidgetItem(itmValue)
self.tableResults.setItem(i,j+1,itm)
def showEditBox(self):
self.plotWin.editBox = QtGui.QDialog(self.plotWin, QtCore.Qt.WindowSystemMenuHint | QtCore.Qt.WindowTitleHint)
self.plotWin.editBox.setWindowIcon(self.icons['BMDanalyseIcon'])
self.plotWin.editBox.setWindowTitle('BMDanalyse')
self.plotWin.editBox.setModal(True)
# Add table
layout = QtGui.QVBoxLayout()
layout.setContentsMargins(10,10,10,10)
layout.setSpacing(20)
rows,cols = self.BMDchange.shape
self.tableResults = TableWidget(rows,cols+1,self.plotWin.editBox)
self.tableResults.verticalHeader().setVisible(True)
# Set headers
self.tableResults.setHorizontalHeaderItem(0,QtGui.QTableWidgetItem('Time'))
for i in xrange(cols):
header = QtGui.QTableWidgetItem(self.roiNames[i])
self.tableResults.setHorizontalHeaderItem(i+1,header)
# Add values to table
self.fillEditBox()
# Set layout
layout.addWidget(self.tableResults)
self.buttonsFrame = QtGui.QFrame()
self.buttonsLayout = QtGui.QHBoxLayout()
self.buttonReset = QtGui.QPushButton('Reset')
self.buttonSave = QtGui.QPushButton('Save')
self.buttonClose = QtGui.QPushButton('Cancel')
self.buttonReset.setFixedWidth(50)
self.buttonSave.setFixedWidth(50)
self.buttonClose.setFixedWidth(50)
self.buttonClose.clicked.connect(self.plotWin.editBox.close)
self.buttonSave.clicked.connect(self.updateTableValues)
self.buttonReset.clicked.connect(self.fillEditBox)
self.buttonsLayout.addStretch(1)
self.buttonsLayout.addWidget(self.buttonReset)
self.buttonsLayout.addWidget(self.buttonSave)
self.buttonsLayout.addWidget(self.buttonClose)
self.buttonsLayout.setContentsMargins(0,0,0,0)
self.buttonsFrame.setLayout(self.buttonsLayout)
layout.addWidget(self.buttonsFrame)
self.plotWin.editBox.setLayout(layout)
self.plotWin.editBox.setMaximumSize(layout.sizeHint())
self.plotWin.editBox.show()
def updateTableValues(self):
# Create temporary arrays
timeData = self.timeData.copy()
BMDchange = self.BMDchange.copy()
# Put the values from the tables into the temporary arrays
rows = self.tableResults.rowCount()
cols = self.tableResults.columnCount()
for r in xrange(rows):
for c in xrange(cols):
item = self.tableResults.item(r,c)
itemValue = float(item.text())
if c==0:
timeData[r] = itemValue
else:
BMDchange[r,c-1] = itemValue
# Check that time values are in increasing order. If so, then update arrays
if any(np.diff(timeData)<=0):
self.errorMessage = QtGui.QMessageBox()
self.errorMessage.setWindowIcon(self.icons['BMDanalyseIcon'])
self.errorMessage.setWindowTitle('BMDanalyse')
self.errorMessage.setText('Input error: Time values should be in order of increasing value')
self.errorMessage.setIcon(QtGui.QMessageBox.Warning)
self.errorMessage.open()
else:
self.timeData = timeData
self.BMDchange = BMDchange
self.createFigure()
self.mplw.draw()
self.plotWin.editBox.close() | mit | 8,683,989,055,049,540,000 | 48.940397 | 143 | 0.621192 | false |
praekelt/txtalert | txtalert/apps/bookings/views.py | 1 | 7598 | from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator
import logging
from django.utils import timezone
from txtalert.core.models import Visit, PleaseCallMe, MSISDN, AuthProfile, Patient
from txtalert.core.forms import RequestCallForm
from txtalert.core.utils import normalize_msisdn
from datetime import date, datetime
from functools import wraps
def effective_page_range_for(page,paginator,delta=3):
return [p for p in range(page.number-delta,page.number+delta+1)
if (p > 0 and p <= paginator.num_pages)]
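# effective_page_range_for example (hypothetical values): with page.number == 5,
# delta == 3 and a paginator of 6 pages it yields [2, 3, 4, 5, 6]; near the
# first page the range is simply clipped at 1 rather than going negative.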
def auth_profile_required(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
try:
return func(request, *args, **kwargs)
except AuthProfile.DoesNotExist:
return render_to_response('auth_profile_error.html', {
}, context_instance = RequestContext(request))
return wrapper
@login_required
@auth_profile_required
def index(request):
profile = request.user.get_profile()
return render_to_response("index.html", {
'profile': profile,
'patient': profile.patient,
}, context_instance = RequestContext(request))
@login_required
def appointment_change(request, visit_id):
profile = request.user.get_profile()
visit = get_object_or_404(Visit, pk=visit_id)
change_requested = request.POST.get('when')
if change_requested == 'later':
visit.reschedule_later()
messages.add_message(request, messages.INFO,
"Your request to change the appointment has been sent to " \
"the clinic. You will be notified as soon as possible.")
elif change_requested == 'earlier':
visit.reschedule_earlier()
messages.add_message(request, messages.INFO,
"Your request to change the appointment has been sent to " \
"the clinic. You will be notified as soon as possible.")
return render_to_response("appointment/change.html", {
'profile': profile,
'patient': profile.patient,
'visit': visit,
'change_requested': change_requested,
}, context_instance = RequestContext(request))
@login_required
def appointment_upcoming(request):
profile = request.user.get_profile()
patient = profile.patient
paginator = Paginator(patient.visit_set.upcoming(), 5)
page = paginator.page(request.GET.get('p', 1))
return render_to_response("appointment/upcoming.html", {
'profile': profile,
'patient': patient,
'paginator': paginator,
'page': page,
'effective_page_range': effective_page_range_for(page, paginator)
}, context_instance = RequestContext(request))
@login_required
def appointment_history(request):
profile = request.user.get_profile()
patient = profile.patient
paginator = Paginator(patient.visit_set.past().order_by('-date'), 5)
page = paginator.page(request.GET.get('p', 1))
return render_to_response("appointment/history.html", {
'profile': profile,
'patient': profile.patient,
'paginator': paginator,
'page': page,
'effective_page_range': effective_page_range_for(page, paginator)
}, context_instance=RequestContext(request))
@login_required
def attendance_barometer(request):
profile = request.user.get_profile()
patient = profile.patient
visits = patient.visit_set.all()
attended = visits.filter(status='a').count()
missed = visits.filter(status='m').count()
total = visits.filter(date__lt=date.today()).count()
if total:
attendance = int(float(attended) / float(total) * 100)
else:
attendance = 0.0
return render_to_response("attendance_barometer.html", {
'profile': profile,
'patient': patient,
'attendance': attendance,
'attended': attended,
'missed': missed,
'total': total
}, context_instance=RequestContext(request))
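# attendance_barometer reports attended visits as a percentage of all past
# visits, e.g. 8 attended out of 10 past visits gives attendance == 80.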
def request_call(request):
if request.POST:
form = RequestCallForm(request.POST)
if form.is_valid():
clinic = form.cleaned_data['clinic']
# normalize
msisdn = normalize_msisdn(form.cleaned_data['msisdn'])
# orm object
msisdn_record, _ = MSISDN.objects.get_or_create(msisdn=msisdn)
pcm = PleaseCallMe(user=clinic.user, clinic=clinic,
msisdn=msisdn_record, timestamp=timezone.now(),
message='Please call me!', notes='Call request issued via txtAlert Bookings')
pcm.save()
messages.add_message(request, messages.INFO,
'Your call request has been registered. '\
'The clinic will call you back as soon as possible.')
return HttpResponseRedirect(reverse('bookings:request_call'))
else:
form = RequestCallForm(initial={
'msisdn': '' if request.user.is_anonymous() else request.user.username
})
if request.user.is_anonymous():
profile = patient = None
else:
profile = request.user.get_profile()
patient = profile.patient
return render_to_response('request_call.html', {
'profile': profile,
'patient': patient,
'form': form,
}, context_instance=RequestContext(request))
def widget_landing(request):
if 'patient_id' in request.GET \
and 'msisdn' in request.GET:
try:
msisdn = normalize_msisdn(request.GET.get('msisdn'))
patient_id = request.GET.get('patient_id')
patient = Patient.objects.get(active_msisdn__msisdn=msisdn,
te_id=patient_id)
try:
visit = patient.next_visit()
except Visit.DoesNotExist:
visit = None
visits = patient.visit_set.all()
context = {
'msisdn': msisdn,
'patient_id': patient_id,
'patient': patient,
'name': patient.name,
'surname': patient.surname,
'next_appointment': visit.date if visit else '',
'visit_id': visit.pk if visit else '',
'clinic': visit.clinic.name if visit else '',
'attendance': int((1.0 - patient.risk_profile) * 100),
'total': visits.count(),
'attended': visits.filter(status='a').count(),
'rescheduled': visits.filter(status='r').count(),
'missed': visits.filter(status='m').count(),
}
except Patient.DoesNotExist:
context = {
'patient_id': patient_id,
'msisdn': msisdn,
}
else:
context = {
'patient_id': request.GET.get('patient_id', ''),
'msisdn': request.GET.get('msisdn', ''),
}
    logging.debug("widget_landing context: %s", context)
return render_to_response('widget_landing.html', context,
context_instance=RequestContext(request))
def todo(request):
"""Anything that resolves to here still needs to be completed"""
return HttpResponse("This still needs to be implemented.")
def not_found(request):
"""test 404 template rendering"""
raise Http404
def server_error(request):
"""test 500 template rendering"""
raise Exception, '500 testing' | gpl-3.0 | -763,102,905,537,551,700 | 36.995 | 93 | 0.622664 | false |
Iconoclasteinc/tgit | test/ui/file_dialogs/test_file_dialog.py | 1 | 1231 | # -*- coding: utf-8 -*-
import pytest
from PyQt5.QtWidgets import QFileDialog
from hamcrest import ends_with, assert_that, equal_to
from cute.widgets import QFileDialogDriver, window
from test.ui import show_, close_
from tgit.ui import locations
from tgit.ui.dialogs.file_dialogs import make_file_dialog, name_filter
pytestmark = pytest.mark.ui
@pytest.yield_fixture()
def driver(prober, automaton):
dialog_driver = QFileDialogDriver(window(QFileDialog), prober, automaton)
yield dialog_driver
close_(dialog_driver)
def show_dialog(name_filters="", file_mode=QFileDialog.ExistingFile, directory="", parent=None):
dialog = make_file_dialog(name_filters, file_mode, directory, parent, False)
show_(dialog)
return dialog
def test_shows_name_filters(driver):
_ = show_dialog("PNG Images (*.png)")
driver.filter_files_of_type("PNG Images (*.png)")
def test_initially_starts_in_directory(driver):
_ = show_dialog(directory=locations.Documents)
driver.has_current_directory(ends_with("Documents"))
def test_builds_name_filters():
assert_that(name_filter(["type1", "type2"], "caption"), equal_to("caption (*.type1 *.type2)"), "The name filters") | gpl-3.0 | -7,738,104,564,986,402,000 | 30.447368 | 118 | 0.70593 | false |
uniomni/system_administration | security/utilities.py | 1 | 3473 | """Utilities
"""
import os
from math import sqrt, pi, sin, cos, acos
from subprocess import Popen, PIPE
import sys
def run(cmd,
stdout=None,
stderr=None,
verbose=True):
s = cmd
if stdout:
s += ' > %s' % stdout
if stderr:
s += ' 2> %s' % stderr
if verbose:
print s
err = os.system(s)
if err != 0:
msg = 'Command "%s" failed with errorcode %i. ' % (cmd, err)
if stderr: msg += 'See logfile %s for details' % stderr
raise Exception(msg)
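# Usage sketch (hypothetical paths): run('ls -l', stdout='listing.txt',
# stderr='errors.log') executes "ls -l > listing.txt 2> errors.log" and, if
# the command exits non-zero, raises an Exception pointing at errors.log.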
def header(s):
dashes = '-'*len(s)
print
print dashes
print s
print dashes
def write_line(fid, text, indent=0):
fid.write(' '*indent + text + '\n')
def makedir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
Based on
http://code.activestate.com/recipes/82465/
Note os.makedirs does not silently pass if directory exists.
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
msg = 'a file with the same name as the desired ' \
'dir, "%s", already exists.' % newdir
raise OSError(msg)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
makedir(head)
#print "_mkdir %s" % repr(newdir)
if tail:
os.mkdir(newdir)
def get_username():
"""Get username
"""
p = Popen('whoami', shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p.stdout is not None:
username = p.stdout.read().strip()
else:
username = 'unknown'
#print 'Got username', username
return username
def get_timestamp():
"""Get timestamp in the ISO 8601 format
http://www.iso.org/iso/date_and_time_format
Format YYYY-MM-DDThh:mm:ss
where the capital letter T is used to separate the date and time
components.
Example: 2009-04-01T13:01:02 represents one minute and two seconds
after one o'clock in the afternoon on the first of April 2009.
"""
from time import strftime
#return strftime('%Y-%m-%dT%H:%M:%S') # ISO 8601
return strftime('%Y-%m-%dT%H%M%S') # Something Windows can read
def get_shell():
"""Get shell if UNIX platform
Otherwise return None
"""
p = Popen('echo $SHELL', shell=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
shell = None
if p.stdout is not None:
shell = p.stdout.read().strip()
shell = os.path.split(shell)[-1] # Only last part of path
return shell
def replace_string_in_file(filename, s1, s2, verbose=False):
"""Replace string s1 with string s2 in filename
"""
# Read data from filename
infile = open(filename)
lines = infile.readlines()
infile.close()
# Replace and store updated versions
outfile = open(filename, 'w')
for s in lines:
new_string = s.replace(s1, s2).rstrip()
if new_string.strip() != s.strip() and verbose:
print 'Replaced %s with %s' % (s, new_string)
outfile.write(new_string + '\n')
outfile.close()
| gpl-3.0 | 6,292,243,001,345,281,000 | 23.985612 | 71 | 0.56061 | false |
orionzhou/robin | formats/coords.py | 1 | 16290 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
parses JCVI software NUCMER (http://mummer.sourceforge.net/manual/)
output - mostly the *.coords file.
"""
import sys
import logging
from math import exp
from itertools import groupby
from maize.formats.base import LineFile, must_open
from maize.algorithms.graph import BiGraph
from maize.apps.base import sh, need_update, get_abs_path
Overlap_types = ("none", "a ~ b", "b ~ a", "a in b", "b in a")
class CoordsLine (object):
"""
The coords line looks like (in one line):
2953 4450 | 525 2023 | 1498 1499 | 98.07 |
8046 2023 | 18.62 74.10 | AC182814.30 contig_100476
the coords file needs to be generated by `show-coords -rcl`
"""
def __init__(self, row):
row = row.replace(" | ", "")
atoms = row.split()
assert len(atoms) in (13, 17), "expecting 13 or 17 columns"
self.start1 = int(atoms[0])
self.end1 = int(atoms[1])
self.start2 = int(atoms[2])
self.end2 = int(atoms[3])
if self.start2 > self.end2:
self.start2, self.end2 = self.end2, self.start2
self.orientation = '-'
else:
self.orientation = '+'
self.len1 = int(atoms[4])
self.len2 = int(atoms[5])
self.identity = float(atoms[6])
self.reflen = int(atoms[7])
self.querylen = int(atoms[8])
self.refcov = float(atoms[9]) / 100.
self.querycov = float(atoms[10]) / 100.
self.ref = atoms[11]
self.query = atoms[12]
# this is taken from CoGeBlast:
        # the coverage of the hit multiplied by percent seq identity
# range from 0-100
self.quality = self.identity * self.querycov
self.score = int(self.identity * self.len1 / 100)
def __str__(self):
slots = "ref start1 end1 reflen " +\
"query start2 end2 querylen orientation"
return "\t".join(str(x) for x in \
[getattr(self, attr) for attr in slots.split()])
def bedline(self, pctid=False):
score = self.identity if pctid else self.score
return '\t'.join(str(x) for x in (self.ref, self.start1 - 1, self.end1,
self.query, score, self.orientation))
def qbedline(self, pctid=False):
score = self.identity if pctid else self.score
return '\t'.join(str(x) for x in (self.query, self.start2 - 1,
self.end2, self.ref, score, self.orientation))
@property
def blastline(self):
hitlen = max(self.len1, self.len2)
score = self.score
mismatch = int(self.len1 * (1 - self.identity / 100))
log_prob = -score * 0.693147181
evalue = 3.0e9 * exp(log_prob)
evalue = "{0:.1g}".format(evalue)
return "\t".join(str(x) for x in (self.query, self.ref,
self.identity, hitlen, mismatch, 0, self.start2, self.end2,
self.start1, self.end1, evalue, score
))
def overlap(self, max_hang=100):
"""
Determine the type of overlap given query, ref alignment coordinates
Consider the following alignment between sequence a and b:
aLhang \ / aRhang
\------------/
/------------\
bLhang / \ bRhang
Terminal overlap: a before b, b before a
Contain overlap: a in b, b in a
"""
aL, aR = 1, self.reflen
bL, bR = 1, self.querylen
aLhang, aRhang = self.start1 - aL, aR - self.end1
bLhang, bRhang = self.start2 - bL, bR - self.end2
if self.orientation == '-':
bLhang, bRhang = bRhang, bLhang
s1 = aLhang + bRhang
s2 = aRhang + bLhang
s3 = aLhang + aRhang
s4 = bLhang + bRhang
# Dovetail (terminal) overlap
if s1 < max_hang:
type = 2 # b ~ a
elif s2 < max_hang:
type = 1 # a ~ b
# Containment overlap
elif s3 < max_hang:
type = 3 # a in b
elif s4 < max_hang:
type = 4 # b in a
else:
type = 0
return type
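# Worked example for CoordsLine.overlap() (hypothetical coordinates): with
# reflen=1000, start1=901, end1=1000 and querylen=800, start2=1, end2=100 on
# the '+' strand, aRhang and bLhang are both 0, so s2 < max_hang and the
# overlap type is 1 ("a ~ b"), i.e. the right end of the reference dovetails
# into the left end of the query.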
class Coords (LineFile):
"""
when parsing the .coords file, first skip first 5 lines
[S1] [E1] | [S2] [E2] | [LEN 1] [LEN 2] | [% IDY] | [TAGS]
then each row would be composed as this
"""
def __init__(self, filename, sorted=False, header=False):
if filename.endswith(".delta"):
coordsfile = filename.rsplit(".", 1)[0] + ".coords"
if need_update(filename, coordsfile):
fromdelta([filename])
filename = coordsfile
super(Coords, self).__init__(filename)
fp = open(filename)
if header:
self.cmd = fp.next()
for row in fp:
try:
self.append(CoordsLine(row))
except AssertionError:
pass
if sorted:
self.ref_sort()
def ref_sort(self):
# sort by reference positions
self.sort(key=lambda x: (x.ref, x.start1))
def quality_sort(self):
# sort descending with score = identity * coverage
self.sort(key=lambda x: (x.query, -x.quality))
@property
def hits(self):
"""
returns a dict with query => blastline
"""
self.quality_sort()
hits = dict((query, list(blines)) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return hits
@property
def best_hits(self):
"""
returns a dict with query => best mapped position
"""
self.quality_sort()
best_hits = dict((query, blines.next()) for (query, blines) in \
groupby(self, lambda x: x.query))
self.ref_sort()
return best_hits
def get_stats(coordsfile):
from maize.utils.range import range_union
logging.debug("Report stats on `%s`" % coordsfile)
coords = Coords(coordsfile)
ref_ivs = []
qry_ivs = []
identicals = 0
alignlen = 0
for c in coords:
qstart, qstop = c.start2, c.end2
if qstart > qstop:
qstart, qstop = qstop, qstart
qry_ivs.append((c.query, qstart, qstop))
sstart, sstop = c.start1, c.end1
if sstart > sstop:
sstart, sstop = sstop, sstart
ref_ivs.append((c.ref, sstart, sstop))
alen = sstop - sstart
alignlen += alen
identicals += c.identity / 100. * alen
qrycovered = range_union(qry_ivs)
refcovered = range_union(ref_ivs)
id_pct = identicals * 100. / alignlen
return qrycovered, refcovered, id_pct
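# get_stats() therefore returns (query bp covered, reference bp covered,
# identity %), where each coverage is the union of aligned intervals on that
# side and the identity is weighted by alignment length, not a simple mean.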
def merge(args):
"""
%prog merge ref.fasta query.fasta *.delta
Merge delta files into a single delta.
"""
p = OptionParser(merge.__doc__)
p.set_outfile(outfile="merged_results.delta")
opts, args = p.parse_args(args)
if len(args) < 3:
sys.exit(not p.print_help())
ref, query = args[:2]
deltafiles = args[2:]
outfile = args.outfile
ref = get_abs_path(ref)
query = get_abs_path(query)
fw = must_open(outfile, "w")
print >> fw, " ".join((ref, query))
print >> fw, "NUCMER"
fw.close()
for d in deltafiles:
cmd = "awk 'NR > 2 {{print $0}}' {0}".format(d)
sh(cmd, outfile=outfile, append=True)
def blast(args):
"""
%prog blast <deltafile|coordsfile>
Covert delta or coordsfile to BLAST tabular output.
"""
p = OptionParser(blast.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
deltafile, = args
blastfile = deltafile.rsplit(".", 1)[0] + ".blast"
if need_update(deltafile, blastfile):
coords = Coords(deltafile)
fw = open(blastfile, "w")
for c in coords:
print >> fw, c.blastline
def fromdelta(args):
"""
%prog fromdelta deltafile
Convert deltafile to coordsfile.
"""
p = OptionParser(fromdelta.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
deltafile, = args
coordsfile = deltafile.rsplit(".", 1)[0] + ".coords"
cmd = "show-coords -rclH {0}".format(deltafile)
sh(cmd, outfile=coordsfile)
return coordsfile
def sort(args):
"""
%prog sort coordsfile
Sort coordsfile based on query or ref.
"""
import maize.formats.blast
return maize.formats.blast.sort(args + ["--coords"])
def coverage(args):
"""
%prog coverage coordsfile
Report the coverage per query record, useful to see which query matches
reference. The coords file MUST be filtered with supermap::
maize.algorithms.supermap --filter query
"""
p = OptionParser(coverage.__doc__)
sp1.add_argument("-c", dest="cutoff", default=0.5, type="float",
help="only report query with coverage greater than [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
coordsfile, = args
fp = open(coordsfile)
coords = []
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
coords.append(c)
coords.sort(key=lambda x: x.query)
coverages = []
for query, lines in groupby(coords, key=lambda x: x.query):
cumulative_cutoff = sum(x.querycov for x in lines)
coverages.append((query, cumulative_cutoff))
coverages.sort(key=lambda x: (-x[1], x[0]))
for query, cumulative_cutoff in coverages:
if cumulative_cutoff < args.cutoff:
break
print("{0}\t{1:.2f}".format(query, cumulative_cutoff))
def annotate(args):
"""
%prog annotate coordsfile
Annotate coordsfile to append an additional column, with the following
overlaps: {0}.
"""
p = OptionParser(annotate.__doc__.format(", ".join(Overlap_types)))
sp1.add_argument("--maxhang", default=100, type="int",
help="Max hang to call dovetail overlap [default: %default]")
sp1.add_argument("--all", default=False, action="store_true",
help="Output all lines [default: terminal/containment]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
coordsfile, = args
fp = open(coordsfile)
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
ov = c.overlap(args.maxhang)
if not args.all and ov == 0:
continue
print("{0}\t{1}".format(row.strip(), Overlap_types[ov]))
def print_stats(qrycovered, refcovered, id_pct):
from maize.utils.cbook import thousands
try:
refcovered = thousands(refcovered)
qrycovered = thousands(qrycovered)
except:
pass
m1 = "Reference coverage: {0} bp".format(refcovered)
m2 = "Query coverage: {0} bp".format(qrycovered)
m3 = "Identity: {0:.2f}%".format(id_pct)
print >> sys.stderr, "\n".join((m1, m2, m3))
def summary(args):
"""
%prog summary coordsfile
provide summary on id% and cov%, for both query and reference
"""
p = OptionParser(summary.__doc__)
sp1.add_argument("-s", dest="single", default=False, action="store_true",
help="provide stats per reference seq")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
coordsfile, = args
qrycovered, refcovered, id_pct = get_stats(coordsfile)
print_stats(qrycovered, refcovered, id_pct)
def filter(args):
"""
%prog filter <deltafile|coordsfile>
Produce a new delta/coords file and filter based on id% or cov%.
Use `delta-filter` for .delta file.
"""
p = OptionParser(filter.__doc__)
p.set_align(pctid=0, hitlen=0)
sp1.add_argument("--overlap", default=False, action="store_true",
help="Print overlap status (e.g. terminal, contained)")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pctid = args.pctid
hitlen = args.hitlen
filename, = args
if pctid == 0 and hitlen == 0:
return filename
pf, suffix = filename.rsplit(".", 1)
outfile = "".join((pf, ".P{0}L{1}.".format(int(pctid), int(hitlen)), suffix))
if not need_update(filename, outfile):
return outfile
if suffix == "delta":
cmd = "delta-filter -i {0} -l {1} {2}".format(pctid, hitlen, filename)
sh(cmd, outfile=outfile)
return outfile
fp = open(filename)
fw = must_open(outfile, "w")
for row in fp:
try:
c = CoordsLine(row)
except AssertionError:
continue
if c.identity < pctid:
continue
if c.len2 < hitlen:
continue
if args.overlap and not c.overlap:
continue
outrow = row.rstrip()
if args.overlap:
ov = Overlap_types[c.overlap]
outrow += "\t" + ov
print >> fw, outrow
return outfile
def bed(args):
"""
%prog bed coordsfile
will produce a bed list of mapped position and orientation (needs to
be beyond quality cutoff, say 50) in bed format
"""
p = OptionParser(bed.__doc__)
sp1.add_argument("--query", default=False, action="store_true",
help="print out query intervals rather than ref [default: %default]")
sp1.add_argument("--pctid", default=False, action="store_true",
help="use pctid in score [default: %default]")
sp1.add_argument("--cutoff", dest="cutoff", default=0, type="float",
help="get all the alignments with quality above threshold " +\
"[default: %default]")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
coordsfile, = args
query = args.query
pctid = args.pctid
quality_cutoff = args.cutoff
coords = Coords(coordsfile)
for c in coords:
if c.quality < quality_cutoff:
continue
line = c.qbedline(pctid=pctid) if query else c.bedline(pctid=pctid)
print(line)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = 'coords utilities'
)
sp = parser.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser('annotate', help='annotate overlap types in coordsfile',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = annotate)
sp1 = sp.add_parser('blast', help='convert to blast tabular output',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = blast)
sp1 = sp.add_parser('filter', help='filter based on id%% and cov%%, write a new coords file',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = filter)
sp1 = sp.add_parser('fromdelta', help='convert deltafile to coordsfile',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = fromdelta)
sp1 = sp.add_parser('merge', help='merge deltafiles',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = merge)
sp1 = sp.add_parser('sort', help='sort coords file based on query or subject',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = sort)
sp1 = sp.add_parser('summary', help='provide summary on id%% and cov%%',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('i', help = '')
sp1.set_defaults(func = summary)
args = parser.parse_args()
if args.command:
args.func(args)
else:
print('Error: need to specify a sub command\n')
parser.print_help()
| gpl-2.0 | -849,455,419,382,932,000 | 28.089286 | 97 | 0.578085 | false |
sassoftware/jobmaster | jobmaster/server.py | 1 | 10167 | #!/usr/bin/python
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import optparse
import os
import json
import sys
from conary import conarycfg
from conary.lib.log import setupLogging
from conary.lib.util import rmtree
from mcp import jobstatus
from mcp.messagebus import bus_node
from mcp.messagebus import messages
from mcp.messagebus import nodetypes
from mcp.messagebus.logger import MessageBusLogger
from rmake.lib import procutil
from jobmaster import config
from jobmaster import jobhandler
from jobmaster import util
from jobmaster.networking import AddressGenerator
from jobmaster.proxy import ProxyServer
from jobmaster.resources.devfs import LoopManager
from jobmaster.resources.block import get_scratch_lvs
from jobmaster.response import ResponseProxy
from jobmaster.subprocutil import setDebugHook
# Register image job message type with rMake
from mcp import image_job
image_job = image_job
log = logging.getLogger(__name__)
class JobMaster(bus_node.BusNode):
sessionClass = 'image_master'
subscriptions = [
'/image_command',
]
timerPeriod = 5
def __init__(self, cfg):
node = nodetypes.MasterNodeType(cfg.slaveLimit,
procutil.MachineInformation())
buslogger = MessageBusLogger.new(__name__ + '.messagebus')
bus_node.BusNode.__init__(self, (cfg.queueHost, cfg.queuePort),
nodeInfo=node, logger=buslogger)
self.cfg = cfg
self.handlers = {}
self.subprocesses = []
self._cfgCache = {}
self._map = self.bus.session._map
def getConaryConfig(self, rbuilderUrl, cache=True):
if cache and rbuilderUrl in self._cfgCache:
ccfg = self._cfgCache[rbuilderUrl]
else:
if not rbuilderUrl.endswith('/'):
rbuilderUrl += '/'
ccfg = conarycfg.ConaryConfiguration(True)
ccfg.initializeFlavors()
# Don't inherit proxy settings from the system
ccfg.configLine('proxyMap []')
ccfg.configLine('includeConfigFile %sconaryrc' % rbuilderUrl)
if cache:
self._cfgCache[rbuilderUrl] = ccfg
return ccfg
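    # getConaryConfig builds (and, with cache=True, memoises per rBuilder URL)
    # a Conary client configuration seeded from the rBuilder's conaryrc, with
    # system proxy settings explicitly cleared so jobs talk straight to it.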
def pre_start(self):
self.addressGenerator = AddressGenerator(self.cfg.pairSubnet)
self.loopManager = LoopManager(
os.path.join(self.cfg.basePath, 'locks/loop'))
self.proxyServer = ProxyServer(self.cfg.masterProxyPort, self._map,
self)
def run(self):
log.info("Started with pid %d.", os.getpid())
setDebugHook()
try:
self.serve_forever()
finally:
self.killHandlers()
def killHandlers(self):
handlers, self.handlers = self.handlers, {}
for handler in handlers.values():
handler.kill()
# Node client machinery and entry points
def onTimer(self):
"""
Send jobmaster status to the dispatcher every 5 seconds.
"""
self.nodeInfo.machineInfo.update()
msg = messages.MasterStatusMessage()
msg.set(self.nodeInfo)
self.bus.sendMessage('/image_event', msg)
def doResetCommand(self, msg):
"""
Terminate all jobs, esp. after a dispatcher restart.
"""
log.info("Terminating all jobs per dispatcher request.")
self.killHandlers()
def doJobCommand(self, msg):
"""
Run a new image job.
"""
job = msg.payload.job
try:
handler = jobhandler.JobHandler(self, job)
self.proxyServer.addTarget(handler.network.slaveAddr, job.rbuilder_url)
handler.start()
self.handlers[job.uuid] = handler
except:
log.exception("Unhandled exception while starting job handler")
self.removeJob(job, failed=True)
def doStopCommand(self, msg):
"""Stop one running job."""
uuid = msg.getUUID()
if uuid in self.handlers:
log.info("Stopping job %s", uuid)
self.handlers[uuid].stop()
else:
log.info("Ignoring request to stop unknown job %s", uuid)
def doSetSlotsCommand(self, msg):
"""Set the number of slots."""
self.nodeInfo.slots = self.cfg.slaveLimit = int(msg.getSlots())
log.info("Setting slot limit to %d.", self.cfg.slaveLimit)
# Write the new value to file so it is preserved across restarts.
cfgDir = os.path.join(self.cfg.basePath, 'config.d')
if os.access(cfgDir, os.W_OK):
fObj = open(cfgDir + '/99_runtime.conf', 'w')
self.cfg.storeKey('slaveLimit', fObj)
fObj.close()
else:
log.warning("Could not write new config in %s.", cfgDir)
def handleRequestIfReady(self, sleepTime=1.0):
bus_node.BusNode.handleRequestIfReady(self, sleepTime)
# Check on all our subprocesses to make sure they are alive and reap
# them if they are not.
for handler in self.handlers.values():
if not handler.check():
self.handlerStopped(handler)
for proc in self.subprocesses[:]:
if not proc.check():
self.subprocesses.remove(proc)
def handlerStopped(self, handler):
"""
Clean up after a handler has exited.
"""
uuid = handler.job.uuid
# If the handler did not exit cleanly, notify the rBuilder that the job
# has failed.
if handler.exitCode:
log.error("Handler for job %s terminated unexpectedly", uuid)
self.removeJob(handler.job, failed=True)
else:
self.removeJob(handler.job, failed=False)
self.proxyServer.removeTarget(handler.network.slaveAddr)
del self.handlers[uuid]
def removeJob(self, job, failed=False):
if failed:
try:
response = ResponseProxy(job.rbuilder_url,
json.loads(job.job_data))
response.sendStatus(jobstatus.FAILED,
"Error creating build environment")
except:
log.exception("Unable to report failure for job %s", job.uuid)
msg = messages.JobCompleteMessage()
msg.set(job.uuid)
self.bus.sendMessage('/image_event', msg)
# Utility methods
def clean_mounts(self):
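        # Repeatedly scan /proc/mounts for jobmaster devfs/rootfs mount
        # points, unmounting and removing each one; the loop stops once a
        # pass finds nothing left to try (or warns if the same set keeps
        # failing), after which leftover scratch LVs are removed.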
last = None
while True:
mounts = open('/proc/mounts').read().splitlines()
tried = set()
for mount in mounts:
mount = mount.split()[1]
for prefix in ('devfs', 'rootfs'):
if mount.startswith('/tmp/%s-' % prefix):
try:
util.call('umount ' + mount)
log.info("Unmounted %s", mount)
os.rmdir(mount)
except:
pass
tried.add(mount)
break
if not tried:
break
if tried == last:
log.warning("Failed to unmount these points: %s",
' '.join(tried))
break
last = tried
for lv_name in get_scratch_lvs(self.cfg.lvmVolumeName):
log.info("Deleting LV %s/%s", self.cfg.lvmVolumeName, lv_name)
util.call('lvremove -f %s/%s' % (self.cfg.lvmVolumeName, lv_name))
def clean_roots(self):
# Contents roots are no longer used; delete everything
root = os.path.join(self.cfg.basePath, 'roots')
for name in os.listdir(root):
path = os.path.join(root, name)
log.info("Deleting old contents root %s", name)
rmtree(path)
def main(args):
parser = optparse.OptionParser()
parser.add_option('-c', '--config-file', default=config.CONFIG_PATH)
parser.add_option('-n', '--no-daemon', action='store_true')
parser.add_option('--clean-mounts', action='store_true',
help='Clean up stray mount points and logical volumes')
parser.add_option('--clean-roots', action='store_true',
help='Clean up old jobslave roots')
options, args = parser.parse_args(args)
cfg = config.MasterConfig()
cfg.read(options.config_file)
if options.clean_mounts or options.clean_roots:
options.no_daemon = True
level = cfg.getLogLevel()
setupLogging(logPath=cfg.logPath, fileLevel=level, consoleFormat='file',
consoleLevel=level if options.no_daemon else None)
master = JobMaster(cfg)
if options.clean_mounts:
return master.clean_mounts()
elif options.clean_roots:
return master.clean_roots()
elif options.no_daemon:
master.pre_start()
master.run()
return 0
else:
master.pre_start()
# Double-fork to daemonize
pid = os.fork()
if pid:
return
pid = os.fork()
if pid:
os._exit(0)
try:
os.setsid()
devNull = os.open(os.devnull, os.O_RDWR)
os.dup2(devNull, sys.stdout.fileno())
os.dup2(devNull, sys.stderr.fileno())
os.dup2(devNull, sys.stdin.fileno())
os.close(devNull)
fObj = open(cfg.pidFile, 'w')
fObj.write(str(os.getpid()))
fObj.close()
master.run()
finally:
try:
os.unlink(cfg.pidFile)
finally:
os._exit(0)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| apache-2.0 | 8,605,496,121,579,022,000 | 32.444079 | 83 | 0.592112 | false |
intel-hpdd/intel-manager-for-lustre | chroma_core/lib/storage_plugin/base_resource_attribute.py | 1 | 3894 | # Copyright (c) 2020 DDN. All rights reserved.
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file.
from chroma_core.models.storage_plugin import StorageResourceAttributeSerialized
class BaseResourceAttribute(object):
"""Base class for declared attributes of BaseStorageResource. This is
to BaseStorageResource as models.fields.Field is to models.Model
"""
# This is a hack to store the order in which attributes are declared so
# that I can sort the BaseStorageResource attribute dict for presentation in the same order
# as the plugin author declared the attributes.
creation_counter = 0
model_class = StorageResourceAttributeSerialized
def __init__(self, optional=False, label=None, hidden=False, user_read_only=False, default=None):
"""
:param optional: If this is True, the attribute may be left unassigned (i.e. null). Otherwise,
a non-null value must be provided for all instances.
:param label: Human readable string for use in the user interface. Use this if the programmatic
attribute name in the resource declaration is not appropriate for presentation to the user.
:param hidden: If this is True, this attribute will not be included as a column in the tabular view
of storage resources.
:param user_read_only: If this is True, this attribute can only be set internally by the plugin, not
by the user. For example, a controller might have some attributes entered by the user, and some
read from the hardware: those read from the hardware would be marked `user_read_only`. Attributes
which are `user_read_only` must also be `optional`.
:param default: If not None then this default value will be used in the case of a non-optional value
missing. Generally used in the case of upgrades to supply previous records. default maybe callable
or a fixed value.
"""
self.optional = optional
self.default = default
self.label = label
self.hidden = hidden
self.user_read_only = user_read_only
self.creation_counter = BaseResourceAttribute.creation_counter
BaseResourceAttribute.creation_counter += 1
def get_label(self, name):
if self.label:
return self.label
else:
words = name.split("_")
return " ".join([words[0].title()] + words[1:])
def validate(self, value):
"""Note: this validation is NOT intended to be used for catching cases
in production, it does not provide hooks for user-friendly error messages
etc. Think of it more as an assert."""
pass
def human_readable(self, value):
"""Subclasses should format their value for human consumption, e.g.
1024 => 1kB"""
return value
def encode(self, value):
return value
def decode(self, value):
return value
def encrypt(self, value):
"""The encryption function will be called by the manager server when processing user input (e.g.
when a resource is added in the UI). The obfuscated text will be seen by
the plugin when the resource is retrieved.
:param value: value to encrypt
:return: encrypted value
"""
return value
def to_markup(self, value):
from django.utils.html import conditional_escape
return conditional_escape(value)
def cast(self, value):
"""Cast a value to the correct type for the ResourceAttribute.
Will throw an exception if the value cannot be cast. (in child classes)
An example of usage is that when values come from the rest interface they may not be of the correct type.
:param value: Value to be cast.
"""
return value
| mit | -1,159,394,484,938,782,000 | 39.14433 | 113 | 0.668978 | false |
GoogleCloudPlatform/bigquery-utils | tools/cloud_functions/gcs_event_based_ingest/tests/conftest.py | 1 | 20146 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for gcs_ocn_bq_ingest"""
import json
import os
import time
import uuid
from typing import List
import pytest
from google.cloud import bigquery
from google.cloud import error_reporting
from google.cloud import storage
import gcs_ocn_bq_ingest.common.ordering
import gcs_ocn_bq_ingest.common.utils
TEST_DIR = os.path.realpath(os.path.dirname(__file__))
LOAD_JOB_POLLING_TIMEOUT = 10 # seconds
@pytest.fixture(scope="module")
def bq() -> bigquery.Client:
"""BigQuery Client"""
return bigquery.Client(location="US")
@pytest.fixture(scope="module")
def gcs() -> storage.Client:
"""GCS Client"""
return storage.Client()
@pytest.fixture(scope="module")
def error() -> error_reporting.Client:
"""GCS Client"""
return error_reporting.Client()
@pytest.fixture
def gcs_bucket(request, gcs) -> storage.bucket.Bucket:
"""GCS bucket for test artifacts"""
bucket = gcs.create_bucket(str(uuid.uuid4()))
bucket.versioning_enabled = True
bucket.patch()
    # override the default field delimiter at bucket level
load_config_json = {
"fieldDelimiter": "|",
}
load_json_blob: storage.Blob = bucket.blob("_config/load.json")
load_json_blob.upload_from_string(json.dumps(load_config_json))
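    # The keys in _config/load.json (here a "|" fieldDelimiter) act as
    # bucket-wide defaults that the ingest function merges into the BigQuery
    # load jobs it creates for objects in this bucket.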
def teardown():
load_json_blob.delete()
bucket.versioning_enabled = False
bucket.patch()
for obj in gcs.list_blobs(bucket_or_name=bucket, versions=True):
obj.delete()
bucket.delete(force=True)
request.addfinalizer(teardown)
return bucket
@pytest.fixture
def mock_env(gcs, monkeypatch):
"""environment variable mocks"""
# Infer project from ADC of gcs client.
monkeypatch.setenv("GCP_PROJECT", gcs.project)
monkeypatch.setenv("FUNCTION_NAME", "integration-test")
monkeypatch.setenv("FUNCTION_TIMEOUT_SEC", "540")
monkeypatch.setenv("BQ_PROJECT", gcs.project)
@pytest.fixture
def ordered_mock_env(mock_env, monkeypatch):
"""environment variable mocks"""
monkeypatch.setenv("ORDER_PER_TABLE", "TRUE")
@pytest.fixture
def dest_dataset(request, bq, mock_env, monkeypatch):
random_dataset = (f"test_bq_ingest_gcf_"
f"{str(uuid.uuid4())[:8].replace('-','_')}")
dataset = bigquery.Dataset(f"{os.getenv('GCP_PROJECT')}"
f".{random_dataset}")
dataset.location = "US"
bq.create_dataset(dataset)
monkeypatch.setenv("BQ_LOAD_STATE_TABLE",
f"{dataset.dataset_id}.serverless_bq_loads")
print(f"created dataset {dataset.dataset_id}")
def teardown():
bq.delete_dataset(dataset, delete_contents=True, not_found_ok=True)
request.addfinalizer(teardown)
return dataset
@pytest.fixture
def dest_table(request, bq, mock_env, dest_dataset) -> bigquery.Table:
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema_file:
schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
json.load(schema_file))
table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}"
f".{dest_dataset.dataset_id}.cf_test_nation_"
f"{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table = bq.create_table(table)
def teardown():
bq.delete_table(table, not_found_ok=True)
request.addfinalizer(teardown)
return table
@pytest.fixture(scope="function")
def gcs_data(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
data_objs = []
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
            if do.exists():
do.delete()
request.addfinalizer(teardown)
return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_data_under_sub_dirs(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
data_objs = []
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "foo", "bar", "baz", test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return data_objs[-1]
@pytest.fixture(scope="function")
def gcs_truncating_load_config(request, gcs_bucket, dest_dataset,
dest_table) -> storage.blob.Blob:
config_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id,
dest_table.table_id,
"_config",
"load.json",
]))
config_obj.upload_from_string(
json.dumps({"writeDisposition": "WRITE_TRUNCATE"}))
def teardown():
if config_obj.exists():
config_obj.delete()
request.addfinalizer(teardown)
return config_obj
@pytest.fixture(scope="function")
def gcs_batched_data(request, gcs_bucket, dest_dataset,
dest_table) -> List[storage.blob.Blob]:
"""
upload two batches of data
"""
data_objs = []
for batch in ["batch0", "batch1"]:
for test_file in ["part-m-00000", "part-m-00001", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_table.table_id, batch, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nation",
test_file))
data_objs.append(data_obj)
def teardown():
for do in data_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return [data_objs[-1], data_objs[-4]]
@pytest.fixture
def gcs_external_config(request, gcs_bucket, dest_dataset,
dest_table) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext"
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
@pytest.fixture(scope="function")
def gcs_partitioned_data(request, gcs_bucket, dest_dataset,
dest_partitioned_table) -> List[storage.blob.Blob]:
data_objs = []
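    # The "$2017041101"-style suffixes are BigQuery partition decorators
    # (YYYYMMDDHH) for the hourly-partitioned destination table, so each
    # batch of files is routed to one specific partition.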
for partition in ["$2017041101", "$2017041102"]:
for test_file in ["nyc_311.csv", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_partitioned_table.table_id,
partition, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "nyc_311",
partition, test_file))
data_objs.append(data_obj)
def teardown():
for dobj in data_objs:
# we expect some backfill files to be removed by the cloud function.
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return [data_objs[-1], data_objs[-3]]
@pytest.fixture(scope="function")
def dest_partitioned_table(request, bq: bigquery.Client, mock_env,
dest_dataset) -> bigquery.Table:
public_table: bigquery.Table = bq.get_table(
bigquery.TableReference.from_string(
"bigquery-public-data.new_york_311.311_service_requests"))
schema = public_table.schema
table: bigquery.Table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}"
f".{dest_dataset.dataset_id}.cf_test_nyc_311_"
f"{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table.time_partitioning = bigquery.TimePartitioning()
table.time_partitioning.type_ = bigquery.TimePartitioningType.HOUR
table.time_partitioning.field = "created_date"
table = bq.create_table(table)
def teardown():
bq.delete_table(table, not_found_ok=True)
request.addfinalizer(teardown)
return table
def bq_wait_for_rows(bq_client: bigquery.Client, table: bigquery.Table,
expected_num_rows: int):
"""
polls tables.get API for number of rows until reaches expected value or
times out.
This is mostly an optimization to speed up the test suite without making it
flaky.
"""
start_poll = time.monotonic()
actual_num_rows = 0
while time.monotonic() - start_poll < LOAD_JOB_POLLING_TIMEOUT:
bq_table: bigquery.Table = bq_client.get_table(table)
actual_num_rows = bq_table.num_rows
if actual_num_rows == expected_num_rows:
return
if actual_num_rows > expected_num_rows:
raise AssertionError(
f"{table.project}.{table.dataset_id}.{table.table_id} has"
f"{actual_num_rows} rows. expected {expected_num_rows} rows.")
raise AssertionError(
f"Timed out after {LOAD_JOB_POLLING_TIMEOUT} seconds waiting for "
f"{table.project}.{table.dataset_id}.{table.table_id} to "
f"reach {expected_num_rows} rows."
f"last poll returned {actual_num_rows} rows.")
@pytest.fixture
def dest_ordered_update_table(request, gcs, gcs_bucket, bq, mock_env,
dest_dataset) -> bigquery.Table:
with open(os.path.join(TEST_DIR, "resources",
"ordering_schema.json")) as schema_file:
schema = gcs_ocn_bq_ingest.common.utils.dict_to_bq_schema(
json.load(schema_file))
table = bigquery.Table(
f"{os.environ.get('GCP_PROJECT')}.{dest_dataset.dataset_id}"
f".cf_test_ordering_{str(uuid.uuid4()).replace('-','_')}",
schema=schema,
)
table = bq.create_table(table)
# Our test query only updates on a single row so we need to populate
# original row.
# This can be used to simulate an existing _bqlock from a prior run of the
# subscriber loop with a job that has succeeded.
job: bigquery.LoadJob = bq.load_table_from_json(
[{
"id": 1,
"alpha_update": ""
}],
table,
job_id_prefix=gcs_ocn_bq_ingest.common.constants.DEFAULT_JOB_PREFIX)
# The subscriber will be responsible for cleaning up this file.
bqlock_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}", table.table_id,
"_bqlock"
]))
bqlock_obj.upload_from_string(job.job_id)
def teardown():
bq.delete_table(table, not_found_ok=True)
if bqlock_obj.exists():
bqlock_obj.delete()
request.addfinalizer(teardown)
return table
@pytest.fixture(scope="function")
def gcs_ordered_update_data(
request, gcs_bucket, dest_dataset,
dest_ordered_update_table) -> List[storage.blob.Blob]:
data_objs = []
older_success_blob: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, "00", "_SUCCESS"
]))
older_success_blob.upload_from_string("")
data_objs.append(older_success_blob)
chunks = {
"01",
"02",
"03",
}
for chunk in chunks:
for test_file in ["data.csv", "_SUCCESS"]:
data_obj: storage.blob.Blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, chunk, test_file
]))
data_obj.upload_from_filename(
os.path.join(TEST_DIR, "resources", "test-data", "ordering",
chunk, test_file))
data_objs.append(data_obj)
def teardown():
for dobj in data_objs:
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture(scope="function")
def gcs_backlog(request, gcs, gcs_bucket,
gcs_ordered_update_data) -> List[storage.blob.Blob]:
data_objs = []
# We will deal with the last incremental in the test itself to test the
# behavior of a new backlog subscriber.
for success_blob in gcs_ordered_update_data:
gcs_ocn_bq_ingest.common.ordering.backlog_publisher(gcs, success_blob)
backlog_blob = \
gcs_ocn_bq_ingest.common.ordering.success_blob_to_backlog_blob(
success_blob
)
backlog_blob.upload_from_string("")
data_objs.append(backlog_blob)
def teardown():
for dobj in data_objs:
if dobj.exists():
dobj.delete()
request.addfinalizer(teardown)
return list(filter(lambda do: do.name.endswith("_SUCCESS"), data_objs))
@pytest.fixture
def gcs_external_update_config(request, gcs_bucket, dest_dataset,
dest_ordered_update_table) -> storage.Blob:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = """
UPDATE {dest_dataset}.{dest_table} dest
SET alpha_update = CONCAT(dest.alpha_update, src.alpha_update)
FROM temp_ext src
WHERE dest.id = src.id
"""
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"ordering_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
backfill_blob = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_ordered_update_table.table_id,
gcs_ocn_bq_ingest.common.constants.BACKFILL_FILENAME
]))
backfill_blob.upload_from_string("")
config_objs.append(sql_obj)
config_objs.append(config_obj)
config_objs.append(backfill_blob)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return backfill_blob
@pytest.mark.usefixtures("bq", "gcs_bucket", "dest_dataset",
"dest_partitioned_table")
@pytest.fixture
def gcs_external_partitioned_config(
request, bq, gcs_bucket, dest_dataset,
dest_partitioned_table) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
dest_dataset.dataset_id,
dest_partitioned_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = "INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;"
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
dest_dataset.dataset_id, dest_partitioned_table.table_id, "_config",
"external.json"
]))
public_table: bigquery.Table = bq.get_table(
bigquery.TableReference.from_string(
"bigquery-public-data.new_york_311.311_service_requests"))
config = {
"schema": public_table.to_api_repr()['schema'],
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
            if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
@pytest.fixture
def no_use_error_reporting(monkeypatch):
monkeypatch.setenv("USE_ERROR_REPORTING_API", "False")
@pytest.fixture
def gcs_external_config_bad_statement(
request, gcs_bucket, dest_dataset, dest_table,
no_use_error_reporting) -> List[storage.blob.Blob]:
config_objs = []
sql_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id,
"_config",
"bq_transform.sql",
]))
sql = ("INSERT {dest_dataset}.{dest_table} SELECT * FROM temp_ext;\n"
"INSERT {dest_dataset}.{dest_table} SELECT 1/0;")
sql_obj.upload_from_string(sql)
config_obj = gcs_bucket.blob("/".join([
f"{dest_dataset.project}.{dest_dataset.dataset_id}",
dest_table.table_id, "_config", "external.json"
]))
with open(os.path.join(TEST_DIR, "resources",
"nation_schema.json")) as schema:
fields = json.load(schema)
config = {
"schema": {
"fields": fields
},
"csvOptions": {
"allowJaggedRows": False,
"allowQuotedNewlines": False,
"encoding": "UTF-8",
"fieldDelimiter": "|",
"skipLeadingRows": 0,
},
"sourceFormat": "CSV",
"sourceUris": ["REPLACEME"],
}
config_obj.upload_from_string(json.dumps(config))
config_objs.append(sql_obj)
config_objs.append(config_obj)
def teardown():
for do in config_objs:
if do.exists():
do.delete()
request.addfinalizer(teardown)
return config_objs
| apache-2.0 | 5,131,152,216,655,499,000 | 31.079618 | 80 | 0.59729 | false |
FannyCheung/python_Machine-Learning | MapReduce处理日志文件/FileSplit.py | 1 | 1029 | # coding:utf-8
#file:FileSplit.py
import os,os.path,time
def FileSplit(sourceFile,targetFolder):
sFile = open(sourceFile,'r')
    number = 100000  # each output file holds 100000 lines of data
dataLine = sFile.readline()
    tempData = []  # buffer list
fileNum = 1
    if not os.path.isdir(targetFolder):  # create the target directory if it does not exist
os.mkdir(targetFolder)
    while dataLine:  # while there is still data
for row in range(number):
            tempData.append(dataLine)  # append one line of data to the list
dataLine = sFile.readline()
            if not dataLine:  # stop if there is no more data to save
break
tFilename = os.path.join(targetFolder,os.path.split(sourceFile)[1] + str(fileNum) + '.txt')
        tFile = open(tFilename,'a+')  # create the small output file
        tFile.writelines(tempData)  # write the buffered lines to the file
tFile.close()
        tempData = []  # clear the buffer list
        print(tFilename + u' created at: ' + str(time.ctime()))
        fileNum += 1  # file number
sFile.close()
if __name__ == '__main__':
FileSplit('access.log','access') | gpl-2.0 | -5,881,135,195,889,917,000 | 27.129032 | 93 | 0.660161 | false |
PierreRaybaut/PythonQwt | qwt/plot_directpainter.py | 1 | 10691 | # -*- coding: utf-8 -*-
#
# Licensed under the terms of the Qwt License
# Copyright (c) 2002 Uwe Rathmann, for the original C++ code
# Copyright (c) 2015 Pierre Raybaut, for the Python translation/optimization
# (see LICENSE file for more details)
"""
QwtPlotDirectPainter
--------------------
.. autoclass:: QwtPlotDirectPainter
:members:
"""
from qtpy.QtGui import QPainter, QRegion
from qtpy.QtCore import QObject, Qt, QEvent
from qtpy import QtCore as QC
QT_MAJOR_VERSION = int(QC.__version__.split(".")[0])
from qwt.plot import QwtPlotItem
from qwt.plot_canvas import QwtPlotCanvas
def qwtRenderItem(painter, canvasRect, seriesItem, from_, to):
# TODO: A minor performance improvement is possible with caching the maps
plot = seriesItem.plot()
xMap = plot.canvasMap(seriesItem.xAxis())
yMap = plot.canvasMap(seriesItem.yAxis())
painter.setRenderHint(
QPainter.Antialiasing, seriesItem.testRenderHint(QwtPlotItem.RenderAntialiased)
)
seriesItem.drawSeries(painter, xMap, yMap, canvasRect, from_, to)
def qwtHasBackingStore(canvas):
return (
canvas.testPaintAttribute(QwtPlotCanvas.BackingStore) and canvas.backingStore()
)
class QwtPlotDirectPainter_PrivateData(object):
def __init__(self):
self.attributes = 0
self.hasClipping = False
self.seriesItem = None # QwtPlotSeriesItem
self.clipRegion = QRegion()
self.painter = QPainter()
self.from_ = None
self.to = None
class QwtPlotDirectPainter(QObject):
"""
Painter object trying to paint incrementally
Often applications want to display samples while they are
collected. When there are too many samples complete replots
will be expensive to be processed in a collection cycle.
`QwtPlotDirectPainter` offers an API to paint
    subsets (f.e. all added points) without erasing/repainting
the plot canvas.
On certain environments it might be important to calculate a proper
clip region before painting. F.e. for Qt Embedded only the clipped part
of the backing store will be copied to a (maybe unaccelerated)
frame buffer.
.. warning::
Incremental painting will only help when no replot is triggered
by another operation (like changing scales) and nothing needs
to be erased.
Paint attributes:
* `QwtPlotDirectPainter.AtomicPainter`:
Initializing a `QPainter` is an expensive operation.
When `AtomicPainter` is set each call of `drawSeries()` opens/closes
a temporary `QPainter`. Otherwise `QwtPlotDirectPainter` tries to
use the same `QPainter` as long as possible.
* `QwtPlotDirectPainter.FullRepaint`:
When `FullRepaint` is set the plot canvas is explicitly repainted
after the samples have been rendered.
* `QwtPlotDirectPainter.CopyBackingStore`:
When `QwtPlotCanvas.BackingStore` is enabled the painter
has to paint to the backing store and the widget. In certain
situations/environments it might be faster to paint to
the backing store only and then copy the backing store to the canvas.
This flag can also be useful for settings, where Qt fills the
    clip region with the widget background.
"""
# enum Attribute
AtomicPainter = 0x01
FullRepaint = 0x02
CopyBackingStore = 0x04
def __init__(self, parent=None):
QObject.__init__(self, parent)
self.__data = QwtPlotDirectPainter_PrivateData()
def setAttribute(self, attribute, on=True):
"""
Change an attribute
:param int attribute: Attribute to change
:param bool on: On/Off
.. seealso::
:py:meth:`testAttribute()`
"""
if self.testAttribute(attribute) != on:
self.__data.attributes |= attribute
else:
self.__data.attributes &= ~attribute
if attribute == self.AtomicPainter and on:
self.reset()
def testAttribute(self, attribute):
"""
:param int attribute: Attribute to be tested
:return: True, when attribute is enabled
.. seealso::
:py:meth:`setAttribute()`
"""
return self.__data.attributes & attribute
def setClipping(self, enable):
"""
En/Disables clipping
:param bool enable: Enables clipping is true, disable it otherwise
.. seealso::
:py:meth:`hasClipping()`, :py:meth:`clipRegion()`,
:py:meth:`setClipRegion()`
"""
self.__data.hasClipping = enable
def hasClipping(self):
"""
:return: Return true, when clipping is enabled
.. seealso::
:py:meth:`setClipping()`, :py:meth:`clipRegion()`,
:py:meth:`setClipRegion()`
"""
return self.__data.hasClipping
def setClipRegion(self, region):
"""
Assign a clip region and enable clipping
Depending on the environment setting a proper clip region might
improve the performance heavily. F.e. on Qt embedded only the clipped
part of the backing store will be copied to a (maybe unaccelerated)
frame buffer device.
:param QRegion region: Clip region
.. seealso::
:py:meth:`hasClipping()`, :py:meth:`setClipping()`,
:py:meth:`clipRegion()`
"""
self.__data.clipRegion = region
self.__data.hasClipping = True
def clipRegion(self):
"""
:return: Return Currently set clip region.
.. seealso::
:py:meth:`hasClipping()`, :py:meth:`setClipping()`,
:py:meth:`setClipRegion()`
"""
return self.__data.clipRegion
def drawSeries(self, seriesItem, from_, to):
"""
Draw a set of points of a seriesItem.
When observing a measurement while it is running, new points have
to be added to an existing seriesItem. drawSeries() can be used to
display them avoiding a complete redraw of the canvas.
Setting `plot().canvas().setAttribute(Qt.WA_PaintOutsidePaintEvent, True)`
will result in faster painting, if the paint engine of the canvas widget
supports this feature.
:param qwt.plot_series.QwtPlotSeriesItem seriesItem: Item to be painted
:param int from_: Index of the first point to be painted
:param int to: Index of the last point to be painted. If to < 0 the series will be painted to its last point.
"""
if seriesItem is None or seriesItem.plot() is None:
return
canvas = seriesItem.plot().canvas()
canvasRect = canvas.contentsRect()
plotCanvas = canvas # XXX: cast to QwtPlotCanvas
if plotCanvas and qwtHasBackingStore(plotCanvas):
painter = QPainter(
plotCanvas.backingStore()
) # XXX: cast plotCanvas.backingStore() to QPixmap
if self.__data.hasClipping:
painter.setClipRegion(self.__data.clipRegion)
qwtRenderItem(painter, canvasRect, seriesItem, from_, to)
painter.end()
if self.testAttribute(self.FullRepaint):
plotCanvas.repaint()
return
immediatePaint = True
if not canvas.testAttribute(Qt.WA_WState_InPaintEvent):
if QT_MAJOR_VERSION >= 5 or not canvas.testAttribute(
Qt.WA_PaintOutsidePaintEvent
):
immediatePaint = False
if immediatePaint:
if not self.__data.painter.isActive():
self.reset()
self.__data.painter.begin(canvas)
canvas.installEventFilter(self)
if self.__data.hasClipping:
self.__data.painter.setClipRegion(
QRegion(canvasRect) & self.__data.clipRegion
)
elif not self.__data.painter.hasClipping():
self.__data.painter.setClipRect(canvasRect)
qwtRenderItem(self.__data.painter, canvasRect, seriesItem, from_, to)
if self.__data.attributes & self.AtomicPainter:
self.reset()
elif self.__data.hasClipping:
self.__data.painter.setClipping(False)
else:
self.reset()
self.__data.seriesItem = seriesItem
self.__data.from_ = from_
self.__data.to = to
clipRegion = QRegion(canvasRect)
if self.__data.hasClipping:
clipRegion &= self.__data.clipRegion
canvas.installEventFilter(self)
canvas.repaint(clipRegion)
canvas.removeEventFilter(self)
self.__data.seriesItem = None
def reset(self):
"""Close the internal QPainter"""
if self.__data.painter.isActive():
w = self.__data.painter.device() # XXX: cast to QWidget
if w:
w.removeEventFilter(self)
self.__data.painter.end()
def eventFilter(self, obj_, event):
if event.type() == QEvent.Paint:
self.reset()
if self.__data.seriesItem:
pe = event # XXX: cast to QPaintEvent
canvas = self.__data.seriesItem.plot().canvas()
painter = QPainter(canvas)
painter.setClipRegion(pe.region())
doCopyCache = self.testAttribute(self.CopyBackingStore)
if doCopyCache:
plotCanvas = canvas # XXX: cast to QwtPlotCanvas
if plotCanvas:
doCopyCache = qwtHasBackingStore(plotCanvas)
if doCopyCache:
painter.drawPixmap(
plotCanvas.rect().topLeft(), plotCanvas.backingStore()
)
if not doCopyCache:
qwtRenderItem(
painter,
canvas.contentsRect(),
self.__data.seriesItem,
self.__data.from_,
self.__data.to,
)
return True
return False
| lgpl-2.1 | 2,105,900,071,676,100,600 | 34.363946 | 117 | 0.575437 | false |
bagage/cadastre-conflation | back/batimap/bbox.py | 1 | 1181 | import re
from math import sqrt
class Bbox(object):
def __init__(self, xmin, ymin, xmax, ymax):
self.coords = [xmin, ymin, xmax, ymax]
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
def __repr__(self):
return f"{self.xmin},{self.ymin},{self.xmax},{self.ymax}"
def max_distance(self):
"""
Maximum distance from the center of the screen that this bbox may reach
"""
return sqrt((self.xmax - self.xmin) ** 2 + (self.ymax - self.ymin) ** 2) / 2
@staticmethod
def from_pg(bbox_string):
# cf https://docs.python.org/3/library/re.html#simulating-scanf
# need to handle 10e3 notation too
float_re = r"([-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?)"
box_re = re.compile(
r"BOX\("
+ float_re
+ " "
+ float_re
+ ","
+ float_re
+ " "
+ float_re
+ r"\)"
)
groups = box_re.match(bbox_string).groups()
return Bbox(
float(groups[0]), float(groups[4]), float(groups[8]), float(groups[12])
)
| mit | -6,715,307,017,670,733,000 | 26.465116 | 84 | 0.485182 | false |
gvallarelli/inasafe | safe_qgis/impact_calculator_thread.py | 1 | 6203 | """
InaSAFE Disaster risk assessment tool developed by AusAid -
**ISImpactCalculatorThread.**
The module provides a high level interface for running SAFE scenarios.
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tim@linfiniti.com, ole.moller.nielsen@gmail.com'
__date__ = '11/01/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import threading
import traceback
import sys
from PyQt4.QtCore import (QObject,
pyqtSignal)
from safe_qgis.safe_interface import calculateSafeImpact
from safe_qgis.exceptions import InsufficientParametersException
class ImpactCalculatorThread(threading.Thread, QObject):
"""A threaded class to compute an impact scenario. Under
python a thread can only be run once, so the instances
based on this class are designed to be short lived.
We inherit from QObject so that we can use Qt translation self.tr
calls and emit signals.
.. todo:: implement this class using QThread as a base class since it
    supports thread termination which python threading doesn't seem to do.
Also see the techbase article below for emitting signals across
threads using Qt.QueuedConnection.
http://techbase.kde.org/Development/Tutorials/
Python_introduction_to_signals_and_slots
Users of this of this class can listen for signals indicating
when processing is done. For example::
from is_impact_calculator_thread import ImpactCalculatorThread
n = ImpactCalculatorThread()
n.done.connect(n.showMessage)
n.done.emit()
Prints 'hello' to the console
.. seealso::
http://techbase.kde.org/Development/Tutorials/
Python_introduction_to_signals_and_slots
for an alternative (maybe nicer?) approach.
"""
done = pyqtSignal()
def showMessage(self):
"""For testing only"""
print 'hello'
def __init__(self, theHazardLayer, theExposureLayer,
theFunction):
"""Constructor for the impact calculator thread.
Args:
* Hazard layer: InaSAFE read_layer object containing the Hazard data.
* Exposure layer: InaSAFE read_layer object containing the Exposure
data.
* Function: a InaSAFE function that defines how the Hazard assessment
will be computed.
Returns:
None
Raises:
InsufficientParametersException if not all parameters are
set.
Requires three parameters to be set before execution
can take place:
"""
threading.Thread.__init__(self)
QObject.__init__(self)
self._hazardLayer = theHazardLayer
self._exposureLayer = theExposureLayer
self._function = theFunction
self._impactLayer = None
self._result = None
self._exception = None
self._traceback = None
def impactLayer(self):
"""Return the InaSAFE layer instance which is the output from the
last run."""
return self._impactLayer
def result(self):
"""Return the result of the last run."""
return self._result
def lastException(self):
"""Return any exception that may have been raised while running"""
return self._exception
def lastTraceback(self):
"""Return the strack trace for any exception that may of occurred
while running."""
return self._traceback
def run(self):
""" Main function for hazard impact calculation thread.
Requires three properties to be set before execution
can take place:
* Hazard layer - a path to a raster,
* Exposure layer - a path to a vector points layer.
* Function - a function that defines how the Hazard assessment
will be computed.
After the thread is complete, you can use the filename and
result accessors to determine what the result of the analysis was::
calculator = ImpactCalculator()
rasterPath = os.path.join(TESTDATA, 'xxx.asc')
vectorPath = os.path.join(TESTDATA, 'xxx.shp')
calculator.setHazardLayer(self.rasterPath)
calculator.setExposureLayer(self.vectorPath)
calculator.setFunction('Flood Building Impact Function')
myRunner = calculator.getRunner()
#wait till completion
myRunner.join()
myResult = myRunner.result()
myFilename = myRunner.filename()
Args:
None.
Returns:
None
Raises:
            InsufficientParametersException if not all parameters are
set.
"""
if (self._hazardLayer is None or self._exposureLayer is None
or self._function is None):
myMessage = self.tr('Ensure that hazard, exposure and function '
'are all set before trying to run the '
'analysis.')
raise InsufficientParametersException(myMessage)
try:
myLayers = [self._hazardLayer, self._exposureLayer]
self._impactLayer = calculateSafeImpact(theLayers=myLayers,
theFunction=self._function)
# Catch and handle all exceptions:
# pylint: disable=W0703
except Exception, e:
myMessage = self.tr('Calculation error encountered:\n')
#store the exception so that controller class can get it later
self._exception = e
self._traceback = traceback.format_tb(sys.exc_info()[2])
print myMessage
self._result = myMessage
else:
self._result = self.tr('Calculation completed successfully.')
# pylint: enable=W0703
# Let any listening slots know we are done
self.done.emit()
| gpl-3.0 | 1,544,441,452,268,091,600 | 34.445714 | 79 | 0.632113 | false |
RPGOne/Skynet | 5230d93ccc9fa5329b0a02a351b02939-459eebff35e625675d2f6ff5633c7051c1d64a0e/gistfile1.py | 1 | 3974 | """
python speedup_kmeans.py --profile
python speedup_kmeans.py
git worktree add workdir_master master
rob sedr "\<sklearn\>" sklearn_master True
git mv sklearn sklearn_master
python setup develop
python -c "import sklearn_master; print(sklearn_master.__file__)"
python -c "import sklearn; print(sklearn.__file__)"
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import sklearn # NOQA
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.extmath import row_norms, squared_norm # NOQA
import sklearn.cluster
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances # NOQA
import sklearn_master.cluster
(print, rrr, profile) = ut.inject2(__name__, '[tester]')
def test_kmeans_plus_plus_speed(n_clusters=2000, n_features=128, per_cluster=10, asint=False, fix=True):
"""
from speedup_kmeans import *
from sklearn.cluster.k_means_ import *
"""
rng = np.random.RandomState(42)
# Make random cluster centers on a ball
centers = rng.rand(n_clusters, n_features)
centers /= np.linalg.norm(centers, axis=0)[None, :]
centers = (centers * 512).astype(np.uint8) / 512
centers /= np.linalg.norm(centers, axis=0)[None, :]
n_samples = int(n_clusters * per_cluster)
n_clusters, n_features = centers.shape
X, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
if asint:
X = (X * 512).astype(np.int32)
x_squared_norms = row_norms(X, squared=True)
if fix:
_k_init = sklearn.cluster.k_means_._k_init
else:
_k_init = sklearn_master.cluster.k_means_._k_init
random_state = np.random.RandomState(42)
n_local_trials = None # NOQA
with ut.Timer('testing kmeans init') as t:
centers = _k_init(X, n_clusters, random_state=random_state, x_squared_norms=x_squared_norms)
return centers, t.ellapsed
def main():
if True:
import pandas as pd
pd.options.display.max_rows = 1000
pd.options.display.width = 1000
basis = {
#'n_clusters': [10, 100, 1000, 2000][::-1],
#'n_features': [4, 32, 128, 512][::-1],
#'per_cluster': [1, 10, 100, 200][::-1],
'n_clusters': [10, 100, 500][::-1],
'n_features': [32, 128][::-1],
'per_cluster': [1, 10, 20][::-1],
'asint': [True, False],
}
vals = []
for kw in ut.ProgIter(ut.all_dict_combinations(basis), lbl='gridsearch',
bs=False, adjust=False, freq=1):
print('kw = ' + ut.repr2(kw))
exec(ut.execstr_dict(kw))
centers1, new_speed = test_kmeans_plus_plus_speed(fix=True, **kw)
centers2, old_speed = test_kmeans_plus_plus_speed(fix=False, **kw)
import utool
with utool.embed_on_exception_context:
assert np.all(centers1 == centers2), 'new code disagrees'
kw['new_speed'] = new_speed
kw['old_speed'] = old_speed
vals.append(kw)
print('---------')
df = pd.DataFrame.from_dict(vals)
df['percent_change'] = 100 * (df['old_speed'] - df['new_speed']) / df['old_speed']
df = df.reindex_axis(list(basis.keys()) + ['new_speed', 'old_speed', 'percent_change'], axis=1)
df['absolute_change'] = (df['old_speed'] - df['new_speed'])
print(df.sort('absolute_change', ascending=False))
#print(df)
print(df['percent_change'][df['absolute_change'] > .1].mean())
#print(df.loc[df['percent_change'].argsort()[::-1]])
else:
new_speed = test_kmeans_plus_plus_speed()
try:
profile.dump_stats('out.lprof')
profile.print_stats(stripzeros=True)
except Exception:
pass
print('new_speed = %r' % (new_speed,))
if __name__ == '__main__':
main()
| bsd-3-clause | -8,699,514,571,054,754,000 | 35.127273 | 104 | 0.593357 | false |
yosi-dediashvili/SubiT | tests/SubProvidersTests/OpenSubtitlesSubProviderTest.py | 1 | 3995 | """
Test classes for OpenSubtitlesProvider.
The classes derives all the test from BaseSubProviderTest.
"""
import unittest
import BaseSubProviderTest
class Test_all_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.all_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_eng_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.eng_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_heb_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.heb_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_nor_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.nor_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_rus_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.rus_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_spa_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.spa_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_tur_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.tur_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_slo_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.slo_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_cze_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.cze_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider())
class Test_bul_OpenSubtitlesProviderTest(
unittest.TestCase, BaseSubProviderTest.BaseSubProviderTest):
def setUp(self):
from SubProviders.OpenSubtitles.bul_OpenSubtitlesProvider import \
OpenSubtitlesProvider
BaseSubProviderTest.BaseSubProviderTest.__init__(
self,
OpenSubtitlesProvider.OpenSubtitlesProvider()) | gpl-3.0 | 4,231,381,265,595,466,000 | 38.373737 | 74 | 0.709387 | false |
igordejanovic/parglare | tests/func/parsing/test_dynamic_disambiguation_filters.py | 1 | 3477 | import pytest # noqa
from parglare import GLRParser, Grammar, Parser, SHIFT, REDUCE
from parglare.exceptions import SRConflicts
grammar = r"""
E: E op_sum E {dynamic}
| E op_mul E {dynamic}
| number;
terminals
number: /\d+/;
op_sum: '+' {dynamic};
op_mul: '*' {dynamic};
"""
instr1 = '1 + 2 * 5 + 3'
instr2 = '1 * 2 + 5 * 3'
actions = {
'E': [lambda _, nodes: nodes[0] + nodes[2],
lambda _, nodes: nodes[0] * nodes[2],
lambda _, nodes: float(nodes[0])]
}
g = Grammar.from_string(grammar)
operations = []
def custom_disambiguation_filter(context, from_state, to_state, action,
production, subresults):
"""
Make first operation that appears in the input as lower priority.
This demonstrates how priority rule can change dynamically depending
on the input or how disambiguation can be decided during parsing.
"""
global operations
# At the start of parsing this function is called with actions set to None
# to give a chance for the strategy to initialize.
if action is None:
operations = []
return
if action is SHIFT:
operation = context.token.symbol
else:
operation = context.token_ahead.symbol
actions = from_state.actions[operation]
if operation not in operations and operation.name != 'STOP':
operations.append(operation)
if action is SHIFT:
shifts = [a for a in actions if a.action is SHIFT]
if not shifts:
return False
reductions = [a for a in actions if a.action is REDUCE]
if not reductions:
return True
red_op = reductions[0].prod.rhs[1]
return operations.index(operation) > operations.index(red_op)
elif action is REDUCE:
# Current reduction operation
red_op = production.rhs[1]
# If operation ahead is STOP or is of less or equal priority -> reduce.
return ((operation not in operations)
or (operations.index(operation)
<= operations.index(red_op)))
def test_dynamic_disambiguation():
"""
Test disambiguation determined at run-time based on the input.
This tests LR parsing.
"""
# This grammar is ambiguous if no prefer_shift strategy is used.
with pytest.raises(SRConflicts):
Parser(g, prefer_shifts=False)
# But if we provide dynamic disambiguation filter
# the conflicts can be handled at run-time.
p = Parser(g, actions=actions, prefer_shifts=False,
dynamic_filter=custom_disambiguation_filter)
# * operation will be of higher priority as it appears later in the stream.
result1 = p.parse(instr1)
assert result1 == 1 + (2 * 5) + 3
# + operation will be of higher priority here.
result2 = p.parse(instr2)
assert result2 == 1 * (2 + 5) * 3
def test_dynamic_disambiguation_glr():
"""
Test disambiguation determined at run-time based on the input.
This tests GLR parsing.
"""
p = GLRParser(g, actions=actions,
dynamic_filter=custom_disambiguation_filter)
# * operation will be of higher priority as it appears later in the stream.
result1 = p.parse(instr1)
assert len(result1) == 1
assert p.call_actions(result1[0]) == 1 + (2 * 5) + 3
# + operation will be of higher priority here.
result2 = p.parse(instr2)
assert len(result2) == 1
assert p.call_actions(result2[0]) == 1 * (2 + 5) * 3
| mit | -6,702,298,791,674,917,000 | 28.218487 | 79 | 0.633592 | false |
sparbz/nba-stats | nba_stats/nba_stats/settings/base.py | 1 | 2016 | """
Django settings for nba_stats project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
from unipath import Path
import dj_database_url
PROJECT_DIR = Path(__file__).ancestor(4)
BASE_DIR = Path(__file__).ancestor(3)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'yw7g2t1n0(aj6t&$vnknr@zxv^x*&jp*ej*f$(#0-+ow4q_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'rest_framework',
'south',
'nba'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'nba_stats.urls'
WSGI_APPLICATION = 'nba_stats.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config(default='sqlite:///{base}/db.sqlite3'.format(base=BASE_DIR))
}
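# dj_database_url reads the DATABASE_URL environment variable when it is set,
# e.g. DATABASE_URL=postgres://user:password@localhost:5432/nba_stats
# (credentials above are placeholders for illustration only).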
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit | 3,593,085,295,024,866,000 | 23 | 98 | 0.724702 | false |
schapman1974/tinymongo | setup.py | 1 | 2500 | import io
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
def read(*names, **kwargs):
"""Read a file."""
return io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get('encoding', 'utf8')
).read()
def parse_md_to_rst(file):
"""Read Markdown file and convert to ReStructured Text."""
try:
from m2r import parse_from_file
return parse_from_file(file).replace(
"artwork/", "http://198.27.119.65/"
)
except ImportError:
# m2r may not be installed in user environment
return read(file)
class PyTest(TestCommand):
"""PyTest cmdclass hook for test-at-buildtime functionality
http://doc.pytest.org/en/latest/goodpractices.html#manual-integration
"""
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = [
'tests/',
'-rx'
] #load defaults here
def run_tests(self):
import shlex
#import here, cause outside the eggs aren't loaded
import pytest
pytest_commands = []
try: #read commandline
pytest_commands = shlex.split(self.pytest_args)
except AttributeError: #use defaults
pytest_commands = self.pytest_args
errno = pytest.main(pytest_commands)
exit(errno)
setup(
name='tinymongo',
packages=find_packages(),
version='0.2.1',
description='A flat file drop in replacement for mongodb. Requires Tinydb',
author='Stephen Chapman, Jason Jones',
author_email='schapman1974@gmail.com',
url='https://github.com/schapman1974/tinymongo',
download_url='https://github.com/schapman1974/tinymongo/archive/master.zip',
keywords=['mongodb', 'drop-in', 'database', 'tinydb'],
long_description=parse_md_to_rst("README.md"),
classifiers=[
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=[
'tinydb>=3.2.1',
'tinydb_serialization>=1.0.4',
'pymongo>=3.4.0'
],
tests_require=[
'pytest>=3.2.0',
'py>=1.4.33'
],
cmdclass={
'test':PyTest
}
)
| mit | 2,156,057,822,265,391,400 | 29.120482 | 80 | 0.606 | false |
nmetts/sp2016-csci7000-bda-project | preprocess.py | 1 | 7507 | '''
Created on Mar 5, 2016
A module for preprocessing raw data files from the KDD Cup 2009 dataset.
@author: Nicolas Metts
'''
import csv
import numpy as np
import argparse
import os
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import Imputer
def __sklearn_preprocess(data_file_name, fill_in, processed_file_name,
categorical_index):
"""
Use sklearn preprocessing module to preprocess raw data file. This function
fills in missing numerical values with the given fill_in strategy. In addition,
categorical features are transformed to indices.
Args:
data_file_name(str): The path to the raw data file
fill_in(str): The fill-in strategy to use
processed_file_name(str): The name (including path) of the resulting processed file
categorical_index(int): The index where categorical features begin
"""
data_file = open(data_file_name)
data = list(data_file.readlines())[1:]
data_file.close()
numerical_features = []
categorical_features = []
for line in data:
features = line.split("\t")
numerical_features.append([np.nan if x == '' else float(x) for x in features[0:categorical_index]])
# How should we fill in missing categorical features?
categorical_features.append(['Unknown' if x == '' else x for x in features[categorical_index:]])
numerical_features = np.array(numerical_features)
categorical_features = np.array(categorical_features)
num_cat_features = categorical_features.shape[1]
new_cat_features = []
# Transform strings into numerical values by column
for i in range(num_cat_features):
le = LabelEncoder()
col = categorical_features[:,i]
le.fit(col)
new_cat_features.append(list(le.transform(col)))
new_cat_features = np.array(new_cat_features).transpose()
imp = Imputer(missing_values='NaN', strategy=fill_in, axis=1)
imp.fit(numerical_features)
numerical_features_filled_in = imp.transform(numerical_features)
print "Missing numerical values filled in"
#enc = OneHotEncoder()
#enc.fit(new_cat_features)
#categorical_transformed = enc.transform(new_cat_features).toarray()
# Note: Using OneHotEncoder absolutely explodes the number of columns and
# thus the data size. Will likely need to find a different approach.
print "Categorical features encoded"
print "Numerical shape is: " + str(numerical_features_filled_in.shape)
print "Categorical shape is: " + str(new_cat_features.shape)
all_features = np.concatenate((numerical_features_filled_in, new_cat_features), axis=1)
num_features = all_features.shape[1]
print "There are: " + str(num_features) + " features"
header = ["Feature" + str(x) for x in range(num_features)]
dir_name = os.path.dirname(data_file_name)
print "Creating file: " + dir_name + "/" + processed_file_name
processed_file = open(dir_name + "/" + processed_file_name, 'w')
writer = csv.writer(processed_file)
writer.writerow(header)
for feature in all_features:
writer.writerow(feature)
processed_file.close()
print "Pre-Processed file completed"
def __pandas_preprocess(data_file_name, categorical_index, num_features,
processed_file_name):
"""
A function to preprocess a file using Pandas. Columns with less than 10% of
rows containing data are dropped, as are columns with a standard deviation
of 0. Categorical features are transformed using a one hot approach, with a
column for NA values.
Args:
data_file_name(str): The path to the raw data file
        categorical_index(int): The index where categorical features begin
num_features(int): The number of features in the data file
processed_file_name(str): The name (including path) of the resulting processed file
"""
data = pd.read_csv(data_file_name, sep="\t")
numerical_columns = ["Var" + str(i) for i in range(1, categorical_index + 1, 1)]
categorical_columns = ["Var" + str(i) for i in range(categorical_index, num_features + 1, 1)]
remove = []
count = data.count(axis=0)
print "Removing extraneous columns"
for col in data.columns:
if col in numerical_columns:
# Remove columns with constant values
if data[col].std() == 0:
remove.append(col)
# Remove columns where less than 20% of rows contain data
if count[col] < 20000:
remove.append(col)
remove = set(remove)
data.drop(remove, axis=1, inplace=True)
numerical_features = pd.DataFrame()
for numerical_column in numerical_columns:
if numerical_column in data:
feature = data[numerical_column]
print "Filling in missing values for: " + numerical_column
feature.fillna(data[numerical_column].mean(), inplace=True)
numerical_features = pd.concat([numerical_features, feature], axis=1)
data.drop(numerical_column, axis=1, inplace=True)
cat_features = pd.DataFrame()
print "Transforming categorical data"
for column in categorical_columns:
if column in data:
print "Transforming column: " + column
feature = data[column]
counts = feature.value_counts()
# Following procedure used by winning KDD Cup 2009 team and only
# keeping the top 10 categorical features
if len(counts) > 10:
least_used_counts = feature.value_counts()[10:]
least_used = [x[0] for x in least_used_counts.iteritems()]
feature.replace(to_replace=least_used, value="other", inplace=True)
feature_transformed = pd.get_dummies(feature, dummy_na=True,
prefix=column)
cat_features = pd.concat([cat_features, feature_transformed], axis=1)
data.drop(column, axis=1, inplace=True)
data = pd.concat([numerical_features, cat_features], axis=1)
print "Preprocessed DataFrame info: "
print data.info()
dir_name = os.path.dirname(data_file_name)
print "Writing file: " + dir_name + "/" + processed_file_name
data.to_csv(dir_name + "/" + processed_file_name, index=False)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("--data_file", help="Name of data file",
type=str, default="../Data/orange_small_train.data",
required=True)
argparser.add_argument("--processed_file", help="Name of processed file",
type=str, default="../Data/orange_small_train_proc.data",
required=True)
argparser.add_argument("--fill-in",
choices = ["median", "mean", "most_frequent"])
argparser.add_argument("--use_library", choices=["sklearn", "pandas"])
args = argparser.parse_args()
num_features = 230
categorical_index = 190
if 'large' in args.data_file:
categorical_index = 14740
num_features = 15000
if args.use_library == "sklearn":
__sklearn_preprocess(args.data_file, args.fill_in, args.processed_file, categorical_index)
elif args.use_library == "pandas":
__pandas_preprocess(args.data_file, categorical_index, num_features, args.processed_file)
| mit | -7,696,255,571,461,687,000 | 44.49697 | 107 | 0.650726 | false |
miquelcampos/GEAR_mc | gear/xsi/rig/component/eyelid_01/guide.py | 1 | 7407 | '''
This file is part of GEAR_mc.
GEAR_mc is a fork of Jeremie Passerin's GEAR project.
GEAR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/lgpl.html>.
Author: Jeremie Passerin geerem@hotmail.com www.jeremiepasserin.com
Fork Author: Miquel Campos hello@miqueltd.com www.miqueltd.com
Date: 2013 / 08 / 16
'''
## @package gear.xsi.rig.component.eyelid_01.guide
# @author Miquel Campos
#
##########################################################
# GLOBAL
##########################################################
# gear
from gear.xsi import xsi, c, XSIMath
from gear.xsi.rig.component.guide import ComponentGuide
import gear.xsi.applyop as aop
# guide info
AUTHOR = "Miquel Campos "
URL = "http://www.miqueltd.com"
EMAIL = "hello@miqueltd.com"
VERSION = [1,0,0]
TYPE = "eyelid_01"
NAME = "eyelid"
DESCRIPTION = "eyelids rig"
##########################################################
# CLASS
##########################################################
class Guide(ComponentGuide):
compType = TYPE
compName = NAME
description = DESCRIPTION
author = AUTHOR
url = URL
email = EMAIL
version = VERSION
# =====================================================
##
# @param self
def postInit(self):
self.pick_transform = ["root", "#_loc"]
self.save_transform = ["root", "upVector", "direction", "#_loc"]
self.save_blade = ["blade"]
self.addMinMax("#_loc", 1, -1)
# =====================================================
## Add more object to the object definition list.
# @param self
def addObjects(self):
self.root = self.addRoot()
self.locs = self.addLocMulti("#_loc", self.root, False)
vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value +2, self.root.Kinematics.Global.PosZ.Value )
self.upVector = self.addLoc("upVector", self.root, vTemp )
vTemp = XSIMath.CreateVector3(self.root.Kinematics.Global.PosX.Value , self.root.Kinematics.Global.PosY.Value , self.root.Kinematics.Global.PosZ.Value +2 )
self.direction = self.addLoc("direction", self.root, vTemp )
centers = [self.direction, self.root, self.upVector]
self.dispcrv = self.addDispCurve("crvUp", centers)
self.blade = self.addBlade("blade", self.root, self.upVector)
centers = []
centers.extend(self.locs)
self.dispcrv = self.addDispCurve("crv", centers)
# =====================================================
## Add more parameter to the parameter definition list.
# @param self
def addParameters(self):
# eye corners controlers
self.pCornerA = self.addParam("cornerARef", c.siInt4, None, 0, None)
self.pCornerAArray = self.addParam("cornerARefArray", c.siString, "")
self.pCornerB = self.addParam("cornerBRef", c.siInt4, None, 0, None)
self.pCornerBArray = self.addParam("cornerBRefArray", c.siString, "")
# =====================================================
## Add layout for new parameters.
# @param self
def addLayout(self):
# --------------------------------------------------
# Items
cornerAItemsCode = "cornerARefItems = []" +"\r\n"+\
"if PPG."+self.pCornerAArray.scriptName+".Value:" +"\r\n"+\
" a = PPG."+self.pCornerAArray.scriptName+".Value.split(',')" +"\r\n"+\
" for i, v in enumerate(a):" +"\r\n"+\
" cornerARefItems.append(a[i])" +"\r\n"+\
" cornerARefItems.append(i)" +"\r\n"+\
"item.UIItems = cornerARefItems" +"\r\n"
cornerBItemsCode = "cornerBRefItems = []" +"\r\n"+\
"if PPG."+self.pCornerBArray.scriptName+".Value:" +"\r\n"+\
" a = PPG."+self.pCornerBArray.scriptName+".Value.split(',')" +"\r\n"+\
" for i, v in enumerate(a):" +"\r\n"+\
" cornerBRefItems.append(a[i])" +"\r\n"+\
" cornerBRefItems.append(i)" +"\r\n"+\
"item.UIItems = cornerBRefItems" +"\r\n"
# --------------------------------------------------
# Layout
tab = self.layout.addTab("Options")
# IK/Upv References
group = tab.addGroup("Eyelids controls")
row = group.addRow()
item = row.addEnumControl(self.pCornerA.scriptName, [], "Corner control A", c.siControlCombo)
item.setCodeAfter(cornerAItemsCode)
row.addButton("PickCornerARef", "Pick New")
row.addButton("DeleteCornerARef", "Delete")
row = group.addRow()
item = row.addEnumControl(self.pCornerB.scriptName, [], "Corner control B", c.siControlCombo)
item.setCodeAfter(cornerBItemsCode)
row.addButton("PickCornerBRef", "Pick New")
row.addButton("DeleteCornerBRef", "Delete")
# =====================================================
## Add logic for new layout.
# @param self
def addLogic(self):
self.logic.addGlobalCode("from gear.xsi.rig.component import logic\r\nreload(logic)")
self.logic.addOnClicked("PickCornerARef",
"prop = PPG.Inspected(0)\r\n" +
"logic.pickReferences(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("DeleteCornerARef",
"prop = PPG.Inspected(0)\r\n" +
"logic.deleteReference(prop, '"+self.pCornerAArray.scriptName+"', '"+self.pCornerA.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("PickCornerBRef",
"prop = PPG.Inspected(0)\r\n" +
"logic.pickReferences(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
self.logic.addOnClicked("DeleteCornerBRef",
"prop = PPG.Inspected(0)\r\n" +
"logic.deleteReference(prop, '"+self.pCornerBArray.scriptName+"', '"+self.pCornerB.scriptName+"')\r\n" +
"PPG.Refresh() \r\n")
| lgpl-3.0 | -5,199,398,780,847,984,000 | 39.26087 | 164 | 0.509788 | false |
galaxy-modal/transcriptomics | galaxy-tools/stderr_wrapper.py | 1 | 1676 | #!/usr/bin/python
"""
Wrapper that executes a program with its arguments but reports standard error
messages only if the program exit status was not 0. This is useful to prevent
Galaxy to interpret that there was an error if something was printed on stderr,
e.g. if this was simply a warning.
Example: ./stderr_wrapper.py myprog arg1 -f arg2
Author: Florent Angly
"""
import sys, subprocess
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
def __main__():
# Get command-line arguments
args = sys.argv
# Remove name of calling program, i.e. ./stderr_wrapper.py
args.pop(0)
# If there are no arguments left, we're done
if len(args) == 0:
return
# If one needs to silence stdout
args.append( ">" )
args.append( "/dev/null" )
#cmdline = " ".join(args)
#print cmdline
try:
# Run program
proc = subprocess.Popen( args=args, shell=False, stderr=subprocess.PIPE )
returncode = proc.wait()
# Capture stderr, allowing for case where it's very large
stderr = ''
buffsize = 1048576
try:
while True:
stderr += proc.stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
# Running Grinder failed: write error message to stderr
if returncode != 0:
raise Exception, stderr
except Exception, e:
# Running Grinder failed: write error message to stderr
stop_err( 'Error: ' + str( e ) )
if __name__ == "__main__": __main__()
| gpl-2.0 | -7,320,840,070,516,737,000 | 28.403509 | 81 | 0.601432 | false |
tofumatt/quotes | lib/automatic_timestamps/tests.py | 1 | 1245 | from django.test import TestCase
from django.db import models
from automatic_timestamps.models import TimestampModel
from time import sleep
class GenericTimestampTestModel(TimestampModel):
"""A generic, boring model to test timestamp creation against."""
pass
class TimestampModelTest(TestCase):
def test_timestamps_are_saved_automatically(self):
"""Test that timestamps are set when a model is saved."""
model = GenericTimestampTestModel()
model.save()
self.assertTrue(model.created_at)
self.assertTrue(model.updated_at)
def test_timestamp_is_updated(self):
"""Test that the updated_at field is set on save()."""
model = GenericTimestampTestModel()
model.save()
last_time_saved = model.updated_at
sleep(1)
model.save()
self.assertNotEqual(last_time_saved, model.updated_at)
def test_created_timestamp_is_not_updated(self):
"""
Test that the created_at field is not changed on subsequent saves.
"""
model = GenericTimestampTestModel()
model.save()
created = model.created_at
sleep(1)
model.save()
self.assertEqual(created, model.created_at)
| mit | 3,187,866,757,677,983,000 | 24.9375 | 74 | 0.659438 | false |
doirisks/dori | models/10.1016:j.jacc.2013.11.013/config_gener_a.py | 1 | 7083 | # -*- coding: utf-8 -*-
# a template for making config.json files for functions
#import pprint
#pp = pprint.PrettyPrinter(indent=4)
config = {}
# human and machine readable names for the model
config['id'] = {}
config['id']['DOI'] = '10.1016/j.jacc.2013.11.013'
config['id']['papertitle'] = 'Factors Associated With Major Bleeding Events: Insights From the ROCKET AF Trial'
config['id']['modeltitle'] = 'Cox Model for Stroke Risk in New-Onset Atrial Fibriallation' #TODO
config['id']['yearofpub'] = '2014'
config['id']['authors'] = ['Goodman, Shaun G.', 'Wojdyla, Daniel M.', 'Piccini, Jonathan P.',
'White, Harvey D.', 'Paolini, John F.', 'Nessel, Christopher C.', 'Berkowitz, Scott D.', 'Mahaffey, Kenneth W.', 'Patel, Manesh R.', 'Sherwood, Matthew W.', 'Becker, Richard C.', 'Halperin, Jonathan L.', 'Hacke, Werner', 'Singer, Daniel E.','Hankey, Graeme J.', 'Breithardt, Gunter', 'Fox, Keith A. A.', 'Califf, Robert M.']
# population constraints
config['population'] = {}
config['population']['must'] = ['']#['New-Onset Atrial Fibrillation']
config['population']['mustnot'] = ['Treated with Warfarin'] #['Prior Atrial Fibrillation', 'Treated with Warfarin']
config['population']['mustCUI'] = [''] #['NOCUI'] #C0004238 "new-onset" is NOT accounted for.
config['population']['mustnotCUI'] = ['C1532949'] #['NOCUI', 'C1532949'] #C0004238 "prior" is NOT accounted for.
# human and machine readable input descriptions
config['input'] = {}
config['input']['name'] = ['Male Sex','Age', 'Diastolic Blood Pressure', 'Chronic Obstructive Pulmonary Disease', 'Anemia', 'History of Gastrointestinal Bleeding', 'Aspirin']
config['input']['description'] = [
'Male Sex',
'Age',
'Diastolic Blood Pressure',
'Chronic Obstructive Pulmonary Disease (COPD)',
'Anemia at Baseline',
'Prior Gastrointestinal Bleed',
'Prior Aspirin (ASA) Use'
]
config['input']['CUI'] = ['C0086582','C0804405','C0488052','C0024117','C0002871','C0559225','C1277232']
config['input']['units'] = ['','years','mmHg','','','','']
config['input']['datatype'] = ['bool','float','float','bool','bool','bool','bool']
config['input']['upper'] = ['','94','200','','','','']
config['input']['lower'] = ['','55','30','','','','']
# human and machine readable output descriptions
config['output'] = {}
config['output']['name'] = '2Y Stroke Risk after New-Onset Atrial Fibrillation'
config['output']['outcomeName'] = 'Stroke'
config['output']['outcomeTime'] = '2'
config['output']['CUI'] = 'C3166383'
config['output']['outcomeCUI'] = 'C0038454'
# tabular or machine readable data available for download
config['data'] = {}
config['data']['filename'] = [''] # name tabular data file ['All of the Data']
config['data']['fileurl'] = [''] # some kind of pointer? ['/var/www/models/99.9999:aaa.a9/all.Rdata']
config['data']['datumname'] = ['Total Patients Randomized'] # important data for easy access ['Sample Size']
config['data']['datum'] = ['14264'] # values for important data ['8,000,000,000']
# model function and dependencies
config['model'] = {}
config['model']['language'] = 'R' # function's language 'python'
config['model']['uncompiled'] = ['model_a.R'] # some kind of pointer? ['model.py']
config['model']['compiled'] = ['model_a.Rdata','model_df_a.Rdata'] # some kind of pointer? ['']
config['model']['dependList'] = 'requirements.txt' # some kind of pointer? 'requirements.txt'
config['model']['example'] = ['example_a.R'] # some kind of pointer? ['example.py']
# I do not know what this would be used for
config['model_category'] = ['prognostic'] #choices: 'diagnostic','prognostic'
# I do not know what these are for...
config['predictive_ability'] = {}
config['predictive_ability']['type'] = []
config['predictive_ability']['metric'] = []
config['predictive_ability']['value'] = []
config['predictive_ability']['lcl'] = []
config['predictive_ability']['ucl'] = []
config_name = 'config_a'
config['config'] = config_name + '.json'
# dump json config file
import json
with open(config_name + '.json','w') as output:
json.dump(config,output)
# dump sql config file
import sql
models_table = sql.Table('models')
modvalues = [
config['id']['DOI'],
config['id']['papertitle'],
config['id']['modeltitle'],
config['id']['yearofpub'],
json.dumps(config['id']['authors']),
json.dumps(config['population']['must']),
json.dumps(config['population']['mustnot']),
json.dumps(config['population']['mustCUI']),
json.dumps(config['population']['mustnotCUI']),
json.dumps(config['input']['name']),
json.dumps(config['input']['description']),
json.dumps(config['input']['CUI']),
json.dumps(config['input']['units']),
json.dumps(config['input']['datatype']),
json.dumps(config['input']['upper']),
json.dumps(config['input']['lower']),
config['output']['name'],
config['output']['outcomeName'],
config['output']['outcomeTime'],
config['output']['CUI'],
config['output']['outcomeCUI'],
json.dumps(config['data']['filename']),
json.dumps(config['data']['fileurl']),
json.dumps(config['data']['datumname']),
json.dumps(config['data']['datum']),
config['model']['language'],
json.dumps(config['model']['uncompiled']),
json.dumps(config['model']['compiled']),
config['model']['dependList'],
json.dumps(config['model']['example']),
json.dumps(config['model_category']),
json.dumps(config['predictive_ability']['type']),
json.dumps(config['predictive_ability']['metric']),
json.dumps(config['predictive_ability']['value']),
json.dumps(config['predictive_ability']['lcl']),
json.dumps(config['predictive_ability']['ucl']),
config['config']
]
columns = [models_table.DOI,models_table.papertitle, models_table.modeltitle, models_table.yearofpub, models_table.authors, models_table.must, models_table.mustnot,models_table.mustCUI, models_table.mustnotCUI, models_table.inpname, models_table.inpdesc, models_table.inpCUI,models_table.inpunits,models_table.inpdatatype, models_table.upper, models_table.lower, models_table.output, models_table.outcome,models_table.outcometime, models_table.outputCUI, models_table.outcomeCUI, models_table.filename,models_table.filepointer, models_table.datumname,models_table.datum, models_table.language,models_table.uncompiled,models_table.compiled,models_table.dependList,models_table.example, models_table.model_category,models_table.type,models_table.metric,models_table.value, models_table.lcl, models_table.ucl, models_table.config, models_table.numofinputs]
# numofinputs was added after the fact!
for i in range(len(modvalues)):
modvalues[i] = modvalues[i].replace("'","''")
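# Doubling single quotes is the usual SQL string-literal escape, e.g. a value such as
# "Goodman's trial" becomes "Goodman''s trial" before it is spliced into the INSERT below.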
insertion = models_table.insert(columns = columns, values = [ modvalues + [len(config['input']['CUI'])] ])
model_tup = tuple(insertion)
query = model_tup[0].replace('%s',"'%s'").replace('"','')
query = query % tuple(model_tup[1])
#query = format(model_tup[0],*model_tup[1])
print(query + ';\n')
| gpl-3.0 | 7,234,410,869,433,950,000 | 44.403846 | 855 | 0.659184 | false |
niavok/perroquet | perroquetlib/gui/gui_sequence_properties_advanced.py | 1 | 18351 | # -*- coding: utf-8 -*-
# Copyright (C) 2009-2011 Frédéric Bertolus.
#
# This file is part of Perroquet.
#
# Perroquet is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Perroquet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Perroquet. If not, see <http://www.gnu.org/licenses/>.
import gettext
import os
import gtk
from perroquetlib.config import config
from perroquetlib.model.exercise import Exercise
from perroquetlib.model.languages_manager import LanguagesManager
from perroquetlib.model.sub_exercise import SubExercise
_ = gettext.gettext
class GuiSequencePropertiesAdvanced:
def __init__(self, core, parent):
self.core = core
self.config = config
self.parent = parent
self.builder = gtk.Builder()
self.builder.set_translation_domain("perroquet")
self.builder.add_from_file(self.config.get("ui_sequence_properties_advanced_path"))
self.builder.connect_signals(self)
self.dialog = self.builder.get_object("dialogExercisePropertiesAdvanced")
self.treeviewPathsList = self.builder.get_object("treeviewPathsList")
self.dialog.set_modal(True)
self.dialog.set_transient_for(self.parent)
self.iterPath = None
def run(self):
self.load()
self.dialog.run()
self.dialog.destroy()
def load(self):
exercise = self.core.get_exercise()
if len(exercise.subExercisesList) > 0:
self.__load_path(exercise.subExercisesList[0].get_video_path(), exercise.subExercisesList[0].get_exercise_path(), exercise.subExercisesList[0].get_translation_path())
else:
            self.__load_path("", "", "")
self.pathListStore = gtk.ListStore(str, str, str, str)
for subExercise in exercise.subExercisesList:
name = os.path.basename(subExercise.get_video_path())
self.pathListStore.append([name, subExercise.get_video_path(), subExercise.get_exercise_path(), subExercise.get_translation_path()])
cell = gtk.CellRendererText()
treeviewcolumnPath = gtk.TreeViewColumn(_("Path"))
treeviewcolumnPath.pack_start(cell, True)
treeviewcolumnPath.add_attribute(cell, 'markup', 0)
treeviewcolumnPath.set_expand(True)
columns = self.treeviewPathsList.get_columns()
for column in columns:
self.treeviewPathsList.remove_column(column)
self.treeviewPathsList.append_column(treeviewcolumnPath)
self.treeviewPathsList.set_model(self.pathListStore)
self.treeviewSelectionPathsList = self.treeviewPathsList.get_selection()
self.iterPath = self.pathListStore.get_iter_first()
self.treeviewSelectionPathsList.select_iter(self.iterPath)
checkbuttonRepeatAfterComplete = self.builder.get_object("checkbuttonRepeatAfterComplete")
checkbuttonRepeatAfterComplete.set_active(self.core.get_exercise().get_repeat_after_completed())
checkbuttonUseDynamicCorrection = self.builder.get_object("checkbuttonUseDynamicCorrection")
checkbuttonUseDynamicCorrection.set_active(self.core.get_exercise().is_use_dynamic_correction())
checkbuttonRandomOrder = self.builder.get_object("checkbuttonRandomOrder")
checkbuttonRandomOrder.set_active(self.core.get_exercise().is_random_order())
checkbutton_disable_help = self.builder.get_object("checkbutton_disable_help")
checkbutton_disable_help.set_active(self.core.get_exercise().is_lock_help())
self.liststoreLanguage = gtk.ListStore(str, str)
languageManager = LanguagesManager()
languagesList = languageManager.get_languages_list()
currentLangId = self.core.get_exercise().get_language_id()
for language in languagesList:
iter = self.liststoreLanguage.append([language.name, language.id])
if language.id == currentLangId:
currentIter = iter
comboboxLanguage = self.builder.get_object("comboboxLanguage")
cell = gtk.CellRendererText()
comboboxLanguage.set_model(self.liststoreLanguage)
comboboxLanguage.pack_start(cell, True)
comboboxLanguage.add_attribute(cell, 'text', 0)
comboboxLanguage.set_active_iter(currentIter)
adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence")
adjustmentTimeBetweenSequence.set_value(self.core.get_exercise().get_time_between_sequence())
adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime")
adjustmentMaximumSequenceTime.set_value(self.core.get_exercise().get_max_sequence_length())
adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence")
adjustmentTimeBeforeSequence.set_value(self.core.get_exercise().get_play_margin_before())
adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence")
adjustmentTimeAfterSequence.set_value(self.core.get_exercise().get_play_margin_after())
entryExerciseName = self.builder.get_object("entryExerciseName")
if self.core.get_exercise().get_name():
entryExerciseName.set_text(self.core.get_exercise().get_name())
else:
entryExerciseName.set_text("")
entryRepeatCountLimit = self.builder.get_object("entryRepeatCountLimit")
entryRepeatCountLimit.set_text(str(self.core.get_exercise().get_repeat_count_limit_by_sequence()))
#Locks
checkbutton_lock_properties = self.builder.get_object("checkbutton_lock_properties")
checkbutton_lock_properties.set_active(self.core.get_exercise().is_lock_properties())
checkbutton_lock_correction = self.builder.get_object("checkbutton_lock_correction")
checkbutton_lock_correction.set_active(self.core.get_exercise().is_lock_correction())
self._update_path_buttons()
def __load_path(self, videoPath, exercisePath, translationPath):
if videoPath == "":
videoPath = "None"
if exercisePath == "":
exercisePath = "None"
if translationPath == "":
translationPath = "None"
videoChooser = self.builder.get_object("filechooserbuttonVideoProp")
exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp")
translationChooser = self.builder.get_object("filechooserbuttonTranslationProp")
videoChooser.set_filename(videoPath)
exerciseChooser.set_filename(exercisePath)
translationChooser.set_filename(translationPath)
if videoPath and os.path.isfile(videoPath):
filePath = os.path.dirname(videoPath)
if not exercisePath or not os.path.isfile(exercisePath):
exerciseChooser.set_current_folder(filePath)
if not translationPath or not os.path.isfile(translationPath):
translationChooser.set_current_folder(filePath)
def on_treeview_paths_list_cursor_changed(self, widget, data=None):
(modele, iter) = self.treeviewSelectionPathsList.get_selected()
self.__store_path_changes()
self.iterPath = iter
self._update_path_buttons()
if iter == None:
return
videoPath, exercisePath, translationPath = modele.get(iter, 1, 2, 3)
self.__load_path(videoPath, exercisePath, translationPath)
def _update_path_buttons(self):
if self.iterPath == None:
buttonRemovePath = self.builder.get_object("buttonRemovePath")
buttonRemovePath.set_sensitive(False)
buttonUpPath = self.builder.get_object("buttonUpPath")
buttonUpPath.set_sensitive(False)
buttonDownPath = self.builder.get_object("buttonDownPath")
buttonDownPath.set_sensitive(False)
else:
buttonRemovePath = self.builder.get_object("buttonRemovePath")
buttonRemovePath.set_sensitive(True)
buttonUpPath = self.builder.get_object("buttonUpPath")
if self.previous_iter(self.pathListStore, self.iterPath) == None:
buttonUpPath.set_sensitive(False)
else:
buttonUpPath.set_sensitive(True)
buttonDownPath = self.builder.get_object("buttonDownPath")
if self.pathListStore.iter_next(self.iterPath) == None:
buttonDownPath.set_sensitive(False)
else:
buttonDownPath.set_sensitive(True)
def on_button_exercise_prop_ok_clicked(self, widget, data=None):
self.__store_path_changes()
checkbuttonRepeatAfterComplete = self.builder.get_object("checkbuttonRepeatAfterComplete")
self.core.get_exercise().set_repeat_after_completed(checkbuttonRepeatAfterComplete.get_active())
checkbuttonUseDynamicCorrection = self.builder.get_object("checkbuttonUseDynamicCorrection")
self.core.get_exercise().set_use_dynamic_correction(checkbuttonUseDynamicCorrection.get_active())
checkbuttonRandomOrder = self.builder.get_object("checkbuttonRandomOrder")
self.core.get_exercise().set_random_order(checkbuttonRandomOrder.get_active())
comboboxLanguage = self.builder.get_object("comboboxLanguage")
self.liststoreLanguage.get_iter_first()
iter = comboboxLanguage.get_active_iter()
langId = self.liststoreLanguage.get_value(iter, 1)
self.core.get_exercise().set_language_id(langId)
adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence")
self.core.get_exercise().set_time_between_sequence(adjustmentTimeBetweenSequence.get_value())
adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime")
self.core.get_exercise().set_max_sequence_length(adjustmentMaximumSequenceTime.get_value())
adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence")
self.core.get_exercise().set_play_margin_before(int(adjustmentTimeBeforeSequence.get_value()))
adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence")
self.core.get_exercise().set_play_margin_after(int(adjustmentTimeAfterSequence.get_value()))
entryExerciseName = self.builder.get_object("entryExerciseName")
self.core.get_exercise().set_name(entryExerciseName.get_text())
entryRepeatCountLimit = self.builder.get_object("entryRepeatCountLimit")
self.core.get_exercise().set_repeat_count_limit_by_sequence(int(entryRepeatCountLimit.get_text()))
entryRepeatCountLimit.set_text(str(self.core.get_exercise().get_repeat_count_limit_by_sequence()))
if self.core.get_exercise().get_repeat_count_limit_by_sequence() == 0:
self.core.get_exercise().clear_sequence_repeat_count()
#Locks
checkbutton_disable_help = self.builder.get_object("checkbutton_disable_help")
self.core.get_exercise().set_lock_help(checkbutton_disable_help.get_active())
checkbutton_lock_properties = self.builder.get_object("checkbutton_lock_properties")
lock_properties = checkbutton_lock_properties.get_active()
entry_lock_properties = self.builder.get_object("entry_lock_properties")
lock_properties_password = entry_lock_properties.get_text()
if len(lock_properties_password) == 0:
lock_properties_password = None
if lock_properties != self.core.get_exercise().is_lock_properties() or lock_properties_password is not None:
self.core.get_exercise().set_lock_properties(lock_properties, lock_properties_password)
checkbutton_lock_correction = self.builder.get_object("checkbutton_lock_correction")
lock_correction = checkbutton_lock_correction.get_active()
entry_lock_correction = self.builder.get_object("entry_lock_correction")
lock_correction_password = entry_lock_correction.get_text()
if len(lock_correction_password) == 0:
lock_correction_password = None
if lock_correction != self.core.get_exercise().is_lock_correction() or lock_correction_password is not None:
self.core.get_exercise().set_lock_correction(lock_correction, lock_correction_password)
# Update paths
if len(self.pathListStore) != len(self.core.get_exercise().subExercisesList):
self.core.get_exercise().subExercisesList = []
for subPath in self.pathListStore:
self.core.get_exercise().subExercisesList.append(SubExercise(self.core.get_exercise()))
for i, subPath in enumerate(self.pathListStore):
self.core.get_exercise().subExercisesList[i].set_video_path(subPath[1])
self.core.get_exercise().subExercisesList[i].set_exercise_path(subPath[2])
self.core.get_exercise().subExercisesList[i].set_translation_path(subPath[3])
self.core.update_properties()
self.core.set_can_save(True)
self.dialog.response(gtk.RESPONSE_OK)
def on_button_exercise_prop_cancel_clicked(self, widget, data=None):
self.dialog.response(gtk.RESPONSE_CANCEL)
def __store_path_changes(self):
if self.iterPath == None:
return
videoChooser = self.builder.get_object("filechooserbuttonVideoProp")
videoPath = videoChooser.get_filename()
exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp")
exercisePath = exerciseChooser.get_filename()
translationChooser = self.builder.get_object("filechooserbuttonTranslationProp")
translationPath = translationChooser.get_filename()
if videoPath == "None" or videoPath == None:
videoPath = ""
if exercisePath == "None" or exercisePath == None:
exercisePath = ""
if translationPath == "None" or translationPath == None:
translationPath = ""
if self.iterPath == None:
return
self.iterPath
self.pathListStore.set_value(self.iterPath, 0, os.path.basename(videoPath))
self.pathListStore.set_value(self.iterPath, 1, videoPath)
self.pathListStore.set_value(self.iterPath, 2, exercisePath)
self.pathListStore.set_value(self.iterPath, 3, translationPath)
def on_filechooserbutton_video_prop_file_set(self, widget, data=None):
videoChooser = self.builder.get_object("filechooserbuttonVideoProp")
exerciseChooser = self.builder.get_object("filechooserbuttonExerciseProp")
translationChooser = self.builder.get_object("filechooserbuttonTranslationProp")
fileName = videoChooser.get_filename()
if fileName and os.path.isfile(fileName):
filePath = os.path.dirname(fileName)
if not exerciseChooser.get_filename() or not os.path.isfile(exerciseChooser.get_filename()):
exerciseChooser.set_current_folder(filePath)
if not translationChooser.get_filename() or not os.path.isfile(translationChooser.get_filename()):
translationChooser.set_current_folder(filePath)
self.__store_path_changes()
def previous_iter(self, model, iter):
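        """Return the iter of the row directly above 'iter' in 'model', or None for the first row."""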
if not iter:
return None
path = model.get_string_from_iter(iter)
if not path:
return None
prow = int(path) - 1
if prow == -1:
return None
prev = model.get_iter_from_string("%d" % prow)
return prev
def on_button_down_path_clicked(self, widget, data=None):
self.pathListStore.move_after(self.iterPath, self.pathListStore.iter_next(self.iterPath))
self._update_path_buttons()
def on_button_up_path_clicked(self, widget, data=None):
self.pathListStore.move_before(self.iterPath, self.previous_iter(self.pathListStore, self.iterPath))
self._update_path_buttons()
def on_button_add_path_clicked(self, widget, data=None):
self.__store_path_changes()
if self.iterPath is None:
self.iterPath = self.pathListStore.get_iter_first()
while self.pathListStore.iter_next(self.iterPath) is not None:
self.iterPath = self.pathListStore.iter_next(self.iterPath)
iter = self.pathListStore.insert_after(self.iterPath, [self.pathListStore.get_value(self.iterPath, 0), self.pathListStore.get_value(self.iterPath, 1), self.pathListStore.get_value(self.iterPath, 2), self.pathListStore.get_value(self.iterPath, 3)])
self.iterPath = None
self.treeviewSelectionPathsList.select_iter(iter)
def on_button_remove_path_clicked(self, widget, data=None):
self.pathListStore.remove(self.iterPath)
self.iterPath = None
self._update_path_buttons()
def on_button_defaut_time_between_sequences_clicked(self, widget, data=None):
adjustmentTimeBetweenSequence = self.builder.get_object("adjustmentTimeBetweenSequence")
exercice = Exercise()
adjustmentTimeBetweenSequence.set_value(exercice.get_time_between_sequence())
def on_button_defaut_maximum_sequence_time_clicked(self, widget, data=None):
adjustmentMaximumSequenceTime = self.builder.get_object("adjustmentMaximumSequenceTime")
exercice = Exercise()
adjustmentMaximumSequenceTime.set_value(exercice.get_max_sequence_length())
def on_button_defaut_time_before_sequence_clicked(self, widget, data=None):
adjustmentTimeBeforeSequence = self.builder.get_object("adjustmentTimeBeforeSequence")
exercice = Exercise()
adjustmentTimeBeforeSequence.set_value(exercice.get_play_margin_before())
def on_button_defaut_time_after_sequence_clicked(self, widget, data=None):
adjustmentTimeAfterSequence = self.builder.get_object("adjustmentTimeAfterSequence")
exercice = Exercise()
adjustmentTimeAfterSequence.set_value(exercice.get_play_margin_after())
| gpl-3.0 | -2,187,032,419,098,357,800 | 44.083538 | 255 | 0.695733 | false |
tswast/google-cloud-python | firestore/google/cloud/firestore_admin_v1/gapic/firestore_admin_client_config.py | 2 | 2613 | config = {
"interfaces": {
"google.firestore.admin.v1.FirestoreAdmin": {
"retry_codes": {
"idempotent": ["DEADLINE_EXCEEDED", "INTERNAL", "UNAVAILABLE"],
"non_idempotent": [],
},
"retry_params": {
"default": {
"initial_retry_delay_millis": 100,
"retry_delay_multiplier": 1.3,
"max_retry_delay_millis": 60000,
"initial_rpc_timeout_millis": 60000,
"rpc_timeout_multiplier": 1.0,
"max_rpc_timeout_millis": 60000,
"total_timeout_millis": 600000,
}
},
"methods": {
"CreateIndex": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ListIndexes": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"GetIndex": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"DeleteIndex": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ImportDocuments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"ExportDocuments": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
"GetField": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"ListFields": {
"timeout_millis": 60000,
"retry_codes_name": "idempotent",
"retry_params_name": "default",
},
"UpdateField": {
"timeout_millis": 60000,
"retry_codes_name": "non_idempotent",
"retry_params_name": "default",
},
},
}
}
}
| apache-2.0 | 7,393,237,340,222,576,000 | 37.426471 | 79 | 0.38385 | false |
zielmicha/pyjvm | tests/pystone/pystone.py | 1 | 7376 | #! /usr/bin/python2.7
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
def __init__(self, PtrComp = None, Discr = 0, EnumComp = 0,
IntComp = 0, StringComp = 0):
self.PtrComp = PtrComp
self.Discr = Discr
self.EnumComp = EnumComp
self.IntComp = IntComp
self.StringComp = StringComp
def copy(self):
return Record(self.PtrComp, self.Discr, self.EnumComp,
self.IntComp, self.StringComp)
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
benchtime, stones = pystones(loops)
#print "Pystone(%s) time for %d passes = %g" % \
# (__version__, loops, benchtime)
print "This machine benchmarks at " + str(stones) + " pystones/second"
def pystones(loops=LOOPS):
return Proc0(loops)
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
Array2Glob = map(lambda x: x[:], [Array1Glob]*51)
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
global IntGlob
global BoolGlob
global Char1Glob
global Char2Glob
global Array1Glob
global Array2Glob
global PtrGlb
global PtrGlbNext
starttime = clock()
for i in range(loops):
pass
nulltime = clock() - starttime
PtrGlbNext = Record()
PtrGlb = Record()
PtrGlb.PtrComp = PtrGlbNext
PtrGlb.Discr = Ident1
PtrGlb.EnumComp = Ident3
PtrGlb.IntComp = 40
PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
Array2Glob[8][7] = 10
starttime = clock()
for i in range(loops):
Proc5()
Proc4()
IntLoc1 = 2
IntLoc2 = 3
String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
EnumLoc = Ident2
BoolGlob = not Func2(String1Loc, String2Loc)
while IntLoc1 < IntLoc2:
IntLoc3 = 5 * IntLoc1 - IntLoc2
IntLoc3 = Proc7(IntLoc1, IntLoc2)
IntLoc1 = IntLoc1 + 1
Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
PtrGlb = Proc1(PtrGlb)
CharIndex = 'A'
while CharIndex <= Char2Glob:
if EnumLoc == Func1(CharIndex, 'C'):
EnumLoc = Proc6(Ident1)
CharIndex = chr(ord(CharIndex)+1)
IntLoc3 = IntLoc2 * IntLoc1
IntLoc2 = IntLoc3 / IntLoc1
IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
IntLoc1 = Proc2(IntLoc1)
benchtime = clock() - starttime - nulltime
if benchtime == 0.0:
loopsPerBenchtime = 0.0
else:
loopsPerBenchtime = (loops / benchtime)
return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
PtrParIn.IntComp = 5
NextRecord.IntComp = PtrParIn.IntComp
NextRecord.PtrComp = PtrParIn.PtrComp
NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
if NextRecord.Discr == Ident1:
NextRecord.IntComp = 6
NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
NextRecord.PtrComp = PtrGlb.PtrComp
NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
else:
PtrParIn = NextRecord.copy()
NextRecord.PtrComp = None
return PtrParIn
def Proc2(IntParIO):
IntLoc = IntParIO + 10
while 1:
if Char1Glob == 'A':
IntLoc = IntLoc - 1
IntParIO = IntLoc - IntGlob
EnumLoc = Ident1
if EnumLoc == Ident1:
break
return IntParIO
def Proc3(PtrParOut):
global IntGlob
if PtrGlb is not None:
PtrParOut = PtrGlb.PtrComp
else:
IntGlob = 100
PtrGlb.IntComp = Proc7(10, IntGlob)
return PtrParOut
def Proc4():
global Char2Glob
BoolLoc = Char1Glob == 'A'
BoolLoc = BoolLoc or BoolGlob
Char2Glob = 'B'
def Proc5():
global Char1Glob
global BoolGlob
Char1Glob = 'A'
BoolGlob = FALSE
def Proc6(EnumParIn):
EnumParOut = EnumParIn
if not Func3(EnumParIn):
EnumParOut = Ident4
if EnumParIn == Ident1:
EnumParOut = Ident1
elif EnumParIn == Ident2:
if IntGlob > 100:
EnumParOut = Ident1
else:
EnumParOut = Ident4
elif EnumParIn == Ident3:
EnumParOut = Ident2
elif EnumParIn == Ident4:
pass
elif EnumParIn == Ident5:
EnumParOut = Ident3
return EnumParOut
def Proc7(IntParI1, IntParI2):
IntLoc = IntParI1 + 2
IntParOut = IntParI2 + IntLoc
return IntParOut
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
global IntGlob
IntLoc = IntParI1 + 5
Array1Par[IntLoc] = IntParI2
Array1Par[IntLoc+1] = Array1Par[IntLoc]
Array1Par[IntLoc+30] = IntLoc
for IntIndex in range(IntLoc, IntLoc+2):
Array2Par[IntLoc][IntIndex] = IntLoc
Array2Par[IntLoc][IntLoc-1] = Array2Par[IntLoc][IntLoc-1] + 1
Array2Par[IntLoc+20][IntLoc] = Array1Par[IntLoc]
IntGlob = 5
def Func1(CharPar1, CharPar2):
CharLoc1 = CharPar1
CharLoc2 = CharLoc1
if CharLoc2 != CharPar2:
return Ident1
else:
return Ident2
def Func2(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE
def Func3(EnumParIn):
EnumLoc = EnumParIn
if EnumLoc == Ident3: return TRUE
return FALSE
if __name__ == '__main__':
import sys
def error(msg):
print >>sys.stderr, msg,
print >>sys.stderr, "usage: %s [number_of_loops]" % sys.argv[0]
sys.exit(100)
nargs = len(sys.argv) - 1
if nargs > 1:
error("%d arguments are too many;" % nargs)
elif nargs == 1:
try: loops = int(sys.argv[1])
except ValueError:
error("Invalid argument %r;" % sys.argv[1])
else:
loops = LOOPS
main(loops)
| mit | 2,755,744,318,415,412,700 | 26.318519 | 74 | 0.599919 | false |
OpenDaisy/daisy-api | daisy/api/v1/config_sets.py | 1 | 17500 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
/config_sets endpoint for Daisy v1 API
"""
from oslo_config import cfg
from oslo_log import log as logging
from webob.exc import HTTPBadRequest
from webob.exc import HTTPConflict
from webob.exc import HTTPForbidden
from webob.exc import HTTPNotFound
from webob import Response
from daisy.api import policy
import daisy.api.v1
from daisy.api.v1 import controller
from daisy.api.v1 import filters
from daisy.common import exception
from daisy.common import property_utils
from daisy.common import utils
from daisy.common import wsgi
from daisy import i18n
from daisy import notifier
import daisy.registry.client.v1.api as registry
from daisy.api.configset import manager
LOG = logging.getLogger(__name__)
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW
SUPPORTED_PARAMS = daisy.api.v1.SUPPORTED_PARAMS
SUPPORTED_FILTERS = daisy.api.v1.SUPPORTED_FILTERS
ACTIVE_IMMUTABLE = daisy.api.v1.ACTIVE_IMMUTABLE
CONF = cfg.CONF
CONF.import_opt('disk_formats', 'daisy.common.config', group='image_format')
CONF.import_opt('container_formats', 'daisy.common.config',
group='image_format')
CONF.import_opt('image_property_quota', 'daisy.common.config')
class Controller(controller.BaseController):
"""
WSGI controller for config_sets resource in Daisy v1 API
The config_sets resource API is a RESTful web service for config_set data. The API
is as follows::
GET /config_sets -- Returns a set of brief metadata about config_sets
GET /config_sets/detail -- Returns a set of detailed metadata about
config_sets
HEAD /config_sets/<ID> -- Return metadata about an config_set with id <ID>
GET /config_sets/<ID> -- Return config_set data for config_set with id <ID>
POST /config_sets -- Store config_set data and return metadata about the
newly-stored config_set
PUT /config_sets/<ID> -- Update config_set metadata and/or upload config_set
data for a previously-reserved config_set
DELETE /config_sets/<ID> -- Delete the config_set with id <ID>
"""
def __init__(self):
self.notifier = notifier.Notifier()
registry.configure_registry_client()
self.policy = policy.Enforcer()
if property_utils.is_property_protection_enabled():
self.prop_enforcer = property_utils.PropertyRules(self.policy)
else:
self.prop_enforcer = None
def _enforce(self, req, action, target=None):
"""Authorize an action against our policies"""
if target is None:
target = {}
try:
self.policy.enforce(req.context, action, target)
except exception.Forbidden:
raise HTTPForbidden()
def _get_filters(self, req):
"""
Return a dictionary of query param filters from the request
:param req: the Request object coming from the wsgi layer
:retval a dict of key/value filters
"""
query_filters = {}
for param in req.params:
if param in SUPPORTED_FILTERS:
query_filters[param] = req.params.get(param)
if not filters.validate(param, query_filters[param]):
raise HTTPBadRequest(_('Bad value passed to filter '
'%(filter)s got %(val)s')
% {'filter': param,
'val': query_filters[param]})
return query_filters
def _get_query_params(self, req):
"""
Extracts necessary query params from request.
:param req: the WSGI Request object
:retval dict of parameters that can be used by registry client
"""
params = {'filters': self._get_filters(req)}
for PARAM in SUPPORTED_PARAMS:
if PARAM in req.params:
params[PARAM] = req.params.get(PARAM)
return params
def _raise_404_if_cluster_deleted(self, req, cluster_id):
cluster = self.get_cluster_meta_or_404(req, cluster_id)
if cluster['deleted']:
msg = _("cluster with identifier %s has been deleted.") % cluster_id
raise HTTPNotFound(msg)
@utils.mutating
def add_config_set(self, req, config_set_meta):
"""
Adds a new config_set to Daisy.
:param req: The WSGI/Webob Request object
        :param config_set_meta: Mapping of metadata about config_set
:raises HTTPBadRequest if x-config_set-name is missing
"""
self._enforce(req, 'add_config_set')
#config_set_id=config_set_meta["id"]
config_set_name = config_set_meta["name"]
config_set_description = config_set_meta["description"]
#print config_set_id
print config_set_name
print config_set_description
config_set_meta = registry.add_config_set_metadata(req.context, config_set_meta)
return {'config_set_meta': config_set_meta}
@utils.mutating
def delete_config_set(self, req, id):
"""
Deletes a config_set from Daisy.
:param req: The WSGI/Webob Request object
        :param config_set_meta: Mapping of metadata about config_set
:raises HTTPBadRequest if x-config_set-name is missing
"""
self._enforce(req, 'delete_config_set')
try:
registry.delete_config_set_metadata(req.context, id)
except exception.NotFound as e:
msg = (_("Failed to find config_set to delete: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to delete config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except exception.InUseByStore as e:
msg = (_("config_set %(id)s could not be deleted because it is in use: "
"%(exc)s") % {"id": id, "exc": utils.exception_to_str(e)})
LOG.warn(msg)
raise HTTPConflict(explanation=msg,
request=req,
content_type="text/plain")
else:
#self.notifier.info('config_set.delete', config_set)
return Response(body='', status=200)
@utils.mutating
def get_config_set(self, req, id):
"""
Returns metadata about an config_set in the HTTP headers of the
response object
:param req: The WSGI/Webob Request object
:param id: The opaque config_set identifier
:raises HTTPNotFound if config_set metadata is not available to user
"""
self._enforce(req, 'get_config_set')
config_set_meta = self.get_config_set_meta_or_404(req, id)
return {'config_set_meta': config_set_meta}
def detail(self, req):
"""
Returns detailed information for all available config_sets
:param req: The WSGI/Webob Request object
:retval The response body is a mapping of the following form::
{'config_sets': [
{'id': <ID>,
'name': <NAME>,
'description': <DESCRIPTION>,
'created_at': <TIMESTAMP>,
'updated_at': <TIMESTAMP>,
'deleted_at': <TIMESTAMP>|<NONE>,}, ...
]}
"""
self._enforce(req, 'get_config_sets')
params = self._get_query_params(req)
try:
config_sets = registry.get_config_sets_detail(req.context, **params)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return dict(config_sets=config_sets)
@utils.mutating
def update_config_set(self, req, id, config_set_meta):
"""
Updates an existing config_set with the registry.
:param request: The WSGI/Webob Request object
        :param id: The opaque config_set identifier
        :retval Returns the updated config_set information as a mapping
"""
self._enforce(req, 'modify_image')
orig_config_set_meta = self.get_config_set_meta_or_404(req, id)
# Do not allow any updates on a deleted image.
# Fix for LP Bug #1060930
if orig_config_set_meta['deleted']:
msg = _("Forbidden to update deleted config_set.")
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
try:
config_set_meta = registry.update_config_set_metadata(req.context,
id,
config_set_meta)
except exception.Invalid as e:
msg = (_("Failed to update config_set metadata. Got error: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPBadRequest(explanation=msg,
request=req,
content_type="text/plain")
except exception.NotFound as e:
msg = (_("Failed to find config_set to update: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPNotFound(explanation=msg,
request=req,
content_type="text/plain")
except exception.Forbidden as e:
msg = (_("Forbidden to update config_set: %s") %
utils.exception_to_str(e))
LOG.warn(msg)
raise HTTPForbidden(explanation=msg,
request=req,
content_type="text/plain")
except (exception.Conflict, exception.Duplicate) as e:
LOG.warn(utils.exception_to_str(e))
raise HTTPConflict(body=_('config_set operation conflicts'),
request=req,
content_type='text/plain')
else:
self.notifier.info('config_set.update', config_set_meta)
return {'config_set_meta': config_set_meta}
def _raise_404_if_role_exist(self,req,config_set_meta):
role_id_list=[]
try:
roles = registry.get_roles_detail(req.context)
for role in roles:
for role_name in eval(config_set_meta['role']):
if role['cluster_id'] == config_set_meta['cluster'] and role['name'] == role_name:
role_id_list.append(role['id'])
break
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_id_list
@utils.mutating
def cluster_config_set_update(self, req, config_set_meta):
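        # Pushes this config set out through the 'clushshell' backend: only the roles named
        # in config_set_meta['role'] when given, otherwise every role bound to the cluster.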
if config_set_meta.has_key('cluster'):
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
backend=manager.configBackend('clushshell', req, role_id)
backend.push_config()
else:
msg = "the role is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
backend=manager.configBackend('clushshell', req, role['id'])
backend.push_config()
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
config_status={"status":"config successful"}
return {'config_set':config_status}
else:
msg = "the cluster is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
@utils.mutating
def cluster_config_set_progress(self, req, config_set_meta):
role_list = []
if config_set_meta.has_key('cluster'):
orig_cluster = str(config_set_meta['cluster'])
self._raise_404_if_cluster_deleted(req, orig_cluster)
try:
if config_set_meta.get('role',None):
role_id_list=self._raise_404_if_role_exist(req,config_set_meta)
if len(role_id_list) == len(eval(config_set_meta['role'])):
for role_id in role_id_list:
role_info = {}
role_meta=registry.get_role_metadata(req.context, role_id)
role_info['role-name']=role_meta['name']
role_info['config_set_update_progress']=role_meta['config_set_update_progress']
role_list.append(role_info)
else:
msg = "the role is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
else:
roles = registry.get_roles_detail(req.context)
for role in roles:
if role['cluster_id'] == config_set_meta['cluster']:
role_info = {}
role_info['role-name']=role['name']
role_info['config_set_update_progress']=role['config_set_update_progress']
role_list.append(role_info)
except exception.Invalid as e:
raise HTTPBadRequest(explanation=e.msg, request=req)
return role_list
else:
msg = "the cluster is not exist"
LOG.error(msg)
raise HTTPNotFound(msg)
class Config_setDeserializer(wsgi.JSONRequestDeserializer):
"""Handles deserialization of specific controller method requests."""
def _deserialize(self, request):
result = {}
result["config_set_meta"] = utils.get_config_set_meta(request)
return result
def add_config_set(self, request):
return self._deserialize(request)
def update_config_set(self, request):
return self._deserialize(request)
def cluster_config_set_update(self, request):
return self._deserialize(request)
def cluster_config_set_progress(self, request):
return self._deserialize(request)
class Config_setSerializer(wsgi.JSONResponseSerializer):
"""Handles serialization of specific controller method responses."""
def __init__(self):
self.notifier = notifier.Notifier()
def add_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def delete_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def get_config_set(self, response, result):
config_set_meta = result['config_set_meta']
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=config_set_meta))
return response
def cluster_config_set_update(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(result)
return response
def cluster_config_set_progress(self, response, result):
response.status = 201
response.headers['Content-Type'] = 'application/json'
response.body = self.to_json(dict(config_set=result))
return response
def create_resource():
"""config_sets resource factory method"""
deserializer = Config_setDeserializer()
serializer = Config_setSerializer()
return wsgi.Resource(Controller(), deserializer, serializer)
| apache-2.0 | -2,054,898,294,051,990,000 | 39.322581 | 107 | 0.571771 | false |
amorphic/sparkcc-formulapi | tests/test_formula.py | 1 | 2017 | from mock import patch
from race_code import race_code_globals
from . import FormulaPiTestCase
class TestFormula(FormulaPiTestCase):
def setUp(self):
# Capture all of the global defaults and reset them after we modify them with this code
# to keep sanity when running tests.
self.original_race_code_globals = {}
globals_used = [
'capture',
'processor_pool',
'controller',
'running',
'display_frame',
'display_predator',
'frame_lock',
]
for global_used in globals_used:
self.original_race_code_globals[global_used] = getattr(race_code_globals, global_used)
# Patch these as we don't have a device connected to it.
self.patch_1 = patch('smbus.smbus.SMBus')
self.patch_1.start()
self.patch_2 = patch('race_code.zero_borg.ZeroBorg')
self.patch_2.start()
self.patch_3 = patch('cv2.VideoCapture')
self.patch_3.start()
# We don't want to call sudo each time during tests (we don't want to have to do that at
# all really)!
self.patch_4 = patch('os.system') # Patch a sudo call during testing.
self.patch_4.start()
# Setting the running mode to false for consistency.
race_code_globals.running = False
# Lets not run all those threads in the tests.
self.patch_5 = patch('threading.Thread')
self.patch_5.start()
# Patch the `formula` file as it loads `SMBus` automatically then import it.
from race_code import Formula
self.formula = Formula
def tearDown(self):
self.patch_1.stop()
self.patch_2.stop()
self.patch_3.stop()
self.patch_4.stop()
self.patch_5.stop()
for global_used in self.original_race_code_globals.keys():
setattr(race_code_globals, global_used, self.original_race_code_globals[global_used])
def test_yeti_motors(self):
pass
| mit | 6,386,742,852,895,019,000 | 34.385965 | 98 | 0.608825 | false |
ownport/local-ci | local_ci/travis.py | 1 | 2167 | # -*- coding: utf-8 -*-
import os
import re
import utils
from dispatchers import BaseDispatcher
BASH_SCRIPT_TEMPLATE='''#!/bin/bash'''
RE_ENV_PATTERN=re.compile(r'^.+?=.+?$')
CI_STAGES = [
'before_install', 'install',
'before_script', 'script',
'after_success', 'after_failure',
'before_deploy', 'deploy', 'after_deploy',
'after_script',
]
SUPPORTED_CI_STAGES = [
'install',
'script',
]
class TravisRepoDispatcher(BaseDispatcher):
def __init__(self, path, settings):
super(TravisRepoDispatcher, self).__init__(path, settings)
self._travisyml_path = os.path.join(self.repo_path, '.travis.yml')
if not os.path.exists(self._travisyml_path):
raise IOError('The file .travis.yml does not exist in the directory %s' % self.repo_path)
self._travisyml = utils.read_yaml(self._travisyml_path)
def docker_images(self):
''' returns the list of docker images
'''
language = self._travisyml.get('language', None)
if not language:
raise RuntimeError("The language variable is missed in configuration files")
versions = self._travisyml.get(language, None)
if not versions:
raise RuntimeError("The variable is missed in configuration file, %s" % language)
return [self.get_docker_image(':'.join((language, str(ver))))
for ver in versions]
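    # For example, a .travis.yml containing "language: python" and "python: [2.7, 3.5]"
    # would yield the tags "python:2.7" and "python:3.5"; the exact image names depend on
    # get_docker_image() in the base dispatcher, which is not shown here.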
def script(self):
''' returns the script for execution in docker container
'''
script = ['#!/bin/sh',]
env_vars = list(self._travisyml.get('env', []))
env_vars.extend(list(self.settings.get('env', [])))
script.extend(['\n# Environment variables'])
script.extend([ "export %s" % e for e in env_vars if RE_ENV_PATTERN.match(e) ])
for stage in SUPPORTED_CI_STAGES:
stage_actions = self._travisyml.get(stage, None)
if stage == 'install':
stage_actions.append('cd /repo')
if stage_actions:
script.extend(['\n# Stage: %s' % stage,])
script.extend(stage_actions)
return '\n'.join(script)
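    # The generated script looks roughly like this (stage actions are illustrative only):
    #   #!/bin/sh
    #   # Environment variables
    #   export FOO=bar
    #   # Stage: install
    #   pip install -r requirements.txt
    #   cd /repo
    #   # Stage: script
    #   py.test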
| apache-2.0 | 3,065,606,958,559,906,000 | 28.283784 | 101 | 0.595293 | false |
Unode/ete | ete3/parser/newick.py | 1 | 20069 | # #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
from __future__ import absolute_import
from __future__ import print_function
import re
import os
import six
from six.moves import map
__all__ = ["read_newick", "write_newick", "print_supported_formats"]
ITERABLE_TYPES = set([list, set, tuple, frozenset])
# Regular expressions used for reading newick format
_ILEGAL_NEWICK_CHARS = ":;(),\[\]\t\n\r="
_NON_PRINTABLE_CHARS_RE = "[\x00-\x1f]+"
_NHX_RE = "\[&&NHX:[^\]]*\]"
_FLOAT_RE = "\s*[+-]?\d+\.?\d*(?:[eE][-+]?\d+)?\s*"
#_FLOAT_RE = "[+-]?\d+\.?\d*"
#_NAME_RE = "[^():,;\[\]]+"
_NAME_RE = "[^():,;]+?"
# thanks to: http://stackoverflow.com/a/29452781/1006828
_QUOTED_TEXT_RE = r"""((?=["'])(?:"[^"\\]*(?:\\[\s\S][^"\\]*)*"|'[^'\\]*(?:\\[\s\S][^'\\]*)*'))"""
#_QUOTED_TEXT_RE = r"""["'](?:(?<=")[^"\\]*(?s:\\.[^"\\]*)*"|(?<=')[^'\\]*(?s:\\.[^'\\]*)*')""]"]"""
#_QUOTED_TEXT_RE = r"""(?=["'])(?:"[^"\\]*(?:\\[\s\S][^"\\]*)*"|'[^'\\]*(?:\\[\s\S][^'\\]*)*')]"]")"]"""
_QUOTED_TEXT_PREFIX='ete3_quotref_'
DEFAULT_DIST = 1.0
DEFAULT_NAME = ''
DEFAULT_SUPPORT = 1.0
FLOAT_FORMATTER = "%0.6g"
#DIST_FORMATTER = ":"+FLOAT_FORMATTER
NAME_FORMATTER = "%s"
def set_float_format(formatter):
''' Set the conversion format used to represent float distances and support
values in the newick representation of trees.
For example, use set_float_format('%0.32f') to specify 32 decimal numbers
when exporting node distances and bootstrap values.
Scientific notation (%e) or any other custom format is allowed. The
formatter string should not contain any character that may break newick
structure (i.e.: ":;,()")
'''
global FLOAT_FORMATTER
FLOAT_FORMATTER = formatter
#DIST_FORMATTER = ":"+FLOAT_FORMATTER
# Allowed formats. This table is used to read and write newick using
# different convenctions. You can also add your own formats in an easy way.
#
#
# FORMAT: [[LeafAttr1, LeafAttr1Type, Strict?], [LeafAttr2, LeafAttr2Type, Strict?],\
# [InternalAttr1, InternalAttr1Type, Strict?], [InternalAttr2, InternalAttr2Type, Strict?]]
#
# Attributes are placed in the newick as follows:
#
# .... ,LeafAttr1:LeafAttr2)InternalAttr1:InternalAttr2 ...
#
#
# /-A
# -NoName--|
# | /-B
# \C-------|
# | /-D
# \E-------|
# \-G
#
# Format 0 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)1.000000:0.642905)1.000000:0.567737);
# Format 1 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E:0.642905)C:0.567737);
# Format 2 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)1.000000:0.642905)1.000000:0.567737);
# Format 3 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E:0.642905)C:0.567737);
# Format 4 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)));
# Format 5 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729):0.642905):0.567737);
# Format 6 = (A:0.350596,(B:0.728431,(D:0.609498,G:0.125729)E)C);
# Format 7 = (A,(B,(D,G)E)C);
# Format 8 = (A,(B,(D,G)));
# Format 9 = (,(,(,)));
NW_FORMAT = {
0: [['name', str, True], ["dist", float, True], ['support', float, True], ["dist", float, True]], # Flexible with support
1: [['name', str, True], ["dist", float, True], ['name', str, True], ["dist", float, True]], # Flexible with internal node names
2: [['name', str, False], ["dist", float, False], ['support', float, False], ["dist", float, False]],# Strict with support values
3: [['name', str, False], ["dist", float, False], ['name', str, False], ["dist", float, False]], # Strict with internal node names
4: [['name', str, False], ["dist", float, False], [None, None, False], [None, None, False]],
5: [['name', str, False], ["dist", float, False], [None, None, False], ["dist", float, False]],
6: [['name', str, False], [None, None, False], [None, None, False], ["dist", float, False]],
7: [['name', str, False], ["dist", float, False], ["name", str, False], [None, None, False]],
8: [['name', str, False], [None, None, False], ["name", str, False], [None, None, False]],
9: [['name', str, False], [None, None, False], [None, None, False], [None, None, False]], # Only topology with node names
100: [[None, None, False], [None, None, False], [None, None, False], [None, None, False]] # Only Topology
}
def format_node(node, node_type, format, dist_formatter=None,
support_formatter=None, name_formatter=None,
quoted_names=False):
if dist_formatter is None: dist_formatter = FLOAT_FORMATTER
if support_formatter is None: support_formatter = FLOAT_FORMATTER
if name_formatter is None: name_formatter = NAME_FORMATTER
if node_type == "leaf":
container1 = NW_FORMAT[format][0][0] # name
container2 = NW_FORMAT[format][1][0] # dists
converterFn1 = NW_FORMAT[format][0][1]
converterFn2 = NW_FORMAT[format][1][1]
flexible1 = NW_FORMAT[format][0][2]
else:
container1 = NW_FORMAT[format][2][0] #support/name
container2 = NW_FORMAT[format][3][0] #dist
converterFn1 = NW_FORMAT[format][2][1]
converterFn2 = NW_FORMAT[format][3][1]
flexible1 = NW_FORMAT[format][2][2]
if converterFn1 == str:
try:
if not quoted_names:
FIRST_PART = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, container1)))
else:
FIRST_PART = str(getattr(node, container1))
if not FIRST_PART and container1 == 'name' and not flexible1:
FIRST_PART = "NoName"
except (AttributeError, TypeError):
FIRST_PART = "?"
FIRST_PART = name_formatter %FIRST_PART
if quoted_names:
#FIRST_PART = '"%s"' %FIRST_PART.decode('string_escape').replace('"', '\\"')
FIRST_PART = '"%s"' %FIRST_PART
elif converterFn1 is None:
FIRST_PART = ""
else:
try:
FIRST_PART = support_formatter %(converterFn2(getattr(node, container1)))
except (ValueError, TypeError):
FIRST_PART = "?"
if converterFn2 == str:
try:
SECOND_PART = ":"+re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
str(getattr(node, container2)))
except (ValueError, TypeError):
SECOND_PART = ":?"
elif converterFn2 is None:
SECOND_PART = ""
else:
try:
#SECOND_PART = ":%0.6f" %(converterFn2(getattr(node, container2)))
SECOND_PART = ":%s" %(dist_formatter %(converterFn2(getattr(node, container2))))
except (ValueError, TypeError):
SECOND_PART = ":?"
return "%s%s" %(FIRST_PART, SECOND_PART)
def print_supported_formats():
from ..coretype.tree import TreeNode
t = TreeNode()
t.populate(4, "ABCDEFGHI")
print(t)
for f in NW_FORMAT:
print("Format", f,"=", write_newick(t, features=None, format=f))
class NewickError(Exception):
"""Exception class designed for NewickIO errors."""
def __init__(self, value):
if value is None:
value = ''
value += "\nYou may want to check other newick loading flags like 'format' or 'quoted_node_names'."
Exception.__init__(self, value)
def read_newick(newick, root_node=None, format=0, quoted_names=False):
""" Reads a newick tree from either a string or a file, and returns
an ETE tree structure.
A previously existent node object can be passed as the root of the
tree, which means that all its new children will belong to the same
    class as the root (this allows working with custom TreeNode
    objects).
You can also take advantage from this behaviour to concatenate
several tree structures.
"""
if root_node is None:
from ..coretype.tree import TreeNode
root_node = TreeNode()
if isinstance(newick, six.string_types):
if os.path.exists(newick):
if newick.endswith('.gz'):
import gzip
nw = gzip.open(newick).read()
else:
nw = open(newick, 'rU').read()
else:
nw = newick
matcher = compile_matchers(formatcode=format)
nw = nw.strip()
if not nw.startswith('(') and nw.endswith(';'):
#return _read_node_data(nw[:-1], root_node, "single", matcher, format)
return _read_newick_from_string(nw, root_node, matcher, format, quoted_names)
elif not nw.startswith('(') or not nw.endswith(';'):
            raise NewickError('Nonexistent tree file or malformed newick tree structure.')
else:
return _read_newick_from_string(nw, root_node, matcher, format, quoted_names)
else:
raise NewickError("'newick' argument must be either a filename or a newick string.")
def _read_newick_from_string(nw, root_node, matcher, formatcode, quoted_names):
""" Reads a newick string in the New Hampshire format. """
if quoted_names:
# Quoted text is mapped to references
quoted_map = {}
unquoted_nw = ''
counter = 0
for token in re.split(_QUOTED_TEXT_RE, nw):
counter += 1
if counter % 2 == 1 : # normal newick tree structure data
unquoted_nw += token
else: # quoted text, add to dictionary and replace with reference
quoted_ref_id= _QUOTED_TEXT_PREFIX + str(int(counter/2))
unquoted_nw += quoted_ref_id
quoted_map[quoted_ref_id]=token[1:-1] # without the quotes
nw = unquoted_nw
if not nw.startswith('(') and nw.endswith(';'):
_read_node_data(nw[:-1], root_node, "single", matcher, format)
if quoted_names:
if root_node.name.startswith(_QUOTED_TEXT_PREFIX):
root_node.name = quoted_map[root_node.name]
return root_node
if nw.count('(') != nw.count(')'):
raise NewickError('Parentheses do not match. Broken tree structure?')
# white spaces and separators are removed
nw = re.sub("[\n\r\t]+", "", nw)
current_parent = None
# Each chunk represents the content of a parent node, and it could contain
# leaves and closing parentheses.
# We may find:
# leaf, ..., leaf,
# leaf, ..., leaf))),
# leaf)), leaf, leaf))
# leaf))
# ) only if formatcode == 100
for chunk in nw.split("(")[1:]:
# If no node has been created so far, this is the root, so use the node.
current_parent = root_node if current_parent is None else current_parent.add_child()
subchunks = [ch.strip() for ch in chunk.split(",")]
# We should expect that the chunk finished with a comma (if next chunk
# is an internal sister node) or a subchunk containing closing parenthesis until the end of the tree.
#[leaf, leaf, '']
#[leaf, leaf, ')))', leaf, leaf, '']
#[leaf, leaf, ')))', leaf, leaf, '']
#[leaf, leaf, ')))', leaf), leaf, 'leaf);']
if subchunks[-1] != '' and not subchunks[-1].endswith(';'):
raise NewickError('Broken newick structure at: %s' %chunk)
# lets process the subchunks. Every closing parenthesis will close a
# node and go up one level.
for i, leaf in enumerate(subchunks):
if leaf.strip() == '' and i == len(subchunks) - 1:
continue # "blah blah ,( blah blah"
closing_nodes = leaf.split(")")
# first part after splitting by ) always contain leaf info
_read_node_data(closing_nodes[0], current_parent, "leaf", matcher, formatcode)
# next contain closing nodes and data about the internal nodes.
if len(closing_nodes)>1:
for closing_internal in closing_nodes[1:]:
closing_internal = closing_internal.rstrip(";")
# read internal node data and go up one level
_read_node_data(closing_internal, current_parent, "internal", matcher, formatcode)
current_parent = current_parent.up
# references in node names are replaced with quoted text before returning
if quoted_names:
for node in root_node.traverse():
if node.name.startswith(_QUOTED_TEXT_PREFIX):
node.name = quoted_map[node.name]
return root_node
def _parse_extra_features(node, NHX_string):
""" Reads node's extra data form its NHX string. NHX uses this
format: [&&NHX:prop1=value1:prop2=value2] """
NHX_string = NHX_string.replace("[&&NHX:", "")
NHX_string = NHX_string.replace("]", "")
for field in NHX_string.split(":"):
try:
pname, pvalue = field.split("=")
except ValueError as e:
raise NewickError('Invalid NHX format %s' %field)
node.add_feature(pname, pvalue)
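# Illustrative sketch (added commentary, not part of the original parser): for
# the annotation "[&&NHX:species=Homo:conf=0.99]" the loop above yields the
# pairs ('species', 'Homo') and ('conf', '0.99'); values stay plain strings.
# A standalone equivalent returning a dict, handy for quick checks:
def _nhx_to_dict_example(NHX_string):
    """Hypothetical helper mirroring _parse_extra_features(); unused by the parser."""
    payload = NHX_string.replace("[&&NHX:", "").replace("]", "")
    return dict(field.split("=") for field in payload.split(":") if field)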
def compile_matchers(formatcode):
matchers = {}
for node_type in ["leaf", "single", "internal"]:
if node_type == "leaf" or node_type == "single":
container1 = NW_FORMAT[formatcode][0][0]
container2 = NW_FORMAT[formatcode][1][0]
converterFn1 = NW_FORMAT[formatcode][0][1]
converterFn2 = NW_FORMAT[formatcode][1][1]
flexible1 = NW_FORMAT[formatcode][0][2]
flexible2 = NW_FORMAT[formatcode][1][2]
else:
container1 = NW_FORMAT[formatcode][2][0]
container2 = NW_FORMAT[formatcode][3][0]
converterFn1 = NW_FORMAT[formatcode][2][1]
converterFn2 = NW_FORMAT[formatcode][3][1]
flexible1 = NW_FORMAT[formatcode][2][2]
flexible2 = NW_FORMAT[formatcode][3][2]
if converterFn1 == str:
FIRST_MATCH = "("+_NAME_RE+")"
elif converterFn1 == float:
FIRST_MATCH = "("+_FLOAT_RE+")"
elif converterFn1 is None:
FIRST_MATCH = '()'
if converterFn2 == str:
SECOND_MATCH = "(:"+_NAME_RE+")"
elif converterFn2 == float:
SECOND_MATCH = "(:"+_FLOAT_RE+")"
elif converterFn2 is None:
SECOND_MATCH = '()'
if flexible1 and node_type != 'leaf':
FIRST_MATCH += "?"
if flexible2:
SECOND_MATCH += "?"
        matcher_str = r'^\s*%s\s*%s\s*(%s)?\s*$' % (FIRST_MATCH, SECOND_MATCH, _NHX_RE)
compiled_matcher = re.compile(matcher_str)
matchers[node_type] = [container1, container2, converterFn1, converterFn2, compiled_matcher]
return matchers
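# Minimal usage sketch (illustrative and never called; it assumes the
# module-level NW_FORMAT table and the default name/distance layout of
# format code 0). It shows how a compiled matcher is applied to one chunk:
def _matcher_demo_example(formatcode=0):
    """Hypothetical helper: run the leaf matcher over a single newick chunk."""
    container1, container2, fn1, fn2, compiled = compile_matchers(formatcode)["leaf"]
    data = re.match(compiled, "Homo_sapiens:0.12[&&NHX:species=Homo]")
    # groups() holds the name, the optional ':dist' part and the optional
    # NHX block, in the order consumed by _read_node_data().
    return data.groups() if data else None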
def _read_node_data(subnw, current_node, node_type, matcher, formatcode):
""" Reads a leaf node from a subpart of the original newick
tree """
if node_type == "leaf" or node_type == "single":
if node_type == "leaf":
node = current_node.add_child()
else:
node = current_node
else:
node = current_node
subnw = subnw.strip()
if not subnw and node_type == 'leaf' and formatcode != 100:
raise NewickError('Empty leaf node found')
elif not subnw:
return
container1, container2, converterFn1, converterFn2, compiled_matcher = matcher[node_type]
data = re.match(compiled_matcher, subnw)
if data:
data = data.groups()
# This prevents ignoring errors even in flexible nodes:
if subnw and data[0] is None and data[1] is None and data[2] is None:
raise NewickError("Unexpected newick format '%s'" %subnw)
if data[0] is not None and data[0] != '':
node.add_feature(container1, converterFn1(data[0].strip()))
if data[1] is not None and data[1] != '':
node.add_feature(container2, converterFn2(data[1][1:].strip()))
if data[2] is not None \
and data[2].startswith("[&&NHX"):
_parse_extra_features(node, data[2])
else:
raise NewickError("Unexpected newick format '%s' " %subnw[0:50])
return
def write_newick(rootnode, features=None, format=1, format_root_node=True,
is_leaf_fn=None, dist_formatter=None, support_formatter=None,
name_formatter=None, quoted_names=False):
""" Iteratively export a tree structure and returns its NHX
representation. """
newick = []
leaf = is_leaf_fn if is_leaf_fn else lambda n: not bool(n.children)
for postorder, node in rootnode.iter_prepostorder(is_leaf_fn=is_leaf_fn):
if postorder:
newick.append(")")
if node.up is not None or format_root_node:
newick.append(format_node(node, "internal", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_names))
newick.append(_get_features_string(node, features))
else:
if node is not rootnode and node != node.up.children[0]:
newick.append(",")
if leaf(node):
newick.append(format_node(node, "leaf", format,
dist_formatter=dist_formatter,
support_formatter=support_formatter,
name_formatter=name_formatter,
quoted_names=quoted_names))
newick.append(_get_features_string(node, features))
else:
newick.append("(")
newick.append(";")
return ''.join(newick)
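# Example of the expected output shape (illustrative): a three-leaf tree with
# unit branch lengths serializes roughly as "((A:1,B:1)1:1,C:1);" -- the exact
# fields written per node depend on the chosen format code and formatters.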
def _get_features_string(self, features=None):
""" Generates the extended newick string NHX with extra data about
a node. """
string = ""
if features is None:
features = []
elif features == []:
features = sorted(self.features)
for pr in features:
if hasattr(self, pr):
raw = getattr(self, pr)
if type(raw) in ITERABLE_TYPES:
raw = '|'.join(map(str, raw))
elif type(raw) == dict:
                raw = '|'.join(["%s-%s" % (k, v) for k, v in six.iteritems(raw)])
elif type(raw) == str:
pass
else:
raw = str(raw)
value = re.sub("["+_ILEGAL_NEWICK_CHARS+"]", "_", \
raw)
if string != "":
string +=":"
string +="%s=%s" %(pr, str(value))
if string != "":
string = "[&&NHX:"+string+"]"
return string
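# Example of the serialized annotation (illustrative only): a node carrying
# features species='Homo' and support=1.0, with features=[] requested, would
# come out as "[&&NHX:species=Homo:support=1.0]" (order follows the sorted
# feature names).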
| gpl-3.0 | -3,665,706,475,217,138,700 | 39.299197 | 139 | 0.567891 | false |
linuxwhatelse/mapper | tests/test_mapper.py | 1 | 6799 | from context import mapper
import unittest
import threading
mpr = mapper.Mapper()
class TestMapper(unittest.TestCase):
def test_instances(self):
inst1 = mapper.Mapper.get('inst1')
inst2 = mapper.Mapper.get('inst2')
self.assertNotEqual(inst1, inst2)
inst1_2 = mapper.Mapper.get('inst1')
inst2_2 = mapper.Mapper.get('inst2')
self.assertNotEqual(inst1_2, inst2_2)
self.assertEqual(inst1, inst1_2)
self.assertEqual(inst2, inst2_2)
def _async():
inst1_3 = mapper.Mapper.get('inst1')
inst2_3 = mapper.Mapper.get('inst2')
self.assertNotEqual(inst1_3, inst2_3)
self.assertEqual(inst1, inst1_2, inst1_3)
self.assertEqual(inst2, inst2_2, inst2_3)
threading.Thread(target=_async).start()
def test_mutable(self):
inst1 = mapper.Mapper.get('inst1')
inst2 = mapper.Mapper.get('inst2')
@inst1.s_url('/index/')
def _index1():
return 1
@inst2.s_url('/index/')
def _index2():
return 2
self.assertEqual(1, inst1.call('http://some.url/index'))
self.assertEqual(2, inst2.call('http://some.url/index'))
def test_decorator_simple(self):
@mpr.s_url('/index/')
def _index():
return True
self.assertTrue(mpr.call('http://some.url/index'))
mpr.clear()
def test_decorator_query(self):
@mpr.url('^/index/$')
def _index(param1, param2):
return '%s %s' % (param1, param2)
self.assertEqual('123 456', mpr.call('http://some.url/index'
'?param1=123¶m2=456'))
mpr.clear()
def test_decorator_typecast(self):
@mpr.url('^/index/$', type_cast={'a_int': int, 'a_float': float,
'a_bool': bool})
def _index(a_int, a_float, a_bool):
if (isinstance(a_int, int) and
isinstance(a_float, float) and
isinstance(a_bool, bool)):
return True
else:
return False
self.assertTrue(mpr.call('http://some.url/index'
'?a_int=123&a_float=1.0&a_bool=true'))
mpr.clear()
def test_decorator_dynamic_url(self):
@mpr.url('^/index/(?P<some_path>[^/]*)/(?P<some_id>[0-9]*)/$',
type_cast={'some_id': int})
def _index(some_path, some_id):
return (some_path, some_id)
self.assertEqual(('abc', 123),
mpr.call('http://some.url/index/abc/123/'))
# Will not match because the regex expects :some_id: to be [0-9]*
self.assertIsNone(None, mpr.call('http://some.url/index/abc/def/'))
mpr.clear()
def test_decorater_dynamic_simple_url(self):
@mpr.s_url('/index/<some_id>/', type_cast={'some_id': int})
def _index(some_id):
return ('main', some_id)
@mpr.s_url('/index/<some_id>/sub/', type_cast={'some_id': int})
def _index(some_id):
return ('sub', some_id)
self.assertEqual(('main', 123),
mpr.call('http://some.url/index/123/'))
self.assertEqual(('sub', 456),
mpr.call('http://some.url/index/456/sub'))
mpr.clear()
def test_decorator_method(self):
@mpr.url('^/index/$', 'GET')
def _index():
return 'GET'
@mpr.url('^/index/$', 'POST')
def _index():
return 'POST'
self.assertEqual('GET', mpr.call('http://some.url/index/',
method='GET'))
self.assertEqual('POST', mpr.call('http://some.url/index/',
method='POST'))
mpr.clear()
def test_decorator_arguments(self):
@mpr.url('^/index/$')
def _index(param1, param2):
return '%s %s' % (param1, param2)
self.assertEqual('123 456', mpr.call('http://some.url/index/',
args={'param1': '123', 'param2': '456'}))
mpr.clear()
def test_decorator_default_value(self):
@mpr.s_url('/index/')
def _index(param1, param2=456):
return '%s %s' % (param1, param2)
self.assertEqual('123 456', mpr.call('http://some.url/index/',
args={'param1': '123'}))
mpr.clear()
def test_decorator_default_value_overwrite(self):
@mpr.s_url('/index/')
def _index(param1, param2=456):
return '%s %s' % (param1, param2)
self.assertEqual('123 789', mpr.call('http://some.url/index/',
args={'param1': '123', 'param2': '789'}))
mpr.clear()
def test_decorator_kwargs(self):
@mpr.url('^/index/$')
def _index(**kwargs):
return kwargs
response = mpr.call('http://some.url/index?param1=123¶m2=456')
self.assertIn('param1', response)
self.assertIn('param2', response)
mpr.clear()
def test_decorator_list(self):
@mpr.url('^/index/$', type_cast={'param1': int})
def _index(param1):
if (not isinstance(param1, list) and
param1[0] == 123 and param1[1] == 456):
return False
else:
return True
self.assertTrue(
mpr.call('http://some.url/index?param1=123¶m1=456'))
mpr.clear()
def test_decorator_blank_value(self):
@mpr.url('^/index/$')
def _index(param1, param2):
return '%s-%s' % (param1, param2)
self.assertEqual('-',
mpr.call('http://some.url/index?param1=¶m2='))
mpr.clear()
def test_add_function(self):
# Uses the same logic as the decorator apart from adding it to the
# internal store.
# If this test-case works, everything else (type-cast etc.)
# will work as well
def _index():
return True
mpr.add('^/index/$', _index)
self.assertTrue(mpr.call('http://some.url/index/'))
mpr.clear()
def test_simple_add_function(self):
# Uses the same logic as the decorator apart from adding it to the
# internal store.
# If this test-case works, everything else (type-cast etc.)
# will work as well
def _index():
return True
mpr.add('/index/', _index)
self.assertTrue(mpr.call('http://some.url/index/'))
mpr.clear()
def test_data_store(self):
self.assertIsNotNone(mpr._data_store)
mpr.clear()
self.assertEqual([], mpr._data_store)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 4,815,844,863,855,987,000 | 27.931915 | 75 | 0.518753 | false |
notkarol/banjin | experiment/python_word_matching_speed.py | 1 | 4650 | #!/usr/bin/python
# Takes in a dictionary of words
# Verifies that all functions return the same answers
# Generates random hands from the probability of getting tiles from the bunch
# Then prints out how long each function takes to find all matching words
# Generates various hand sizes to see if there's any scaling
import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import sys
import timeit
# Naive list way of matching wordbank
def f0_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i][i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_list(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0:
results.append(w_i)
return results
# Naive way using numpy
def f0_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
match = True
for i in range(26):
if hand[i] < wordbank[w_i,i]:
match = False
break
if match:
results.append(w_i)
return results
# A for loop and some numpy
def f1_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if not np.any((hand - wordbank[w_i]) < 0):
results.append(w_i)
return results
# A for loop and some numpy
def f2_np(hand, wordbank):
results = []
for w_i in range(len(wordbank)):
if np.min(hand - wordbank[w_i]) >= 0:
results.append(w_i)
return results
# Vectorized sum and difference
def f3_np(hand, wordbank):
return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0]
# vectorized just using any
def f4_np(hand, wordbank):
return np.where(np.any(wordbank > hand, axis=1) == 0)[0]
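# Optional sanity check (added for illustration, not invoked anywhere): all
# implementations should agree on a toy wordbank. Hands and words are 26-long
# letter-count vectors, as in the real data prepared below.
def _cross_check_example():
    """Hypothetical helper comparing the list and numpy matchers on tiny data."""
    hand = [2] * 26                       # two of every letter
    bank_list = [[0] * 25 + [3],          # needs three 'Z's -> no match
                 [1] + [0] * 25]          # needs one 'A'    -> match
    bank_np = np.array(bank_list)
    assert f0_list(hand, bank_list) == f1_list(hand, bank_list) == [1]
    assert list(f4_np(np.array(hand), bank_np)) == [1]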
# Prepare a 2D list and a 2D np array of letter frequencies
with open(sys.argv[1]) as f:
words = [x.split()[0] for x in f.readlines()]
wordbank_list = [[0] * 26 for _ in range(len(words))]
wordbank_np = np.zeros((len(words), 26))
for w_i in range(len(words)):
for letter in sorted(words[w_i]):
pos = ord(letter) - 65
wordbank_list[w_i][pos] += 1
wordbank_np[w_i][pos] += 1
# Arrays for keeping track of functions and data-specific wordbanks
hand_sizes = list(range(2, 9))
functions = {'list' : [f0_list, f1_list],
'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]}
wordbanks = {'list' : wordbank_list,
'numpy': wordbank_np}
n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2])
timings = {}
for datatype in functions:
timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter, len(functions[datatype])))
# Verify that our functions give the same answers
for datatype in functions:
for func in functions[datatype]:
print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2], wordbanks[datatype]))
# Time each word
imports = 'from __main__ import functions, wordbanks'
for counter in range(n_iter):
for hand_size in hand_sizes:
# Get a specific hand size
hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2]
while sum(hand) > hand_size:
pos = np.random.randint(sum(hand))
for i in range(len(hand)):
pos -= hand[i]
if pos < 0:
hand[i] -= 1
break
hand = str(hand)
# For this hand go wild
for datatype in functions:
for f_i in range(len(functions[datatype])):
cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype)
timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8)
print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='')
print()
# Save words and timings in case we're doing a long-lasting operation
filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1])
with open(filename, 'wb') as f:
print("Saving", filename)
pickle.dump((words, wordbanks, timings), f)
# Show Results
for datatype in functions:
means = np.mean(timings[datatype], axis=1)
for f_i in range(means.shape[1]):
plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):], label='%s F%i' % (datatype, f_i))
plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5))
plt.xlabel("Hand Size")
plt.ylabel("Execution Time")
plt.title("Word Matching")
plt.show()
| mit | 6,223,729,968,353,600,000 | 29.794702 | 99 | 0.60043 | false |
mfcloud/python-zvm-sdk | zvmsdk/sdkwsgi/requestlog.py | 1 | 2577 | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Simple middleware for request logging."""
import logging
from zvmsdk import log
from zvmsdk.sdkwsgi import util
LOG = log.LOG
class RequestLog(object):
"""WSGI Middleware to write a simple request log to.
Borrowed from Paste Translogger
"""
format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
'status: %(status)s length: %(bytes)s headers: %(headers)s '
'exc_info: %(exc_info)s')
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
LOG.debug('Starting request: %s "%s %s"',
environ['REMOTE_ADDR'], environ['REQUEST_METHOD'],
util.get_request_uri(environ))
return self._log_and_call(environ, start_response)
def _log_and_call(self, environ, start_response):
req_uri = util.get_request_uri(environ)
def _local_response(status, headers, exc_info=None):
size = None
for name, value in headers:
if name.lower() == 'content-length':
size = value
self._write_log(environ, req_uri, status, size, headers,
exc_info)
return start_response(status, headers, exc_info)
return self.application(environ, _local_response)
def _write_log(self, environ, req_uri, status, size, headers, exc_info):
if size is None:
size = '-'
log_format = {
'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
'REQUEST_METHOD': environ['REQUEST_METHOD'],
'REQUEST_URI': req_uri,
'status': status.split(None, 1)[0],
'bytes': size,
'headers': headers,
'exc_info': exc_info
}
if LOG.isEnabledFor(logging.INFO):
LOG.info(self.format, log_format)
else:
LOG.debug(self.format, log_format)
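# Minimal usage sketch (illustrative; the real SDK wires the middleware into
# its own WSGI deploy pipeline, so nothing below is called by this module):
def _example_wsgi_pipeline():
    """Hypothetical helper showing how RequestLog wraps a WSGI application."""
    def app(environ, start_response):
        body = b'ok'
        start_response('200 OK', [('Content-Length', str(len(body)))])
        return [body]
    return RequestLog(app)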
| apache-2.0 | -3,003,853,398,583,577,000 | 33.824324 | 78 | 0.592938 | false |
jrabbit/pyborg-1up | misc/old_experimental_scripts/pyborg-msnp.py | 1 | 4836 | #! /usr/bin/env python
#
# PyBorg MSN module
#
# Copyright (c) 2006 Sebastien Dailly
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import time
import sys
from pyborg import pyborg
from pyborg import cfgfile
import traceback
import thread
try:
import msnp
except:
print "ERROR !!!!\msnp not found, please install it ( http://msnp.sourceforge.net/ )"
sys.exit(1)
def get_time():
"""
Return time as a nice yummy string
"""
return time.strftime("%H:%M:%S", time.localtime(time.time()))
class ModMSN(msnp.Session, msnp.ChatCallbacks):
def __init__(self, my_pyborg, args):
"""
Args will be sys.argv (command prompt arguments)
"""
# PyBorg
self.pyborg = my_pyborg
# load settings
self.settings = cfgfile.cfgset()
self.settings.load("pyborg-msn.cfg",
{ "myname": ("The bot's nickname", "PyBorg"),
"msn_passport": ("Reported passport account", "passport@hotmail.com"),
"msn_password": ("Reported password account", "password"),
"owners": ("Owner(s) passport account", [ "owner@hotmail.com" ]),
"password": ("password for control the bot (Edit manually !)", "")
} )
self.owners = self.settings.owners[:]
def our_start(self):
print "Connecting to msn..."
msnp.Session.__init__(self, self.MsnListener(self))
self.login(self.settings.msn_passport, self.settings.msn_password)
if self.logged_in: print "connected"
self.sync_friend_list()
while True:
bot.process(chats = True)
time.sleep(1)
class MsnListener(msnp.SessionCallbacks):
def __init__(self, bot):
self.bot = bot
def chat_started(self, chat):
callbacks = ModMSN.MsnChatActions(bot)
chat.callbacks = callbacks
callbacks.chat = chat
class MsnChatActions(msnp.ChatCallbacks):
# Command list for this module
commandlist = "MSN Module Commands:\n!nick, !owner"
# Detailed command description dictionary
commanddict = {
"nick": "Owner command. Usage: !nick nickname\nChange nickname",
"quit": "Owner command. Usage: !quit\nMake the bot quit IRC",
"owner": "Usage: !owner password\nAllow to become owner of the bot"
}
def __init__(self, bot):
self.bot = bot
def message_received(self, passport_id, display_name, text, charset):
print '%s: %s' % (passport_id, text)
if text[0] == '!':
if self.msn_command(passport_id, display_name, text, charset) == 1:
return
self.chat.send_typing()
if passport_id in bot.owners:
bot.pyborg.process_msg(self, text, 100, 1, (charset, display_name, text), owner=1)
else:
thread.start_new_thread(bot.pyborg.process_msg, (self, text, 100, 1, (charset, display_name, text)))
def msn_command(self, passport_id, display_name, text, charset):
command_list = text.split()
command_list[0] = command_list[0].lower()
if command_list[0] == "!owner" and len(command_list) > 1 and passport_id not in bot.owners:
if command_list[1] == bot.settings.password:
bot.owners.append(passport_id)
self.output("You've been added to owners list", (charset, display_name, text))
else:
self.output("try again", (charset))
if passport_id in bot.owners:
if command_list[0] == '!nick' and len(command_list) > 1:
bot.change_display_name(command_list[1])
def output(self, message, args):
charset, display_name, text = args
message = message.replace("#nick", display_name)
print "[%s] <%s> > %s> %s" % ( get_time(), display_name, bot.display_name, text)
print "[%s] <%s> > %s> %s" % ( get_time(), bot.display_name, display_name, message)
self.chat.send_message(message, charset)
if __name__ == "__main__":
if "--help" in sys.argv:
print "Pyborg msn bot. Usage:"
print " pyborg-msn.py"
print "Defaults stored in pyborg-msn.cfg"
print
sys.exit(0)
# start the pyborg
my_pyborg = pyborg.pyborg()
bot = ModMSN(my_pyborg, sys.argv)
try:
bot.our_start()
except KeyboardInterrupt, e:
pass
except SystemExit, e:
pass
except:
traceback.print_exc()
c = raw_input("Ooops! It looks like Pyborg has crashed. Would you like to save its dictionary? (y/n) ")
if c.lower()[:1] == 'n':
sys.exit(0)
bot.logout()
my_pyborg.save_all()
del my_pyborg
| gpl-3.0 | -8,932,053,356,463,326,000 | 27.785714 | 105 | 0.673697 | false |
developerworks/horizon | horizon/utils/validators.py | 1 | 1149 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core import validators
from django.core.exceptions import ValidationError
ipv4_cidr_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)' # 0-255
'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}' # 3x .0-255
'/(3[0-2]|[1-2]?\d)$') # /0-32
validate_ipv4_cidr = validators.RegexValidator(ipv4_cidr_re)
def validate_port_range(port):
    if port not in range(-1, 65536):
raise ValidationError("Not a valid port number")
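# Illustrative behaviour (not part of Horizon's public API):
#   validate_ipv4_cidr('192.168.1.0/24')   passes silently
#   validate_ipv4_cidr('192.168.1.0/33')   raises ValidationError (prefix > 32)
#   validate_port_range(8080)              passes silently
#   validate_port_range(70000)             raises ValidationError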
| apache-2.0 | -6,168,565,720,718,603,000 | 33.818182 | 79 | 0.656223 | false |
kevinpt/ripyl | test/test_stats.py | 1 | 3176 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''Ripyl protocol decode library
Statistical operations test suite
'''
# Copyright © 2013 Kevin Thibedeau
# This file is part of Ripyl.
# Ripyl is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Ripyl is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with Ripyl. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, division
import unittest
import random
import ripyl.util.stats as stats
# def fequal(a, b, epsilon=0.0001):
# '''Compare floating point values for relative equality'''
# return abs(math.log10(a) - math.log10(b)) <= epsilon
class TestOnlineStats(unittest.TestCase):
def test_basic(self):
os = stats.OnlineStats()
data = [1.0] * 100
for n in data:
os.accumulate(n)
self.assertAlmostEqual(os.mean(), 1.0, msg='Invalid mean')
self.assertAlmostEqual(os.variance(), 0.0, msg='Invalid variance')
self.assertAlmostEqual(os.std(), 0.0, msg='Invalid std. dev.')
os.reset()
self.assertAlmostEqual(os.mean(), 0.0, msg='Invalid mean')
self.assertAlmostEqual(os.variance(), 0.0, msg='Invalid variance')
#data = range(11)
#for n in data:
# os.accumulate(n)
os.accumulate_array(range(11))
self.assertAlmostEqual(os.mean(), 5.0, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 3.16227766, msg='Invalid std. dev.')
def test_rand(self):
os = stats.OnlineStats()
# uniform random numbers
for i in xrange(10):
os.reset()
for _ in xrange(10000): os.accumulate(random.uniform(0.0, 1.0))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.28, places=1, msg='Invalid std. dev.')
# gaussian random numbers
for i in xrange(10):
os.reset()
for _ in xrange(1000): os.accumulate(random.gauss(0.5, 0.1))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.1, places=1, msg='Invalid std. dev.')
# gaussian random numbers 2
for i in xrange(10):
os.reset()
for _ in xrange(1000): os.accumulate(random.gauss(0.5, 0.3))
self.assertAlmostEqual(os.mean(), 0.5, places=1, msg='Invalid mean')
self.assertAlmostEqual(os.std(), 0.3, places=1, msg='Invalid std. dev.')
| lgpl-3.0 | 1,296,993,798,884,937,000 | 33.674157 | 85 | 0.595591 | false |
pyfa-org/eos | tests/integration/restriction/restriction/test_drone_bandwidth.py | 1 | 10465 | # ==============================================================================
# Copyright (C) 2011 Diego Duclos
# Copyright (C) 2011-2018 Anton Vorobyov
#
# This file is part of Eos.
#
# Eos is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Eos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Eos. If not, see <http://www.gnu.org/licenses/>.
# ==============================================================================
from eos import Drone
from eos import Restriction
from eos import Ship
from eos import State
from eos.const.eos import ModAffecteeFilter
from eos.const.eos import ModDomain
from eos.const.eos import ModOperator
from eos.const.eve import AttrId
from eos.const.eve import EffectCategoryId
from tests.integration.restriction.testcase import RestrictionTestCase
class TestDroneBandwidth(RestrictionTestCase):
"""Check functionality of drone bandwidth restriction."""
def setUp(self):
RestrictionTestCase.setUp(self)
self.mkattr(attr_id=AttrId.drone_bandwidth)
self.mkattr(attr_id=AttrId.drone_bandwidth_used)
def test_fail_single(self):
        # When the ship provides drone bandwidth output but a single consumer
        # demands more, an error should be raised
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 40)
self.assertEqual(error.total_use, 50)
self.assertEqual(error.item_use, 50)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_multiple(self):
        # When multiple consumers each require less than the drone bandwidth
        # output alone, but in sum want more than the total output, it should
        # be an error
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 25}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 20}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error1)
self.assertEqual(error1.output, 40)
self.assertEqual(error1.total_use, 45)
self.assertEqual(error1.item_use, 25)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error2)
self.assertEqual(error2.output, 40)
self.assertEqual(error2.total_use, 45)
self.assertEqual(error2.item_use, 20)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_modified(self):
# Make sure modified drone bandwidth values are taken
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
src_attr = self.mkattr()
modifier = self.mkmod(
affectee_filter=ModAffecteeFilter.item,
affectee_domain=ModDomain.self,
affectee_attr_id=AttrId.drone_bandwidth_used,
operator=ModOperator.post_mul,
affector_attr_id=src_attr.id)
effect = self.mkeffect(
category_id=EffectCategoryId.passive,
modifiers=[modifier])
item = Drone(
self.mktype(
attrs={AttrId.drone_bandwidth_used: 50, src_attr.id: 2},
effects=[effect]).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 50)
self.assertEqual(error.total_use, 100)
self.assertEqual(error.item_use, 100)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_absent(self):
# When stats module does not specify output, make sure it's assumed to
# be 0
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 5}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 5)
self.assertEqual(error.item_use, 5)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_attr_absent(self):
self.fit.ship = Ship(self.mktype().id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 50)
self.assertEqual(error.item_use, 50)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_fail_ship_not_loaded(self):
self.fit.ship = Ship(self.allocate_type_id())
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 5}).id,
state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error)
self.assertEqual(error.output, 0)
self.assertEqual(error.total_use, 5)
self.assertEqual(error.item_use, 5)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_mix_usage_zero(self):
# If some item has zero usage and drone bandwidth error is still raised,
# check it's not raised for item with zero usage
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 100}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 0}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNotNone(error1)
self.assertEqual(error1.output, 50)
self.assertEqual(error1.total_use, 100)
self.assertEqual(error1.item_use, 100)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass(self):
# When total consumption is less than output, no errors should be raised
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 50}).id)
item1 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 25}).id,
state=State.online)
self.fit.drones.add(item1)
item2 = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 20}).id,
state=State.online)
self.fit.drones.add(item2)
# Action
error1 = self.get_error(item1, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error1)
# Action
error2 = self.get_error(item2, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error2)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_state(self):
# When item isn't online, it shouldn't consume anything
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(
self.mktype(attrs={AttrId.drone_bandwidth_used: 50}).id,
state=State.offline)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_attr_absent(self):
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 40}).id)
item = Drone(self.mktype().id, state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
def test_pass_item_not_loaded(self):
self.fit.ship = Ship(self.mktype(
attrs={AttrId.drone_bandwidth: 0}).id)
item = Drone(self.allocate_type_id(), state=State.online)
self.fit.drones.add(item)
# Action
error = self.get_error(item, Restriction.drone_bandwidth)
# Verification
self.assertIsNone(error)
# Cleanup
self.assert_solsys_buffers_empty(self.fit.solar_system)
self.assert_log_entries(0)
| lgpl-3.0 | 2,386,085,293,114,946,600 | 37.903346 | 80 | 0.624367 | false |
AndreasMadsen/course-02456-sparsemax | python_reference/sparsemax.py | 1 | 2098 | import numpy as np
def forward(z):
"""forward pass for sparsemax
    This will process a 2d-array $z$, where axis 1 (each row) is assumed to
    be the z-vector.
"""
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
# use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax returns the first match, but the last
    # one is required here, so the rows are reversed with [:, ::-1] and the
    # result is subtracted from z.shape[1] to recover the original position.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
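# Worked example (illustrative): for z = [[1.0, 0.5, -1.0]] the sorted
# cumulative sums give k(z) = 2 and tau(z) = 0.25, so forward() returns
# [[0.75, 0.25, 0.0]] -- a sparse probability vector that sums to 1.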
def jacobian(z):
"""jacobian for sparsemax
    This will process a 2d-array $z$, where axis 1 (each row) is assumed to
    be the z-vector.
"""
# Construct S(z)
# Possibly this could be reduced to just calculating k(z)
p = forward(z)
s = p > 0
s_float = s.astype('float64')
# row-wise outer product
# http://stackoverflow.com/questions/31573856/theano-row-wise-outer-product-between-two-matrices
jacobian = s_float[:, :, np.newaxis] * s_float[:, np.newaxis, :]
jacobian /= - np.sum(s, axis=1)[:, np.newaxis, np.newaxis]
# add delta_ij
obs, index = s.nonzero()
jacobian[obs, index, index] += 1
return jacobian
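# Continuing the example above (illustrative): the support of [0.75, 0.25, 0.0]
# is {0, 1}, so the row's Jacobian is
# [[0.5, -0.5, 0.0], [-0.5, 0.5, 0.0], [0.0, 0.0, 0.0]],
# i.e. diag(s) minus the outer product s s^T divided by the support size.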
def Rop(z, v):
"""Jacobian vector product (Rop) for sparsemax
This calculates [J(z_i) * v_i, ...]. `z` is a 2d-array, where axis 1
    (each row) is assumed to be the z-vector. `v` is a matrix where
axis 1 (each row) is assumed to be the `v-vector`.
"""
# Construct S(z)
p = forward(z)
s = p > 0
# Calculate \hat{v}, which will be a vector (scalar for each z)
v_hat = np.sum(v * s, axis=1) / np.sum(s, axis=1)
# Calculates J(z) * v
return s * (v - v_hat[:, np.newaxis])
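# Self-contained smoke test (added for illustration; it only runs when this
# file is executed directly, so importing the module is unchanged):
if __name__ == '__main__':
    z = np.array([[1.0, 0.5, -1.0],
                  [0.1, 0.1, 0.1]])
    p = forward(z)
    print('forward:', p)                         # rows sum to 1, may contain zeros
    print('row sums:', p.sum(axis=1))
    print('jacobian shape:', jacobian(z).shape)  # (2, 3, 3)
    v = np.ones_like(z)
    print('Rop:', Rop(z, v))                     # J(z_i) * v_i for each row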
| mit | 6,878,382,542,096,882,000 | 27.739726 | 100 | 0.596282 | false |
samdroid-apps/browse | pdfviewer.py | 1 | 21028 | # Copyright (C) 2012, One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os
import logging
import tempfile
from gettext import gettext as _
from gi.repository import GObject
from gi.repository import Gtk
from gi.repository import GLib
from gi.repository import WebKit
from sugar3.graphics.toolbarbox import ToolbarBox
from sugar3.graphics.toolbutton import ToolButton
from sugar3.graphics.icon import Icon
from sugar3.graphics.progressicon import ProgressIcon
from sugar3.graphics import style
from sugar3.datastore import datastore
from sugar3.activity import activity
from sugar3.bundle.activitybundle import ActivityBundle
class EvinceViewer(Gtk.Overlay):
"""PDF viewer with a toolbar overlay for basic navigation and an
option to save to Journal.
"""
__gsignals__ = {
'save-to-journal': (GObject.SignalFlags.RUN_FIRST,
None,
([])),
'open-link': (GObject.SignalFlags.RUN_FIRST,
None,
([str])),
}
def __init__(self, uri):
GObject.GObject.__init__(self)
self._uri = uri
# delay Evince import until is needed to improve activity startup time
from gi.repository import EvinceDocument
from gi.repository import EvinceView
# Create Evince objects to handle the PDF in the URI:
EvinceDocument.init()
self._doc = EvinceDocument.Document.factory_get_document(uri)
self._view = EvinceView.View()
self._model = EvinceView.DocumentModel()
self._model.set_document(self._doc)
self._view.set_model(self._model)
self._EVINCE_MODE_FREE = EvinceView.SizingMode.FREE
self._view.connect('external-link', self.__handle_link_cb)
self._model.connect('page-changed', self.__page_changed_cb)
self._back_page_button = None
self._forward_page_button = None
self._toolbar_box = self._create_toolbar()
self._update_nav_buttons()
self._toolbar_box.set_halign(Gtk.Align.FILL)
self._toolbar_box.set_valign(Gtk.Align.END)
self.add_overlay(self._toolbar_box)
self._toolbar_box.show()
scrolled_window = Gtk.ScrolledWindow()
self.add(scrolled_window)
scrolled_window.show()
scrolled_window.add(self._view)
self._view.show()
def _create_toolbar(self):
toolbar_box = ToolbarBox()
zoom_out_button = ToolButton('zoom-out')
zoom_out_button.set_tooltip(_('Zoom out'))
zoom_out_button.connect('clicked', self.__zoom_out_cb)
toolbar_box.toolbar.insert(zoom_out_button, -1)
zoom_out_button.show()
zoom_in_button = ToolButton('zoom-in')
zoom_in_button.set_tooltip(_('Zoom in'))
zoom_in_button.connect('clicked', self.__zoom_in_cb)
toolbar_box.toolbar.insert(zoom_in_button, -1)
zoom_in_button.show()
zoom_original_button = ToolButton('zoom-original')
zoom_original_button.set_tooltip(_('Actual size'))
zoom_original_button.connect('clicked', self.__zoom_original_cb)
toolbar_box.toolbar.insert(zoom_original_button, -1)
zoom_original_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._back_page_button = ToolButton('go-previous-paired')
self._back_page_button.set_tooltip(_('Previous page'))
self._back_page_button.props.sensitive = False
self._back_page_button.connect('clicked', self.__go_back_page_cb)
toolbar_box.toolbar.insert(self._back_page_button, -1)
self._back_page_button.show()
self._forward_page_button = ToolButton('go-next-paired')
self._forward_page_button.set_tooltip(_('Next page'))
self._forward_page_button.props.sensitive = False
self._forward_page_button.connect('clicked', self.__go_forward_page_cb)
toolbar_box.toolbar.insert(self._forward_page_button, -1)
self._forward_page_button.show()
separator = Gtk.SeparatorToolItem()
separator.props.draw = True
toolbar_box.toolbar.insert(separator, -1)
separator.show()
self._save_to_journal_button = ToolButton('save-to-journal')
self._save_to_journal_button.set_tooltip(_('Save PDF to Journal'))
self._save_to_journal_button.connect('clicked',
self.__save_to_journal_button_cb)
toolbar_box.toolbar.insert(self._save_to_journal_button, -1)
self._save_to_journal_button.show()
return toolbar_box
def disable_journal_button(self):
self._save_to_journal_button.props.sensitive = False
def __handle_link_cb(self, widget, url):
self.emit('open-link', url.get_uri())
def __page_changed_cb(self, model, page_from, page_to):
self._update_nav_buttons()
def __zoom_out_cb(self, widget):
self.zoom_out()
def __zoom_in_cb(self, widget):
self.zoom_in()
def __zoom_original_cb(self, widget):
self.zoom_original()
def __go_back_page_cb(self, widget):
self._view.previous_page()
def __go_forward_page_cb(self, widget):
self._view.next_page()
def __save_to_journal_button_cb(self, widget):
self.emit('save-to-journal')
self._save_to_journal_button.props.sensitive = False
def _update_nav_buttons(self):
current_page = self._model.props.page
self._back_page_button.props.sensitive = current_page > 0
self._forward_page_button.props.sensitive = \
current_page < self._doc.get_n_pages() - 1
def zoom_original(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._model.props.scale = 1.0
def zoom_in(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_in()
def zoom_out(self):
self._model.props.sizing_mode = self._EVINCE_MODE_FREE
self._view.zoom_out()
def get_pdf_title(self):
return self._doc.get_title()
class DummyBrowser(GObject.GObject):
"""Has the same interface as browser.Browser ."""
__gsignals__ = {
'new-tab': (GObject.SignalFlags.RUN_FIRST, None, ([str])),
'tab-close': (GObject.SignalFlags.RUN_FIRST, None, ([object])),
'selection-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
'security-status-changed': (GObject.SignalFlags.RUN_FIRST, None, ([])),
}
__gproperties__ = {
"title": (object, "title", "Title", GObject.PARAM_READWRITE),
"uri": (object, "uri", "URI", GObject.PARAM_READWRITE),
"progress": (object, "progress", "Progress", GObject.PARAM_READWRITE),
"load-status": (object, "load status", "a WebKit LoadStatus",
GObject.PARAM_READWRITE),
}
def __init__(self, tab):
GObject.GObject.__init__(self)
self._tab = tab
self._title = ""
self._uri = ""
self._progress = 0.0
self._load_status = WebKit.LoadStatus.PROVISIONAL
self.security_status = None
def do_get_property(self, prop):
if prop.name == 'title':
return self._title
elif prop.name == 'uri':
return self._uri
elif prop.name == 'progress':
return self._progress
elif prop.name == 'load-status':
return self._load_status
else:
raise AttributeError, 'Unknown property %s' % prop.name
def do_set_property(self, prop, value):
if prop.name == 'title':
self._title = value
elif prop.name == 'uri':
self._uri = value
elif prop.name == 'progress':
self._progress = value
elif prop.name == 'load-status':
self._load_status = value
else:
raise AttributeError, 'Unknown property %s' % prop.name
def get_title(self):
return self._title
def get_uri(self):
return self._uri
def get_progress(self):
return self._progress
def get_load_status(self):
return self._load_status
def emit_new_tab(self, uri):
self.emit('new-tab', uri)
def emit_close_tab(self):
self.emit('tab-close', self._tab)
def get_history(self):
return [{'url': self.props.uri, 'title': self.props.title}]
def can_undo(self):
return False
def can_redo(self):
return False
def can_go_back(self):
return False
def can_go_forward(self):
return False
def can_copy_clipboard(self):
return False
def can_paste_clipboard(self):
return False
def set_history_index(self, index):
pass
def get_history_index(self):
return 0
def set_zoom_level(self, zoom_level):
pass
def get_zoom_level(self):
return 0
def stop_loading(self):
self._tab.close_tab()
def reload(self):
pass
def load_uri(self, uri):
pass
def grab_focus(self):
pass
class PDFProgressMessageBox(Gtk.EventBox):
def __init__(self, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
icon = ProgressIcon(icon_name='book',
pixel_size=style.LARGE_ICON_SIZE,
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_SELECTION_GREY.get_svg())
self.progress_icon = icon
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
label = Gtk.Label()
color = style.COLOR_BUTTON_GREY.get_html()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Cancel'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='dialog-cancel',
pixel_size=style.SMALL_ICON_SIZE)
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFErrorMessageBox(Gtk.EventBox):
def __init__(self, title, message, button_callback):
Gtk.EventBox.__init__(self)
self.modify_bg(Gtk.StateType.NORMAL,
style.COLOR_WHITE.get_gdk_color())
alignment = Gtk.Alignment.new(0.5, 0.5, 0.1, 0.1)
self.add(alignment)
alignment.show()
box = Gtk.VBox()
alignment.add(box)
box.show()
# Get the icon of this activity through the bundle path.
bundle_path = activity.get_bundle_path()
activity_bundle = ActivityBundle(bundle_path)
icon = Icon(pixel_size=style.LARGE_ICON_SIZE,
file=activity_bundle.get_icon(),
stroke_color=style.COLOR_BUTTON_GREY.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
box.pack_start(icon, expand=True, fill=False, padding=0)
icon.show()
color = style.COLOR_BUTTON_GREY.get_html()
label = Gtk.Label()
label.set_markup('<span weight="bold" color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(title)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
label = Gtk.Label()
label.set_markup('<span color="%s">%s</span>' % ( \
color, GLib.markup_escape_text(message)))
box.pack_start(label, expand=True, fill=False, padding=0)
label.show()
button_box = Gtk.HButtonBox()
button_box.set_layout(Gtk.ButtonBoxStyle.CENTER)
box.pack_start(button_box, False, True, 0)
button_box.show()
button = Gtk.Button(label=_('Try again'))
button.connect('clicked', button_callback)
button.props.image = Icon(icon_name='entry-refresh',
pixel_size=style.SMALL_ICON_SIZE,
stroke_color=style.COLOR_WHITE.get_svg(),
fill_color=style.COLOR_TRANSPARENT.get_svg())
button_box.pack_start(button, expand=True, fill=False, padding=0)
button.show()
class PDFTabPage(Gtk.HBox):
    """Shows a basic PDF viewer, downloading the file first if the PDF is
    in a remote location.
    While the download is in progress, a placeholder message is displayed.
"""
def __init__(self):
GObject.GObject.__init__(self)
self._browser = DummyBrowser(self)
self._message_box = None
self._evince_viewer = None
self._pdf_uri = None
self._requested_uri = None
def setup(self, requested_uri, title=None):
self._requested_uri = requested_uri
# The title may be given from the Journal:
if title is not None:
self._browser.props.title = title
self._browser.props.uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
# show PDF directly if the file is local (from the system tree
# or from the journal)
if requested_uri.startswith('file://'):
self._pdf_uri = requested_uri
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf()
elif requested_uri.startswith('journal://'):
self._pdf_uri = self._get_path_from_journal(requested_uri)
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self._show_pdf(from_journal=True)
# download first if file is remote
elif requested_uri.startswith('http://'):
self._download_from_http(requested_uri)
def _get_browser(self):
return self._browser
browser = GObject.property(type=object, getter=_get_browser)
def _show_pdf(self, from_journal=False):
self._evince_viewer = EvinceViewer(self._pdf_uri)
self._evince_viewer.connect('save-to-journal',
self.__save_to_journal_cb)
self._evince_viewer.connect('open-link',
self.__open_link_cb)
# disable save to journal if the PDF is already loaded from
# the journal:
if from_journal:
self._evince_viewer.disable_journal_button()
self._evince_viewer.show()
self.pack_start(self._evince_viewer, True, True, 0)
# If the PDF has a title, set it as the browse page title,
# otherwise use the last part of the URI. Only when the title
# was not set already from the Journal.
if from_journal:
self._browser.props.title = self._browser.props.title
return
pdf_title = self._evince_viewer.get_pdf_title()
if pdf_title is not None:
self._browser.props.title = pdf_title
else:
self._browser.props.title = os.path.basename(self._requested_uri)
def _get_path_from_journal(self, journal_uri):
"""Get the system tree URI of the file for the Journal object."""
journal_id = self.__journal_id_from_uri(journal_uri)
jobject = datastore.get(journal_id)
return 'file://' + jobject.file_path
def _download_from_http(self, remote_uri):
"""Download the PDF from a remote location to a temporal file."""
# Display a message
self._message_box = PDFProgressMessageBox(
message=_("Downloading document..."),
button_callback=self.close_tab)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
# Figure out download URI
temp_path = os.path.join(activity.get_activity_root(), 'instance')
if not os.path.exists(temp_path):
os.makedirs(temp_path)
fd, dest_path = tempfile.mkstemp(dir=temp_path)
self._pdf_uri = 'file://' + dest_path
network_request = WebKit.NetworkRequest.new(remote_uri)
self._download = WebKit.Download.new(network_request)
self._download.set_destination_uri('file://' + dest_path)
# FIXME: workaround for SL #4385
# self._download.connect('notify::progress', self.__download_progress_cb)
self._download.connect('notify::current-size',
self.__current_size_changed_cb)
self._download.connect('notify::status', self.__download_status_cb)
self._download.connect('error', self.__download_error_cb)
self._download.start()
def __current_size_changed_cb(self, download, something):
current_size = download.get_current_size()
total_size = download.get_total_size()
progress = current_size / float(total_size)
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_progress_cb(self, download, data):
progress = download.get_progress()
self._browser.props.progress = progress
self._message_box.progress_icon.update(progress)
def __download_status_cb(self, download, data):
status = download.get_status()
if status == WebKit.DownloadStatus.STARTED:
self._browser.props.load_status = WebKit.LoadStatus.PROVISIONAL
elif status == WebKit.DownloadStatus.FINISHED:
self._browser.props.load_status = WebKit.LoadStatus.FINISHED
self.remove(self._message_box)
self._message_box = None
self._show_pdf()
elif status == WebKit.DownloadStatus.CANCELLED:
logging.debug('Download PDF canceled')
def __download_error_cb(self, download, err_code, err_detail, reason):
logging.debug('Download error! code %s, detail %s: %s' % \
(err_code, err_detail, reason))
title = _('This document could not be loaded')
self._browser.props.title = title
if self._message_box is not None:
self.remove(self._message_box)
self._message_box = PDFErrorMessageBox(
title=title,
message=_('Please make sure you are connected to the Internet.'),
button_callback=self.reload)
self.pack_start(self._message_box, True, True, 0)
self._message_box.show()
def reload(self, button=None):
self.remove(self._message_box)
self._message_box = None
self.setup(self._requested_uri)
def close_tab(self, button=None):
self._browser.emit_close_tab()
def cancel_download(self):
self._download.cancel()
def __journal_id_to_uri(self, journal_id):
"""Return an URI for a Journal object ID."""
return "journal://" + journal_id + ".pdf"
def __journal_id_from_uri(self, journal_uri):
"""Return a Journal object ID from an URI."""
return journal_uri[len("journal://"):-len(".pdf")]
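    # Example of the mapping above (illustrative): a Journal object id of
    # "abc123" becomes "journal://abc123.pdf" and is recovered unchanged by
    # __journal_id_from_uri().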
def __save_to_journal_cb(self, widget):
"""Save the PDF in the Journal.
Put the PDF title as the title, or if the PDF doesn't have
one, use the filename instead. Put the requested uri as the
description.
"""
jobject = datastore.create()
jobject.metadata['title'] = self._browser.props.title
jobject.metadata['description'] = _('From: %s') % self._requested_uri
jobject.metadata['mime_type'] = "application/pdf"
jobject.file_path = self._pdf_uri[len("file://"):]
datastore.write(jobject)
# display the new URI:
self._browser.props.uri = self.__journal_id_to_uri(jobject.object_id)
def __open_link_cb(self, widget, uri):
"""Open the external link of a PDF in a new tab."""
self._browser.emit_new_tab(uri)
| gpl-2.0 | 9,211,314,109,004,162,000 | 33.872305 | 81 | 0.606097 | false |
eddieantonio/imgcat | libexec/gen_img.py | 1 | 2780 | #!/usr/bin/env python
# Copyright (c) 2014–2018, Eddie Antonio Santos <easantos@ualberta.ca>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Generates images for tests.
pip install --user PyYAML Pillow
- 1px_256_table.png
    A PNG where every pixel EXACTLY corresponds to a
256 color value.
- 1px_256_table.jpg
    A JPEG where every pixel ATTEMPTS to correspond to
a 256 color value. But they won't.
"""
import os
import sys
import yaml
from PIL import Image
TEST_DIR = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test')
WIDTH = 12
# 256 color cube, greyscale ramp, 16 colors
HEIGHT = 18 + 2 + 2
with open('xterm-256color.yaml') as f:
_COLORS = yaml.load(f)
def tupleize(name, x):
i = lambda x: int(x, 16)
rgb = (i(x[1:3]), i(x[3:5]), i(x[5:7]))
return name, rgb
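# Example (illustrative): tupleize('Red3', '#d70000') -> ('Red3', (215, 0, 0));
# the name comes straight from the xterm-256color.yaml table loaded above.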
COLORS = [
[tupleize(*arg) for arg in _COLORS[':xterm256']],
[tupleize(*arg) for arg in _COLORS[':xtermGreyscale']],
[tupleize(*arg) for arg in _COLORS[':xterm16']]
]
def main(*args):
im = Image.new('RGB', (WIDTH, HEIGHT), (0, 0, 0))
row = 0
col = 0
base_row = 0
# 18 rows of 6**3 color cube.
for _, val in COLORS[0]:
im.putpixel((col, row), val)
row += 1
# Next column...
if (row % 6) == 0:
col += 1
if (col % 12) == 0:
base_row += 6
col = 0
row = base_row
assert row == 18
# 2 rows of greyscale
for _, val in COLORS[1]:
im.putpixel((col, row), val)
col += 1
# Next row...
if (col % 12) == 0:
row += 1
col = 0
assert row == 20
# 2 rows of 16 color.
for _, val in COLORS[2]:
im.putpixel((col, row), val)
col += 1
# Next row...
if (col % 8) == 0:
row += 1
col = 0
# Save 'em
im.save(os.path.join(TEST_DIR, '1px_256_table.png'))
im.save(os.path.join(TEST_DIR, '1px_256_table.jpg'), quality=40)
if __name__ == '__main__':
sys.exit(main(*sys.argv))
| isc | -5,803,299,474,392,289,000 | 24.254545 | 92 | 0.596472 | false |
Carnon/nlp | TextClassify/textclassify/textdata.py | 1 | 2565 | import os
import codecs
import re
import jieba
import numpy as np
from tqdm import tqdm
from tensorflow.contrib import learn
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
class TextData(object):
def __init__(self,args):
self.args = args
corpus_dir = self.args.corpus_dir
self.load_data(corpus_dir)
def load_data(self,corpus_dir):
self.text = []
self.label = []
self.label_name = {}
self.max_doc_len = 0
self.label_num = 0
self.vocab_size = 0
raw_text = []
raw_label = []
tag_list = os.listdir(corpus_dir)
for tag in tqdm(tag_list,desc='load_data',leave=False):
data_path = os.path.join(corpus_dir,tag)
for data_file in os.listdir(data_path):
file_name = os.path.join(data_path,data_file)
with codecs.open(file_name,'r',encoding='utf-8') as fr_raw:
raw_content = fr_raw.read()
text_word = [word for word in jieba.cut(raw_content) if re.match(u".*[\u4e00-\u9fa5]+", word)]
if text_word.__len__() < self.args.max_doc_len:
raw_text.append(text_word)
raw_label.append(tag)
labelEncode = LabelEncoder()
num_label = labelEncode.fit_transform(raw_label)
self.label = OneHotEncoder(sparse=False).fit_transform(np.reshape(num_label,[-1,1]))
self.label_num = len(labelEncode.classes_)
#self.max_doc_len = max([len(doc) for doc in raw_text])
self.max_doc_len = self.args.max_doc_len
vocab_processor = learn.preprocessing.VocabularyProcessor(self.max_doc_len,tokenizer_fn=tokenizer_fn)
self.text = np.array(list(vocab_processor.fit_transform(raw_text)))
self.vocab_size = len(vocab_processor.vocabulary_)
def shuffle_data(self):
np.random.seed(3)
shuffled = np.random.permutation(np.arange(len(self.label)))
self.text = self.text[shuffled]
self.label = self.label[shuffled]
def get_batches(self):
self.shuffle_data()
sample_size = len(self.text)
batch_size = self.args.batch_size
for i in range(0,sample_size,batch_size):
yield self.text[i:min(i+batch_size,sample_size)],self.label[i:min(i+batch_size,sample_size)]
#chinese cut word has completed instead of tensorflow cut word that cannt support chinese word
def tokenizer_fn(iterator):
for value in iterator:
yield value
| apache-2.0 | 9,036,540,684,069,582,000 | 35.642857 | 114 | 0.617934 | false |
RoboCupULaval/UI-Debug | test/testQPainterWidget.py | 1 | 2130 | # Under GNU GPLv3 License, see LICENSE.txt
import sys, time
from random import randint
from PyQt5.QtWidgets import QWidget, QApplication
from PyQt5.QtGui import QPainter, QPen, QBrush, QColor
from PyQt5.QtCore import pyqtSignal, QMutex, QTimer
__author__ = 'jbecirovski'
class TestPainter(QWidget):
frame_rate = 1000
def __init__(self):
QWidget.__init__(self)
self.best_score = 0
# View Screen Draws
self._list_draw = [[randint(0, 1000), randint(0, 1000)]]
self.setGeometry(200, 200, 1011, 720)
# View Screen Core
self._emit_signal = pyqtSignal()
self._mutex = QMutex()
self._timer = QTimer()
self._timer.timeout.connect(self.appendItem)
self._timer.start(1000 / TestPainter.frame_rate)
self.show()
def appendItem(self):
self._emit_signal()
self._list_draw.append([randint(0, 1000), randint(0, 1000)])
self.refresh()
def refresh(self):
#QMutexLocker(self._mutex).relock()
self.update()
#QMutexLocker(self._mutex).unlock()
def paintEvent(self, event):
painter = QPainter()
painter.begin(self)
if self._list_draw is not None:
t_ref = time.time()
for draw in self._list_draw:
painter.setBrush(QBrush(QColor(255, 0, 0)))
painter.setPen(QPen())
painter.drawRect(draw[0], draw[1], 100, 100)
t_final = (time.time() - t_ref) * 1000
painter.drawText(100, 80, 'BEST SCORE: {}'.format(self.best_score))
try:
painter.drawText(100, 100, '{}| {:.0f} ms | {:.0f} hertz| {:.4f} dbms'.format(len(self._list_draw), t_final, 1 / (t_final/1000), len(self._list_draw) / t_final))
if 1 / (t_final / 1000) < 30:
self.best_score = len(self._list_draw)
self._list_draw.clear()
except Exception as e:
pass
painter.end()
if __name__ == '__main__':
app = QApplication(sys.argv)
f = TestPainter()
f.show()
sys.exit(app.exec_())
| mit | 2,455,249,001,436,098,000 | 31.272727 | 177 | 0.562441 | false |
sebdelsol/pyload | module/plugins/hoster/CzshareCom.py | 1 | 5871 | # -*- coding: utf-8 -*-
#
# Test links:
# http://czshare.com/5278880/random.bin
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
from module.utils import parseFileSize
class CzshareCom(SimpleHoster):
__name__ = "CzshareCom"
__type__ = "hoster"
__version__ = "0.95"
__pattern__ = r'http://(?:www\.)?(czshare|sdilej)\.(com|cz)/(\d+/|download\.php\?).*'
__description__ = """CZshare.com hoster plugin, now Sdilej.cz"""
__license__ = "GPLv3"
__authors__ = [("zoidberg", "zoidberg@mujmail.cz")]
NAME_PATTERN = r'<div class="tab" id="parameters">\s*<p>\s*Cel. n.zev: <a href=[^>]*>(?P<N>[^<]+)</a>'
SIZE_PATTERN = r'<div class="tab" id="category">(?:\s*<p>[^\n]*</p>)*\s*Velikost:\s*(?P<S>[\d .,]+)(?P<U>[\w^_]+)\s*</div>'
OFFLINE_PATTERN = r'<div class="header clearfix">\s*<h2 class="red">'
SIZE_REPLACEMENTS = [(' ', '')]
URL_REPLACEMENTS = [(r'http://[^/]*/download.php\?.*?id=(\w+).*', r'http://sdilej.cz/\1/x/')]
FORCE_CHECK_TRAFFIC = True
FREE_URL_PATTERN = r'<a href="([^"]+)" class="page-download">[^>]*alt="([^"]+)" /></a>'
FREE_FORM_PATTERN = r'<form action="download\.php" method="post">\s*<img src="captcha\.php" id="captcha" />(.*?)</form>'
PREMIUM_FORM_PATTERN = r'<form action="/profi_down\.php" method="post">(.*?)</form>'
FORM_INPUT_PATTERN = r'<input[^>]* name="([^"]+)" value="([^"]+)"[^>]*/>'
MULTIDL_PATTERN = r'<p><font color=\'red\'>Z[^<]*PROFI.</font></p>'
USER_CREDIT_PATTERN = r'<div class="credit">\s*kredit: <strong>([\d .,]+)(\w+)</strong>\s*</div><!-- .credit -->'
def checkTrafficLeft(self):
# check if user logged in
m = re.search(self.USER_CREDIT_PATTERN, self.html)
if m is None:
self.account.relogin(self.user)
self.html = self.load(self.pyfile.url, cookies=True, decode=True)
m = re.search(self.USER_CREDIT_PATTERN, self.html)
if m is None:
return False
# check user credit
try:
credit = parseFileSize(m.group(1).replace(' ', ''), m.group(2))
self.logInfo(_("Premium download for %i KiB of Credit") % (self.pyfile.size / 1024))
self.logInfo(_("User %s has %i KiB left") % (self.user, credit / 1024))
if credit < self.pyfile.size:
self.logInfo(_("Not enough credit to download file: %s") % self.pyfile.name)
return False
except Exception, e:
# let's continue and see what happens...
self.logError(e)
return True
def handlePremium(self):
# parse download link
try:
form = re.search(self.PREMIUM_FORM_PATTERN, self.html, re.S).group(1)
inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
except Exception, e:
self.logError(e)
self.resetAccount()
# download the file, destination is determined by pyLoad
self.download("http://sdilej.cz/profi_down.php", post=inputs, disposition=True)
self.checkDownloadedFile()
def handleFree(self):
# get free url
m = re.search(self.FREE_URL_PATTERN, self.html)
if m is None:
self.error(_("FREE_URL_PATTERN not found"))
parsed_url = "http://sdilej.cz" + m.group(1)
self.logDebug("PARSED_URL:" + parsed_url)
# get download ticket and parse html
self.html = self.load(parsed_url, cookies=True, decode=True)
if re.search(self.MULTIDL_PATTERN, self.html):
self.longWait(5 * 60, 12)
try:
form = re.search(self.FREE_FORM_PATTERN, self.html, re.S).group(1)
inputs = dict(re.findall(self.FORM_INPUT_PATTERN, form))
self.pyfile.size = int(inputs['size'])
except Exception, e:
self.logError(e)
self.error(_("Form"))
# get and decrypt captcha
captcha_url = 'http://sdilej.cz/captcha.php'
for _i in xrange(5):
inputs['captchastring2'] = self.decryptCaptcha(captcha_url)
self.html = self.load(parsed_url, cookies=True, post=inputs, decode=True)
if u"<li>Zadaný ověřovací kód nesouhlasí!</li>" in self.html:
self.invalidCaptcha()
elif re.search(self.MULTIDL_PATTERN, self.html):
self.longWait(5 * 60, 12)
else:
self.correctCaptcha()
break
else:
self.fail(_("No valid captcha code entered"))
m = re.search("countdown_number = (\d+);", self.html)
self.setWait(int(m.group(1)) if m else 50)
# download the file, destination is determined by pyLoad
self.logDebug("WAIT URL", self.req.lastEffectiveURL)
m = re.search("free_wait.php\?server=(.*?)&(.*)", self.req.lastEffectiveURL)
if m is None:
self.error(_("Download URL not found"))
url = "http://%s/download.php?%s" % (m.group(1), m.group(2))
self.wait()
self.download(url)
self.checkDownloadedFile()
def checkDownloadedFile(self):
# check download
check = self.checkDownload({
"temp_offline": re.compile(r"^Soubor je do.*asn.* nedostupn.*$"),
"credit": re.compile(r"^Nem.*te dostate.*n.* kredit.$"),
"multi_dl": re.compile(self.MULTIDL_PATTERN),
"captcha_err": "<li>Zadaný ověřovací kód nesouhlasí!</li>"
})
if check == "temp_offline":
self.fail(_("File not available - try later"))
if check == "credit":
self.resetAccount()
elif check == "multi_dl":
self.longWait(5 * 60, 12)
elif check == "captcha_err":
self.invalidCaptcha()
self.retry()
getInfo = create_getInfo(CzshareCom)
| gpl-3.0 | -6,866,925,458,783,012,000 | 37.546053 | 127 | 0.558969 | false |
jamesp/jpy | jpy/maths/pde.py | 1 | 2463 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Numerical Methods of Partial Differential Equations.
Provides integration methods and other utility functions such as the RA & RAW
time filters for numerical integration of PDEs.
"""
import numpy as np
def RA_filter(phi, epsilon=0.1):
"""Robert-Asselin-Williams time filter.
phi: A tuple of phi at time levels (n-1), n, (n+1)
epsilon: The RA filter weighting
Takes variable phi at 3 timelevels (n-1), n, (n+1) and recouples the values
at (n) and (n+1).
φ_bar(n) = φ(n) + ϵ[ φ(n+1) - 2φ(n) + φ(n-1) ]
"""
_phi, phi, phi_ = phi
return (_phi, phi + epsilon*(_phi - 2.0 * phi + phi_), phi_)
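# Worked check (illustrative numbers, not from the reference): with
# phi = (1.0, 2.0, 1.0) and epsilon = 0.1 the middle value becomes
# 2.0 + 0.1*(1.0 - 4.0 + 1.0) = 1.8, so RA_filter((1.0, 2.0, 1.0)) -> (1.0, 1.8, 1.0).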
def RAW_filter(phi, nu=0.2, alpha=0.53):
"""The RAW time filter, an improvement on RA filter.
phi: A tuple of phi at time levels (n-1), n, (n+1)
nu: Equivalent to 2*ϵ; the RA filter weighting
alpha: Scaling factor for n and (n+1) timesteps.
With α=1, RAW —> RA.
For more information, see [Williams 2009].
"""
_phi, phi, phi_ = phi
d = nu*0.5*(_phi - 2.0 * phi + phi_)
return (_phi, phi+alpha*d, phi_ + (alpha-1)*d)
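# Consistency check (same illustrative numbers): with nu=0.2 and alpha=1.0,
# RAW_filter((1.0, 2.0, 1.0), nu=0.2, alpha=1.0) -> (1.0, 1.8, 1.0), i.e. it
# reduces to RA_filter with epsilon=0.1 as stated in the docstring above.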
if __name__ == '__main__':
import matplotlib.pyplot as plt
# simple harmonic osicallator example from [Williams 2009]
xt = lambda x,y,t,omega: -omega*y
yt = lambda x,y,t,omega: omega*x
x0, y0 = 1.0, 0.0
dt = 0.2
omega = 1.0
alpha = 0.53 # RAW filter parameter
t=0.0
# initialise with a single euler step
_x = x = x0
_y = y = y0
x = _x + dt*xt(x,y,t,omega)
y = _y + dt*yt(x,y,t,omega)
xs = [x0,x]
ys = [y0,y]
ts = [0, dt]
# integrate forward using leapfrog method
for t in np.arange(0+dt,100,dt):
x_ = _x + 2*dt*xt(x,y,t,omega)
y_ = _y + 2*dt*yt(x,y,t,omega)
(_x,x,x_) = RAW_filter((_x,x,x_), alpha=alpha)
(_y,y,y_) = RAW_filter((_y,y,y_), alpha=alpha)
# step variables forward
ts.append(t+dt)
_x,x = x,x_
_y,y = y,y_
xs.append(x)
ys.append(y)
ts = np.array(ts)
xs = np.array(xs)
ys = np.array(ys)
print np.array([ts,xs,ys])
plt.subplot(211)
plt.plot(ts,xs)
plt.plot(ts, np.cos(ts), 'grey')
plt.xlabel('x')
plt.subplot(212)
plt.plot(ts,ys)
plt.plot(ts, np.sin(ts), 'grey')
plt.ylabel('y')
plt.show()
# [Williams 2009] - Paul Williams. A Proposed Modification to the Robert–Asselin Time Filter.
| mit | -3,893,353,240,602,028,000 | 27.172414 | 93 | 0.565891 | false |
PragmaticMates/django-clever-selects | setup.py | 1 | 1314 | #!/usr/bin/env python
from setuptools import setup
setup(
name='django-clever-selects',
version='0.8.2',
description='Chained select box widget for Django framework using AJAX requests.',
long_description=open('README.rst').read(),
author='Pragmatic Mates',
author_email='info@pragmaticmates.com',
maintainer='Pragmatic Mates',
maintainer_email='info@pragmaticmates.com',
url='https://github.com/PragmaticMates/django-clever-selects',
packages=[
'clever_selects',
'clever_selects.templatetags'
],
include_package_data=True,
install_requires=('django',),
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Framework :: Django',
'License :: OSI Approved :: BSD License',
'Development Status :: 3 - Alpha'
],
license='BSD License',
keywords="django clever chained selects",
)
| mit | 6,410,312,218,537,401,000 | 33.578947 | 86 | 0.626332 | false |
yeleman/snisi | snisi_maint/management/commands/update-cluster-from-std-csv.py | 1 | 3120 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
import os
from django.core.management.base import BaseCommand
from optparse import make_option
from py3compat import PY2
from snisi_core.models.Entities import Entity
from snisi_core.models.Projects import Cluster, Participation
if PY2:
import unicodecsv as csv
else:
import csv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-f',
help='CSV file',
action='store',
dest='filename'),
)
def handle(self, *args, **options):
if not os.path.exists(options.get('filename') or ""):
logger.error("CSV file `{}` does not exist."
.format(options.get('filename')))
return
headers = ['action', 'slug', 'cluster', 'include_hc']
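# Illustrative rows (hypothetical slugs/cluster codes); the first CSV row is
# skipped as a header, then each row follows action,slug,cluster,include_hc, e.g.
#   add,some-entity-slug,some-cluster-slug,1
#   remove,other-entity-slug,some-cluster-slug,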
input_csv_file = open(options.get('filename'), 'r')
csv_reader = csv.DictReader(input_csv_file, fieldnames=headers)
for entry in csv_reader:
if csv_reader.line_num == 1:
continue
entity = Entity.get_or_none(entry.get('slug'))
if entity is None:
logger.warning("Entity `{}` does not exist."
.format(entry.get('slug')))
continue
cluster = Cluster.get_or_none(entry.get('cluster'))
if cluster is None:
logger.error("Cluster `{}` does not exist."
.format(entry.get('cluster')))
continue
include_hc = bool(entry.get('include_hc'))
entities = [entity]
if include_hc:
entities += entity.get_health_centers()
if entry.get('action') == 'add':
for e in entities:
p, created = Participation.objects.get_or_create(
cluster=cluster,
entity=e,
is_active=True)
logger.info(p)
if entry.get('action') == 'disable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = False
p.save()
logger.info(p)
if entry.get('action') == 'enable':
for p in Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]):
p.is_active = True
p.save()
logger.info(p)
if entry.get('action') == 'remove':
Participation.objects.filter(
cluster=cluster,
entity__slug__in=[e.slug for e in entities]).delete()
logger.info("All Done")
| mit | -835,938,490,400,088,200 | 31.842105 | 73 | 0.501923 | false |
offlinehacker/sphinxcontrib.jinjadomain | sphinxcontrib/jinjadomain.py | 1 | 3445 | """
sphinxcontrib.jinjadomain
~~~~~~~~~~~~~~~~~~~~~~~~
The jinja domain for documenting jinja templates.
:copyright: Copyright 2012 by Jaka Hudoklin
:license: BSD, see LICENSE for details.
"""
import re
import os
from sphinx import addnodes
from sphinx.domains import Domain, ObjType, Index
from sphinx.directives import ObjectDescription
from sphinx.util.docfields import GroupedField, TypedField
def jinja_resource_anchor(method, path):
path = re.sub(r'[<>:/]', '-', path)
return method.lower() + '-' + path
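# For illustration: jinja_resource_anchor('template', 'includes/header.html')
# returns 'template-includes-header.html' (path separators become dashes).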
class JinjaResource(ObjectDescription):
doc_field_types = [
TypedField('parameter', label='Parameters',
names=('param', 'parameter', 'arg', 'argument'),
typerolename='obj', typenames=('paramtype', 'type')),
]
method = "template"
def handle_signature(self, sig, signode):
method = self.method.upper() + ' '
signode += addnodes.desc_name(method, method)
signode += addnodes.desc_name(sig, sig)
fullname = "Template" + ' ' + sig
signode['method'] = self.method
signode['path'] = sig
signode['fullname'] = fullname
return (fullname, self.method, sig)
def needs_arglist(self):
return False
def add_target_and_index(self, name_cls, sig, signode):
signode['ids'].append(jinja_resource_anchor(*name_cls[1:]))
self.env.domaindata['jinja'][self.method][sig] = (self.env.docname, '')
def get_index_text(self, modname, name):
return ''
class JinjaIndex(Index):
name = 'jinjatemplates'
localname = 'templates'
shortname = 'templates'
def __init__(self, *args, **kwargs):
super(JinjaIndex, self).__init__(*args, **kwargs)
def grouping_prefix(self, path):
return os.path.split(path)[0]
def generate(self, docnames=None):
content = {}
items = ((method, path, info)
for method, routes in self.domain.routes.iteritems()
for path, info in routes.iteritems())
items = sorted(items, key=lambda item: item[1])
for method, path, info in items:
entries = content.setdefault(self.grouping_prefix(path), [])
entries.append([
path, 0, info[0],
jinja_resource_anchor(method, path), '', '', info[1]
])
content = content.items()
content.sort(key=lambda (k, v): k)
return (content, True)
class JinjaDomain(Domain):
"""Jinja domain."""
name = 'jinja'
label = 'jinja'
object_types = {
'template': ObjType('template', 'template', 'obj'),
}
directives = {
'template': JinjaResource,
}
initial_data = {
'template': {}, # path: (docname, synopsis)
}
indices = [JinjaIndex]
@property
def routes(self):
return dict((key, self.data[key]) for key in self.object_types)
def clear_doc(self, docname):
for typ, routes in self.routes.iteritems():
for path, info in routes.items():
if info[0] == docname:
del routes[path]
def get_objects(self):
for method, routes in self.routes.iteritems():
for path, info in routes.iteritems():
anchor = jinja_resource_anchor(method, path)
yield (path, path, method, info[0], anchor, 1)
def setup(app):
app.add_domain(JinjaDomain)
| bsd-2-clause | 4,406,314,289,829,284,400 | 27.708333 | 79 | 0.584906 | false |
amchoukir/ycmd | build.py | 1 | 6445 | #!/usr/bin/env python
import os
import os.path as p
import sys
major, minor = sys.version_info[ 0 : 2 ]
if major != 2 or minor < 6:
sys.exit( 'The build script requires Python version >= 2.6 and < 3.0; '
'your version of Python is ' + sys.version )
DIR_OF_THIS_SCRIPT = p.dirname( p.abspath( __file__ ) )
DIR_OF_THIRD_PARTY = p.join( DIR_OF_THIS_SCRIPT, 'third_party' )
for folder in os.listdir( DIR_OF_THIRD_PARTY ):
abs_folder_path = p.join( DIR_OF_THIRD_PARTY, folder )
if p.isdir( abs_folder_path ) and not os.listdir( abs_folder_path ):
sys.exit( 'Some folders in ' + DIR_OF_THIRD_PARTY + ' are empty; '
'you probably forgot to run:'
'\n\tgit submodule update --init --recursive\n\n' )
sys.path.insert( 0, p.abspath( p.join( DIR_OF_THIRD_PARTY, 'sh' ) ) )
sys.path.insert( 0, p.abspath( p.join( DIR_OF_THIRD_PARTY, 'argparse' ) ) )
import sh
import platform
import argparse
import multiprocessing
from distutils.spawn import find_executable
def OnMac():
return platform.system() == 'Darwin'
def PathToFirstExistingExecutable( executable_name_list ):
for executable_name in executable_name_list:
path = find_executable( executable_name )
if path:
return path
return None
def NumCores():
ycm_cores = os.environ.get( 'YCM_CORES' )
if ycm_cores:
return int( ycm_cores )
try:
return multiprocessing.cpu_count()
except NotImplementedError:
return 1
def CheckDeps():
if not PathToFirstExistingExecutable( [ 'cmake' ] ):
sys.exit( 'Please install CMake and retry.')
def CustomPythonCmakeArgs():
# The CMake 'FindPythonLibs' Module does not work properly.
# So we are forced to do its job for it.
python_prefix = sh.python_config( '--prefix' ).strip()
if p.isfile( p.join( python_prefix, '/Python' ) ):
python_library = p.join( python_prefix, '/Python' )
python_include = p.join( python_prefix, '/Headers' )
else:
which_python = sh.python(
'-c',
'import sys;i=sys.version_info;print "python%d.%d" % (i[0], i[1])'
).strip()
lib_python = '{0}/lib/lib{1}'.format( python_prefix, which_python ).strip()
if p.isfile( '{0}.a'.format( lib_python ) ):
python_library = '{0}.a'.format( lib_python )
# This check is for CYGWIN
elif p.isfile( '{0}.dll.a'.format( lib_python ) ):
python_library = '{0}.dll.a'.format( lib_python )
else:
python_library = '{0}.dylib'.format( lib_python )
python_include = '{0}/include/{1}'.format( python_prefix, which_python )
python_executable = '{0}/bin/python'.format( python_prefix )
return [
'-DPYTHON_LIBRARY={0}'.format( python_library ),
'-DPYTHON_INCLUDE_DIR={0}'.format( python_include ),
'-DPYTHON_EXECUTABLE={0}'.format( python_executable )
]
def ParseArguments():
parser = argparse.ArgumentParser()
parser.add_argument( '--clang-completer', action = 'store_true',
help = 'Build C-family semantic completion engine.')
parser.add_argument( '--system-libclang', action = 'store_true',
help = 'Use system libclang instead of downloading one '
'from llvm.org. NOT RECOMMENDED OR SUPPORTED!' )
parser.add_argument( '--omnisharp-completer', action = 'store_true',
help = 'Build C# semantic completion engine.' )
parser.add_argument( '--gocode-completer', action = 'store_true',
help = 'Build Go semantic completion engine.' )
parser.add_argument( '--system-boost', action = 'store_true',
help = 'Use the system boost instead of bundled one. '
'NOT RECOMMENDED OR SUPPORTED!')
args = parser.parse_args()
if args.system_libclang and not args.clang_completer:
sys.exit( "You can't pass --system-libclang without also passing "
"--clang-completer as well." )
return args
def GetCmakeArgs( parsed_args ):
cmake_args = []
if parsed_args.clang_completer:
cmake_args.append( '-DUSE_CLANG_COMPLETER=ON' )
if parsed_args.system_libclang:
cmake_args.append( '-DUSE_SYSTEM_LIBCLANG=ON' )
if parsed_args.system_boost:
cmake_args.append( '-DUSE_SYSTEM_BOOST=ON' )
extra_cmake_args = os.environ.get( 'EXTRA_CMAKE_ARGS', '' )
cmake_args.extend( extra_cmake_args.split() )
return cmake_args
def RunYcmdTests( build_dir ):
tests_dir = p.join( build_dir, 'ycm/tests' )
sh.cd( tests_dir )
new_env = os.environ.copy()
new_env[ 'LD_LIBRARY_PATH' ] = DIR_OF_THIS_SCRIPT
sh.Command( p.join( tests_dir, 'ycm_core_tests' ) )(
_env = new_env, _out = sys.stdout )
def BuildYcmdLibs( cmake_args ):
build_dir = unicode( sh.mktemp( '-d', '-t', 'ycm_build.XXXXXX' ) ).strip()
try:
full_cmake_args = [ '-G', 'Unix Makefiles' ]
if OnMac():
full_cmake_args.extend( CustomPythonCmakeArgs() )
full_cmake_args.extend( cmake_args )
full_cmake_args.append( p.join( DIR_OF_THIS_SCRIPT, 'cpp' ) )
sh.cd( build_dir )
sh.cmake( *full_cmake_args, _out = sys.stdout )
build_target = ( 'ycm_support_libs' if 'YCM_TESTRUN' not in os.environ else
'ycm_core_tests' )
sh.make( '-j', NumCores(), build_target, _out = sys.stdout,
_err = sys.stderr )
if 'YCM_TESTRUN' in os.environ:
RunYcmdTests( build_dir )
finally:
sh.cd( DIR_OF_THIS_SCRIPT )
sh.rm( '-rf', build_dir )
def BuildOmniSharp():
build_command = PathToFirstExistingExecutable(
[ 'msbuild', 'msbuild.exe', 'xbuild' ] )
if not build_command:
sys.exit( 'msbuild or xbuild is required to build Omnisharp' )
sh.cd( p.join( DIR_OF_THIS_SCRIPT, 'third_party/OmniSharpServer' ) )
sh.Command( build_command )( "/property:Configuration=Release", _out = sys.stdout )
def BuildGoCode():
if not find_executable( 'go' ):
sys.exit( 'go is required to build gocode' )
sh.cd( p.join( DIR_OF_THIS_SCRIPT, 'third_party/gocode' ) )
sh.Command( 'go' )( 'build', _out = sys.stdout )
def ApplyWorkarounds():
# Some OSs define a 'make' ENV VAR and this confuses sh when we try to do
# sh.make. See https://github.com/Valloric/YouCompleteMe/issues/1401
os.environ.pop('make', None)
def Main():
ApplyWorkarounds()
CheckDeps()
args = ParseArguments()
BuildYcmdLibs( GetCmakeArgs( args ) )
if args.omnisharp_completer:
BuildOmniSharp()
if args.gocode_completer:
BuildGoCode()
if __name__ == "__main__":
Main()
| gpl-3.0 | 5,089,517,288,401,382,000 | 31.550505 | 85 | 0.638169 | false |
perlygatekeeper/glowing-robot | google_test/free_the_bunny_prisoners/solution_5_fails.py | 1 | 1090 | import itertools
def solution(bunnies,keys_required):
answer = []
for i in range(bunnies):
answer.append([])
# if keys_required > bunnies:
# return None
if keys_required == 0:
return [[0]]
elif keys_required == 1:
key = 0
for group in range(bunnies):
answer[group].append(key)
elif bunnies == keys_required:
key = 0
for group in range(bunnies):
answer[group].append(key)
key += 1
else:
key = 0
for item in itertools.combinations(range(bunnies), keys_required):
for group in item:
answer[group].append(key)
key += 1
return answer
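# Illustration: solution(3, 2) iterates over combinations(range(3), 2), i.e.
# (0,1), (0,2), (1,2), assigning keys 0, 1, 2 respectively, which yields
# [[0, 1], [0, 2], [1, 2]]: any two bunnies together hold all three keys.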
for num_buns in range(1,10):
for num_required in range(10):
key_dist = solution(num_buns,num_required)
print("-" * 60)
print("Answer for {0:d} bunnies, requiring {1:d}".format(num_buns,num_required))
if ( len(key_dist[0]) * len(key_dist) ) < 25:
print(key_dist)
else:
for bun in key_dist:
print(bun)
| artistic-2.0 | -8,434,445,743,401,300,000 | 28.459459 | 88 | 0.538532 | false |
nuagenetworks/tempest | tempest/common/dynamic_creds.py | 1 | 17614 | # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_log import log as logging
import six
from tempest import clients
from tempest.common import cred_client
from tempest.common import cred_provider
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class DynamicCredentialProvider(cred_provider.CredentialProvider):
def __init__(self, identity_version, name=None, network_resources=None,
credentials_domain=None, admin_role=None, admin_creds=None):
"""Creates credentials dynamically for tests
A credential provider that, based on an initial set of
admin credentials, creates new credentials on the fly for
tests to use and then discard.
:param str identity_version: identity API version to use `v2` or `v3`
:param str admin_role: name of the admin role added to admin users
:param str name: names of dynamic resources include this parameter
when specified
:param str credentials_domain: name of the domain where the users
are created. If not defined, the project
domain from admin_credentials is used
:param dict network_resources: network resources to be created for
the created credentials
:param Credentials admin_creds: initial admin credentials
"""
super(DynamicCredentialProvider, self).__init__(
identity_version=identity_version, admin_role=admin_role,
name=name, credentials_domain=credentials_domain,
network_resources=network_resources)
self.network_resources = network_resources
self._creds = {}
self.ports = []
self.default_admin_creds = admin_creds
(self.identity_admin_client,
self.tenants_admin_client,
self.users_admin_client,
self.roles_admin_client,
self.domains_admin_client,
self.network_admin_client,
self.networks_admin_client,
self.routers_admin_client,
self.subnets_admin_client,
self.ports_admin_client,
self.security_groups_admin_client) = self._get_admin_clients()
# Domain where isolated credentials are provisioned (v3 only).
# Use that of the admin account if None is configured.
self.creds_domain_name = None
if self.identity_version == 'v3':
self.creds_domain_name = (
self.default_admin_creds.project_domain_name or
self.credentials_domain)
self.creds_client = cred_client.get_creds_client(
self.identity_admin_client,
self.tenants_admin_client,
self.users_admin_client,
self.roles_admin_client,
self.domains_admin_client,
self.creds_domain_name)
def _get_admin_clients(self):
"""Returns a tuple with instances of the following admin clients
(in this order):
identity
network
"""
os = clients.Manager(self.default_admin_creds)
if self.identity_version == 'v2':
return (os.identity_client, os.tenants_client, os.users_client,
os.roles_client, None, os.network_client,
os.networks_client, os.routers_client, os.subnets_client,
os.ports_client, os.security_groups_client)
else:
return (os.identity_v3_client, os.projects_client,
os.users_v3_client, os.roles_v3_client, os.domains_client,
os.network_client, os.networks_client, os.routers_client,
os.subnets_client, os.ports_client,
os.security_groups_client)
def _create_creds(self, suffix="", admin=False, roles=None):
"""Create random credentials under the following schema.
If the name contains a '.' it is the full class path of something, and
we don't really care. If it isn't, it's probably a meaningful name,
so use it.
For logging purposes, -user and -tenant are long and redundant,
don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
project_name = data_utils.rand_name(root) + suffix
project_desc = project_name + "-desc"
project = self.creds_client.create_project(
name=project_name, description=project_desc)
username = data_utils.rand_name(root) + suffix
user_password = data_utils.rand_password()
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self.creds_client.create_user(
username, user_password, project, email)
if 'user' in user:
user = user['user']
role_assigned = False
if admin:
self.creds_client.assign_user_role(user, project,
self.admin_role)
role_assigned = True
# Add roles specified in config file
for conf_role in CONF.auth.tempest_roles:
self.creds_client.assign_user_role(user, project, conf_role)
role_assigned = True
# Add roles requested by caller
if roles:
for role in roles:
self.creds_client.assign_user_role(user, project, role)
role_assigned = True
# NOTE(mtreinish) For a user to have access to a project with v3 auth
# it must be assigned a role on the project. So we need to ensure that
# our newly created user has a role on the newly created project.
if self.identity_version == 'v3' and not role_assigned:
self.creds_client.create_user_role('Member')
self.creds_client.assign_user_role(user, project, 'Member')
creds = self.creds_client.get_credentials(user, project, user_password)
return cred_provider.TestResources(creds)
def _create_network_resources(self, tenant_id):
network = None
subnet = None
router = None
# Make sure settings
if self.network_resources:
if self.network_resources['router']:
if (not self.network_resources['subnet'] or
not self.network_resources['network']):
raise exceptions.InvalidConfiguration(
'A router requires a subnet and network')
elif self.network_resources['subnet']:
if not self.network_resources['network']:
raise exceptions.InvalidConfiguration(
'A subnet requires a network')
elif self.network_resources['dhcp']:
raise exceptions.InvalidConfiguration('DHCP requires a subnet')
data_utils.rand_name_root = data_utils.rand_name(self.name)
if not self.network_resources or self.network_resources['network']:
network_name = data_utils.rand_name_root + "-network"
network = self._create_network(network_name, tenant_id)
try:
if not self.network_resources or self.network_resources['subnet']:
subnet_name = data_utils.rand_name_root + "-subnet"
subnet = self._create_subnet(subnet_name, tenant_id,
network['id'])
if not self.network_resources or self.network_resources['router']:
router_name = data_utils.rand_name_root + "-router"
router = self._create_router(router_name, tenant_id)
self._add_router_interface(router['id'], subnet['id'])
except Exception:
try:
if router:
self._clear_isolated_router(router['id'], router['name'])
if subnet:
self._clear_isolated_subnet(subnet['id'], subnet['name'])
if network:
self._clear_isolated_network(network['id'],
network['name'])
except Exception as cleanup_exception:
msg = "There was an exception trying to setup network " \
"resources for tenant %s, and this error happened " \
"trying to clean them up: %s"
LOG.warning(msg % (tenant_id, cleanup_exception))
raise
return network, subnet, router
def _create_network(self, name, tenant_id):
resp_body = self.networks_admin_client.create_network(
name=name, tenant_id=tenant_id)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.network_resources:
resp_body = self.subnets_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp_body = self.subnets_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
break
except lib_exc.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
message = 'Available CIDR for subnet creation could not be found'
raise Exception(message)
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
resp_body = self.routers_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
self.routers_admin_client.add_router_interface(router_id,
subnet_id=subnet_id)
def get_credentials(self, credential_type):
if self._creds.get(str(credential_type)):
credentials = self._creds[str(credential_type)]
else:
if credential_type in ['primary', 'alt', 'admin']:
is_admin = (credential_type == 'admin')
credentials = self._create_creds(admin=is_admin)
else:
credentials = self._create_creds(roles=credential_type)
self._creds[str(credential_type)] = credentials
# Maintained until tests are ported
LOG.info("Acquired dynamic creds:\n credentials: %s"
% credentials)
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled and
CONF.auth.create_isolated_networks):
network, subnet, router = self._create_network_resources(
credentials.tenant_id)
credentials.set_resources(network=network, subnet=subnet,
router=router)
LOG.info("Created isolated network resources for : \n"
+ " credentials: %s" % credentials)
return credentials
def get_primary_creds(self):
return self.get_credentials('primary')
def get_admin_creds(self):
return self.get_credentials('admin')
def get_alt_creds(self):
return self.get_credentials('alt')
def get_creds_by_roles(self, roles, force_new=False):
roles = list(set(roles))
# The roles list as a str will become the index as the dict key for
# the created credentials set in the dynamic_creds dict.
exist_creds = self._creds.get(str(roles))
# If the force_new flag is True, 2 cred sets with the same roles are needed;
# handle this by creating a separate index for the old one to store it
# separately for cleanup
if exist_creds and force_new:
new_index = str(roles) + '-' + str(len(self._creds))
self._creds[new_index] = exist_creds
del self._creds[str(roles)]
return self.get_credentials(roles)
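# Usage sketch (assumes `admin_creds` holds valid admin Credentials loaded
# elsewhere from the tempest configuration):
#   provider = DynamicCredentialProvider('v3', name='my-test',
#                                        admin_creds=admin_creds)
#   primary = provider.get_primary_creds()            # project/user created on the fly
#   member = provider.get_creds_by_roles(['Member'])
#   provider.clear_creds()                            # delete users, projects, networks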
def _clear_isolated_router(self, router_id, router_name):
client = self.routers_admin_client
try:
client.delete_router(router_id)
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
client = self.subnets_admin_client
try:
client.delete_subnet(subnet_id)
except lib_exc.NotFound:
LOG.warning('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.networks_admin_client
try:
net_client.delete_network(network_id)
except lib_exc.NotFound:
LOG.warning('network with name: %s not found for delete' %
network_name)
def _cleanup_default_secgroup(self, tenant):
nsg_client = self.security_groups_admin_client
resp_body = nsg_client.list_security_groups(tenant_id=tenant,
name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
try:
nsg_client.delete_security_group(secgroup['id'])
except lib_exc.NotFound:
LOG.warning('Security group %s, id %s not found for clean-up' %
(secgroup['name'], secgroup['id']))
def _clear_isolated_net_resources(self):
client = self.routers_admin_client
for cred in self._creds:
creds = self._creds.get(cred)
if (not creds or not any([creds.router, creds.network,
creds.subnet])):
continue
LOG.debug("Clearing network: %(network)s, "
"subnet: %(subnet)s, router: %(router)s",
{'network': creds.network, 'subnet': creds.subnet,
'router': creds.router})
if (not self.network_resources or
(self.network_resources.get('router') and creds.subnet)):
try:
client.remove_router_interface(
creds.router['id'],
subnet_id=creds.subnet['id'])
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete' %
creds.router['name'])
self._clear_isolated_router(creds.router['id'],
creds.router['name'])
if (not self.network_resources or
self.network_resources.get('subnet')):
self._clear_isolated_subnet(creds.subnet['id'],
creds.subnet['name'])
if (not self.network_resources or
self.network_resources.get('network')):
self._clear_isolated_network(creds.network['id'],
creds.network['name'])
def clear_creds(self):
if not self._creds:
return
self._clear_isolated_net_resources()
for creds in six.itervalues(self._creds):
try:
self.creds_client.delete_user(creds.user_id)
except lib_exc.NotFound:
LOG.warning("user with name: %s not found for delete" %
creds.username)
try:
if CONF.service_available.neutron:
self._cleanup_default_secgroup(creds.tenant_id)
self.creds_client.delete_project(creds.tenant_id)
except lib_exc.NotFound:
LOG.warning("tenant with name: %s not found for delete" %
creds.tenant_name)
self._creds = {}
def is_multi_user(self):
return True
def is_multi_tenant(self):
return True
def is_role_available(self, role):
return True
| apache-2.0 | -7,163,394,698,792,381,000 | 43.933673 | 79 | 0.56756 | false |
rosarior/django-sabot | setup.py | 1 | 1301 | #!/usr/bin/env python
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
with open('README.rst') as f:
readme = f.read()
with open('HISTORY.rst') as f:
history = f.read()
with open('LICENSE') as f:
license = f.read()
setup(
author='Roberto Rosario',
author_email='roberto.rosario.gonzalez@gmail.com',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
description='Provoke predictable errors in your Django projects.',
include_package_data=True,
install_requires=['Django>=1.7.0'],
license=license,
long_description=readme + '\n\n' + history,
name='django-sabot',
package_data={'': ['LICENSE']},
package_dir={'sabot': 'sabot'},
packages=['sabot'],
platforms=['any'],
url='https://github.com/rosarior/django-sabot',
version=,
zip_safe=False,
)
| mit | 5,138,860,737,166,539,000 | 26.104167 | 70 | 0.624135 | false |
maxspad/MGrader | autograder/modules/questions/PythonQuestion.py | 1 | 2436 | '''
Contains the PythonQuestion class, which is an instructor-facing
question type that implements a grade() function.
All instructor-facing Question modules must implement
a grade() function at module level that returns a Result object.
@author: Max Spadafore
'''
from AbstractQuestion import AbstractQ
table_name = 'grades_python'
f_uname = 'uname'
f_ptspos = 'ptspos'
f_ptsrec = 'ptsrec'
f_timestamp = 'timestamp_unix'
TABLE_CREATE = '''CREATE TABLE {0}
({1} TEXT PRIMARY KEY NOT NULL,
{2} INTEGER NOT NULL,
{3} INTEGER NOT NULL,
{4} INTEGER NOT NULL)'''.format(table_name, f_uname, f_ptspos, f_ptsrec, f_timestamp)
def initialize():
'''import autograder.modules.Database as dbm
db = dbm.DAL(connect=True)
db.createTable(TABLE_CREATE)
db.disconnect()'''
def process_cmd(cmdstr, args):
raise NotImplementedError
def grade(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=None, target=None):
'''
Called by the GSM after dynamic import. Takes its parameters, acts on them if it wishes, and passes them along to the
PythonQ class, which handles them. It then calls the PythonQ grade() function and returns its Result object.
@return: The Result object representing the result of the question's grading.
'''
question = PythonQ(uname, assigname, tcname, inputs, outputs, insprog, rtimeout, ctimeout, diffcmd, runcmd, makefile=makefile, maketarget=target)
return question.grade()
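# Usage sketch (hypothetical argument values; in practice the GSM imports this
# module dynamically and calls the module-level grade() shown above):
#   result = grade('student1', 'hw1', 'tc01', ['in0.txt'], ['out0.txt'],
#                  '/path/to/instructor_solution.py', 30, 60,
#                  'diff', 'python prog.py')
#   # `result` is the Result object returned by PythonQ.grade()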
class PythonQ(AbstractQ):
'''
An instructor-facing Question grading class designed to grade python programs.
Utilizes functions from AbstractQ
@see: AbstractQ
'''
def grade(self):
# move to student dir
self.chdirToStudent()
# Run (AbstractQuestion)
self.openFiles('student')
result = self.runStudentCode()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
self.chdirToInstructor()
self.openFiles('instructor')
self.runInstructorCode()
result = self.compareOutputs()
if result[0] == False:
self.chdirToGraderHome()
return self.failStudent(result)
self.closeFiles()
self.chdirToGraderHome()
return self.passStudent()
def getQType(self):
return 'PythonQuestion'
| bsd-3-clause | 555,966,247,296,795,100 | 31.932432 | 149 | 0.688013 | false |
Duncan93/dbm-project2 | Ttest.py | 1 | 5148 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 22:52:24 2015
@author: lorraine
"""
import json
from pprint import pprint
import numpy as np
from scipy.stats import mstats
from scipy import stats
import csv
import pandas as pd
#json_data=open("data/{0}_B.json".format("pizza")).read()
#data = json.loads(json_data)
#pprint(data)
def normaltest_data(category):
data,population = load_rating_data(category)
z,pval = mstats.normaltest(data)
print(category+" p value is "+str(pval))
if(pval < 0.01):
print "Not normal distribution"
else:
print "normal"
# normaltest_data
# null hypothesis: the pizza ratings all over the states follow a normal distribution
# A significance level of 0.01 was chosen.
#Since the calculated p value is greater than the significance level, we do not reject the null hypothesis
#Therefore we can safely assume the ratings follow a normal distribution
#Suppose the top-40 rated pizza rating nationwide is 4.0; the one sample t-test returns a p value of 0.0019 < significance level=0.05,
#therefore we can reject the null hypothesis. We do not have sufficient evidence to conclude the population mean is 4.0
#one-sided t-test, H0: score = 4.0, H1: score < 4.0
# As t<0 & p/2<alpha, we reject null hypothesis. Enough evidence to conclude best pizza score < 4.0
#assume the best pizza and best chinese have the same score in the population
#p-val = 2.32e-07 < 0.01, reject the null hypothesis. Do not have sufficient confidence to conclude the best scores are the same
#One-tailed greater than test. H0: pizza = chinese, H1:pizza >= chinese.
#As t>0 & p/2<alpha, we reject null hypothesis. Enough evidence to conclude that best pizza score is significantly greater than best chinese food
#two side p-val=0.003<0.01, t>0, reject null
#H0: best pizza score = best mexican, H1:best pizza >= mexican
#As t>0 and p/2<alpha, we reject null hypothesis. Best pizza is significantly greater than best mexican
#H0: best chinese = best mexican
#H1: best chinese not equal
# p>0.01, do not reject null. Mexican rating is not significantly different than Chinese
#assume the best pizza and the best bar have the same score in the population
#p-val=0.64 > 0.05, do not reject the null hypothesis. The best bar score is not significantly different from best pizza
def anova_test(cat1,cat2,cat3,cat4):
x1,pop1=load_rating_data(cat1)
x2,pop2=load_rating_data(cat2)
x3,pop3=load_rating_data(cat3)
x4,pop4=load_rating_data(cat4)
F_val, p_val_anova = stats.f_oneway(x1,x2,x3,x4)
print("anova f val"+str(F_val))
print("anova p val"+str(p_val_anova))
# anova test null hypothesis: the population means of the best pizza, bar, chinese and mexican restaurant ratings are the same
#p_val=1.13e-05<0.01, reject null hypothesis
#need to state the assumption of Anova Test
def pearson_rapop(category):
rating,population = load_rating_data(category)
pearson, p_val = stats.pearsonr(rating,population)
print("pearson rapop is "+str(pearson))
print("pearson rapop p_val is "+str(p_val))
# pearson coefficient = 0.23, 0.20<pearson<0.29,weak positive correlation
# p_val=0.09>0.05, H0: There is no statistically significant relationship between the two variables
# do not reject null hypothesis
def load_rating_data(category):
with open("data/{0}_B.json".format(category),"r") as f:
cat = f.read()
cat = json.loads(cat)
rating=[]
population=[]
for i in xrange(len(cat[category])):
score = cat[category][i].values()
rating.append(score[0]["rating"])
population.append(score[0]["population"])
return rating,population
def pearson_raAge(category):
rating,population = load_rating_data(category)
rating = np.array(rating)
population=np.array(population)
age = []
f = open('data/MedianAge.csv')
csv_f = csv.reader(f)
for row in csv_f:
age.append(float(row[2]))
#rating = np.array(rating)
age=np.array(age)
pearson, p_val = stats.pearsonr(rating,age)
print("pearson raAge is "+str(pearson))
print("pearson raAge p_val is "+str(p_val))
#negligible correlation between rating and median age
def one_sample_ttest(category,base):
rating,population=load_rating_data(category)
rating = np.array(rating)
population=np.array(population)
t4, prob4 = stats.ttest_1samp(rating,base)
print("t value of "+category+str(t4))
print("p value of "+category+str(prob4))
def two_sample_ttest(category1, category2):
data1,populaton1=load_rating_data(category1)
data1 = np.array(data1)
data2,population2=load_rating_data(category2)
data2 = np.array(data2)
t, prob = stats.ttest_rel(data1,data2)
print("t value of "+ category1+category2+str(t))
print("p value of "+ category1+category2+str(prob))
category_filter = ["pizza","chinese","mexican","bars"]
#for category in category_filter:
normaltest_data("pizza")
# pearson_raAge("pizza")
# pearson_rapop("pizza")
# one_sample_ttest("pizza",4)
# two_sample_ttest("pizza","chinese")
# anova_test("pizza","chinese","mexican","bars")
| mit | -4,440,707,402,355,450,000 | 37.691729 | 144 | 0.705402 | false |
pybel/pybel-tools | src/pybel_tools/analysis/neurommsig/export.py | 1 | 8861 | # -*- coding: utf-8 -*-
"""This module contains the functions needed to process the NeuroMMSig excel sheets as well as export as BEL.
To run, type :code:`python3 -m pybel_tools.analysis.neurommsig` in the command line
"""
import itertools as itt
import logging
import os
import re
import time
from functools import partial
from typing import Mapping, TextIO
import pandas as pd
import pybel
from bel_resources import get_bel_resource
from pybel import BELGraph
from pybel.dsl import Abundance, Gene
from pybel.utils import ensure_quotes
logger = logging.getLogger(__name__)
hgnc_symbol_pattern = re.compile(r"^[A-Z0-9-]+$|^C[0-9XY]+orf[0-9]+$")
snp_pattern = re.compile(r"^rs[0-9]+$")
snps_pattern_space = re.compile(r"^(rs[0-9]+)\s((rs[0-9]+)\s)*(rs[0-9]+)$")
snps_pattern_comma = re.compile(r"^(rs[0-9]+),((rs[0-9]+),)*(rs[0-9]+)$")
snps_pattern_space_comma = re.compile(r"^(rs[0-9]+), ((rs[0-9]+), )*(rs[0-9]+)$")
checked_by_anandhi = re.compile(r"No")
mirna_pattern = re.compile(r"^MIR.*$")
mirnas_pattern = re.compile(r"^(MIR.*),((MIR.*$),)*(MIR.*$)$")
def preprocessing_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel sheet.
:param path: filepath of the excel data
:return: df: pandas dataframe with excel data
"""
if not os.path.exists(path):
raise ValueError("Error: %s file not found" % path)
# Import Models from Excel sheet, independent for AD and PD
df = pd.read_excel(path, sheet_name=0, header=0)
# Indexes and column name
# [log.info(str(x)+': '+str((df.columns.values[x]))) for x in range (0,len(df.columns.values))]
# Starting from 4: Pathway Name
# Fill Pathway cells that are merged and are 'NaN' after deleting rows where there is no genes
for column_idx in (0, 1): # identifiers column then names columns
df.iloc[:, column_idx] = pd.Series(df.iloc[:, column_idx]).fillna(method='ffill')
# Number of gaps
# log.info(df.ix[:,6].isnull().sum())
df = df[df.iloc[:, 1].notnull()]
df = df.reset_index(drop=True)
# Fill NaN to zeros in PubMed identifier column
df.iloc[:, 2].fillna(0, inplace=True)
# Number of gaps in the gene column should be already zero
if (df.iloc[:, 1].isnull().sum()) != 0:
raise ValueError("Error: Empty cells in the gene column")
# Check current state
# df.to_csv('out.csv')
return df
def munge_cell(cell, line=None, validators=None):
"""Process a cell from the NeuroMMSig excel sheet."""
if pd.isnull(cell) or isinstance(cell, int):
return None
c = ' '.join(cell.split())
if validators is not None and all(re.match(validator, c) is None for validator in validators):
if line:
logger.info("Munge cell error: approx. in line: %s: %s", line, c)
return None
return [x.strip() for x in str(c).strip().split(',')]
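# For illustration: munge_cell("rs123, rs456", validators=[snps_pattern_space_comma])
# returns ['rs123', 'rs456']; a cell matching none of its validators returns None.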
def preprocessing_br_projection_excel(path: str) -> pd.DataFrame:
"""Preprocess the excel file."""
if not os.path.exists(path):
raise ValueError(f"Error: {path} file not found")
return pd.read_excel(path, sheet_name=0, header=0)
munge_snp = partial(munge_cell, validators=[snp_pattern, snps_pattern_space_comma])
mesh_alzheimer = "Alzheimer Disease" # Death to the eponym!
mesh_parkinson = "Parkinson Disease"
CANNED_EVIDENCE = 'Serialized from NeuroMMSigDB'
CANNED_CITATION = '28651363'
PATHWAY_ID_COLUMN_NAME = 'NeuroMMSig identifier'
PATHWAY_COLUMN_NAME = 'Subgraph Name'
GENE_COLUMN_NAME = 'Genes'
pmids_column = 'PMIDs'
snp_from_literature_column = 'SNPs from Literature (Aybuge)'
snp_from_gwas_column = 'Genome wide associated SNPs (Mufassra)'
snp_from_ld_block_column = 'LD block analysis (Mufassra)'
clinical_features_column = 'Imaging Features (Anandhi)'
snp_from_imaging_column = 'SNP_Image Feature (Mufassra & Anandhi)'
columns = [
GENE_COLUMN_NAME,
pmids_column,
snp_from_literature_column,
snp_from_gwas_column,
snp_from_ld_block_column,
clinical_features_column,
snp_from_imaging_column,
]
def preprocess(path: str) -> pd.DataFrame:
"""Preprocess a NeuroMMSig excel sheet, specified by a file path."""
df = preprocessing_excel(path)
df[snp_from_literature_column] = df[snp_from_literature_column].map(munge_snp)
df[snp_from_gwas_column] = df[snp_from_gwas_column].map(munge_snp)
df[snp_from_ld_block_column] = df[snp_from_ld_block_column].map(munge_snp)
df[clinical_features_column] = df[clinical_features_column].map(munge_cell)
df[clinical_features_column] = df[clinical_features_column].map(
lambda c: None
if c is not None and c[0] == 'No' else
c
)
df[snp_from_imaging_column] = df[snp_from_imaging_column].map(munge_snp)
return df
def get_nift_values() -> Mapping[str, str]:
"""Map NIFT names that have been normalized to the original names."""
r = get_bel_resource('https://arty.scai.fraunhofer.de/artifactory/bel/namespace/nift/NIFT.belns')
return {
name.lower(): name
for name in r['Values']
}
def write_neurommsig_bel(
file: TextIO,
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> None:
"""Write the NeuroMMSigDB excel sheet to BEL.
:param file: a file or file-like that can be writen to
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
graph = get_neurommsig_bel(df, disease, nift_values)
pybel.to_bel_script(graph, file)
def get_neurommsig_bel(
df: pd.DataFrame,
disease: str,
nift_values: Mapping[str, str],
) -> BELGraph:
"""Generate the NeuroMMSig BEL graph.
:param df:
:param disease:
:param nift_values: a dictionary of lower-cased to normal names in NIFT
"""
missing_features = set()
fixed_caps = set()
nift_value_originals = set(nift_values.values())
graph = BELGraph(
name=f'NeuroMMSigDB for {disease}',
description=f'SNP and Clinical Features for Subgraphs in {disease}',
authors='Daniel Domingo-Fernández, Charles Tapley Hoyt, Mufassra Naz, Aybuge Altay, Anandhi Iyappan',
contact='daniel.domingo.fernandez@scai.fraunhofer.de',
version=time.strftime('%Y%m%d'),
)
for pathway, pathway_df in df.groupby(PATHWAY_COLUMN_NAME):
sorted_pathway_df = pathway_df.sort_values(GENE_COLUMN_NAME)
sliced_df = sorted_pathway_df[columns].itertuples()
for _, gene, pubmeds, lit_snps, gwas_snps, ld_block_snps, clinical_features, clinical_snps in sliced_df:
gene = ensure_quotes(gene)
for snp in itt.chain(lit_snps or [], gwas_snps or [], ld_block_snps or [], clinical_snps or []):
if not snp.strip():
continue
graph.add_association(
Gene('HGNC', gene),
Gene('DBSNP', snp),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
for clinical_feature in clinical_features or []:
if not clinical_feature.strip():
continue
if clinical_feature.lower() not in nift_values:
missing_features.add(clinical_feature)
continue
if clinical_feature not in nift_value_originals:
fixed_caps.add((clinical_feature, nift_values[clinical_feature.lower()]))
clinical_feature = nift_values[clinical_feature.lower()] # fix capitalization
graph.add_association(
Gene('HGNC', gene),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if clinical_snps:
for clinical_snp in clinical_snps:
graph.add_association(
Gene('DBSNP', clinical_snp),
Abundance('NIFT', clinical_feature),
evidence=CANNED_EVIDENCE,
citation=CANNED_CITATION,
annotations={
'MeSHDisease': disease,
},
)
if missing_features:
logger.warning('Missing Features in %s', disease)
for feature in missing_features:
logger.warning(feature)
if fixed_caps:
logger.warning('Fixed capitalization')
for broken, fixed in fixed_caps:
logger.warning('%s -> %s', broken, fixed)
return graph
| mit | 6,785,736,960,564,687,000 | 33.341085 | 112 | 0.611174 | false |
buguen/pylayers | pylayers/util/cone.py | 1 | 20331 | #-*- coding:Utf-8 -*-
r"""
Class Cone
==========
The following conventions are adopted
+ A cone has an **apex** which is a point in the plane.
+ A cone has two vectors which define the cone aperture. The order of those two vectors
matters: (u) is the starting vector and (v) the ending vector.
The cone region is defined by the convex angular sector going from the starting
vector :math:`\mathbf{u}` to the ending vector :math:`\mathbf{v}`,
rotating in the plane following the trigonometric rotation convention.
The modulus of the cross product between :math:`\mathbf{u}` and :math:`\mathbf{v}` is positive.
:math:`\mathbf{u} \times \mathbf{v} = \alpha \mathbf{z} \;\; \textrm{with} \;\;\alpha > 0`
.. autosummary::
:toctree:
"""
import numpy as np
import doctest
import shapely as shp
import matplotlib.pyplot as plt
import pylayers.util.geomutil as geu
import pylayers.util.plotutil as plu
from pylayers.util.project import *
from matplotlib.path import Path
import matplotlib.patches as patches
import pdb
import logging
class Cone(PyLayers):
def __init__(self, a=np.array([1,0]), b = np.array([0,1]), apex=np.array([0, 0])):
"""
a : np.array (,2)
basis vector
b : np.array (,2)
apex : np.array (,2)
"""
self.apex = apex
# normalizing cone vectors
an = a/np.sqrt(np.dot(a,a))
bn = b/np.sqrt(np.dot(b,b))
if np.cross(an,bn) > 0:
self.u = an
self.v = bn
else:
self.u = bn
self.v = an
# -1 < gamma < 1
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross != 0:
self.degenerated = False
else:
self.degenerated = True
# update cone angle and probability
self.upd_angle()
def __repr__(self):
st = 'Cone object \n'
st = st+'----------------\n'
st = st + "Apex : " + str(self.apex)+'\n'
st = st + "u :" + str(self.u)+'\n'
st = st + "v :" + str(self.v)+'\n'
st = st + "cross : " + str(self.cross)+'\n'
st = st + "dot : " + str(self.dot)+'\n'
st = st + "angle : " + str(self.angle*180/np.pi)+'\n'
st = st + "pcone : " + str(self.pcone)+'\n'
if hasattr(self,'seg0'):
st = st + "from segments ( (xta,xhe) , (yta,yhe) )\n"
st = st + " seg0 : " + str(tuple(self.seg0))+'\n'
st = st + " seg1 : " + str(tuple(self.seg1))+'\n'
return(st)
def upd_angle(self):
"""update cone angle attribute
and associated probability of the Cone object
"""
self.angle = np.arccos(self.dot)
self.pcone = self.angle/(1.0*np.pi)
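# Example (sketch): input vectors are normalized and reordered so that
# np.cross(u, v) > 0, e.g.
#   C = Cone(a=np.array([0, 1]), b=np.array([2, 0]))
#   gives C.u = [1, 0], C.v = [0, 1], C.angle = pi/2 and C.pcone = 0.5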
def belong_seg(self,pta,phe,prob=True,visu=False):
""" test if segment belong to cone
Parameters
----------
pta : np.array (2xNseg)
phe : np.array (2xNseg)
Returns
-------
typ : int
0 : no visibility
1 : full visibility
2 : he.v
3 : ta.v
4 : ta.u
5 : he.u
6 : inside
proba : float
geometric probability
Notes
-----
A segment belongs to the cone if its two termination points
do not both lie on the same side outside the cone.
See Also
--------
outside_point
"""
if visu:
f,a = self.show()
plu.displot(pta,phe,fig=f,ax=a)
plt.show()
vc = (self.u+self.v)/2
#vcn = vc/np.sqrt(np.dot(vc,vc))
w = vc/np.sqrt(np.dot(vc,vc))
w = w.reshape(2,1)
#w = np.array([vcn[1],-vcn[0]])
ptama = pta - self.apex[:,None]
phema = phe - self.apex[:,None]
dtaw = np.sum(ptama*w,axis=0)
dhew = np.sum(phema*w,axis=0)
blta = (dtaw>=0)|(np.isclose(dtaw,0.))
blhe = (dhew>=0)|(np.isclose(dhew,0.))
#if 'seg1' in self.__dict__:
# pa = self.seg1[:,0].reshape(2,1)
# pb = (self.seg1[:,0]+w).reshape(2,1)
#else:
# pa = self.apex.reshape(2,1)
# pb = pa+w.reshape(2,1)
#blta = geu.isleft(pa,pb,pta)
#blhe = geu.isleft(pa,pb,phe)
# segment candidate for being above segment 1 (,Nseg)
boup = blta & blhe
# type of segment
if prob:
proba = np.zeros(np.shape(pta)[1])
else :
proba =[]
typ = np.zeros(np.shape(pta)[1])
# is tail out ? bo1 | bo2
# btaol : boolean tail out left
# btaor : boolean tail out right
# bheol : boolean head out left
# bheor : boolean head out right #
# among upper segment check position wrt cone
#btaol,btaor = self.outside_point(pta)
#bheol,bheor = self.outside_point(phe)
btaor,btaol = self.outside_point(pta)
bheor,bheol = self.outside_point(phe)
# tail and head are they out cone on the same side ?
# if the two termination points are not on the same side of the cone
# --> segment is in.
# boin = (~((btaol&bheol)|(btaor&bheor)))&boup
# full interception (proba to reach = 1)
bfull = ((btaol&bheor)|(btaor&bheol))&boup
if prob :
proba[bfull] = 1
typ[bfull] = 1
#(he-apex).v
btalhein = (btaol & ~bheol & ~bheor)&boup
if (prob and not (btalhein==False).all()):
v2 = phe[:,btalhein]-self.apex.reshape(2,1)
vn2 = v2/np.sqrt(np.sum(v2*v2,axis=0))
vvn2 = np.dot(self.v,vn2)
# paranoid verification of scalar product \in [-1,1]
vvn2 = np.minimum(vvn2,np.ones(len(vvn2)))
vvn2 = np.maximum(vvn2,-np.ones(len(vvn2)))
pr2 = np.arccos(vvn2)/self.angle
proba[btalhein] = pr2
typ[btalhein] = 2
#(ta-apex).v
bheltain = (bheol & ~btaol & ~btaor)&boup
if (prob and not (bheltain==False).all()):
v3 = pta[:,bheltain]-self.apex.reshape(2,1)
vn3 = v3/np.sqrt(np.sum(v3*v3,axis=0))
vvn3 = np.dot(self.v,vn3)
vvn3 = np.minimum(vvn3,np.ones(len(vvn3)))
vvn3 = np.maximum(vvn3,-np.ones(len(vvn3)))
pr3 = np.arccos(vvn3)/self.angle
proba[bheltain] = pr3
typ[bheltain] = 3
#ta.u
bhertain = (bheor & ~btaol & ~btaor)&boup
if (prob and not(bhertain==False).all()):
v4 = pta[:,bhertain]-self.apex.reshape(2,1)
vn4 = v4/np.sqrt(np.sum(v4*v4,axis=0))
vvn4 = np.dot(self.u,vn4)
vvn4 = np.minimum(vvn4,np.ones(len(vvn4)))
vvn4 = np.maximum(vvn4,-np.ones(len(vvn4)))
pr4 = np.arccos(vvn4)/self.angle
proba[bhertain] = pr4
typ[bhertain] = 4
#he.u
btarhein = (btaor & ~bheol & ~bheor)&boup
if (prob and not(btarhein==False).all()):
v5 = phe[:,btarhein]-self.apex.reshape(2,1)
vn5 = v5/np.sqrt(np.sum(v5*v5,axis=0))
vvn5 = np.dot(self.u,vn5)
vvn5 = np.minimum(vvn5,np.ones(len(vvn5)))
vvn5 = np.maximum(vvn5,-np.ones(len(vvn5)))
pr5 = np.arccos(vvn5)/self.angle
proba[btarhein] = pr5
typ[btarhein] = 5
#ta.he
btainhein = (~btaol & ~btaor & ~bheol & ~bheor)&boup
if (prob and not (btainhein==0).all()):
va = pta[:,btainhein]-self.apex.reshape(2,1)
vb = phe[:,btainhein]-self.apex.reshape(2,1)
vna = va/np.sqrt(np.sum(va*va,axis=0))
vnb = vb/np.sqrt(np.sum(vb*vb,axis=0))
# dot product vna,vnb
vnab = np.sum(vna*vnb,axis=0)
vnab = np.minimum(vnab,np.ones(len(vnab)))
vnab = np.maximum(vnab,-np.ones(len(vnab)))
pr6 = np.arccos(vnab)/self.angle
proba[btainhein] = pr6
typ[btainhein] = 6
return(typ,proba)
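    # Illustrative call (point values assumed for illustration, not taken
    # from the original source): pta and phe hold the tail and head points
    # of Nseg segments as (2, Nseg) arrays; belong_seg returns one
    # visibility type and one geometric probability per segment.
    #
    #   >>> cn = Cone()
    #   >>> pta = np.array([[1.0], [0.2]])   # tail inside the default cone
    #   >>> phe = np.array([[0.2], [1.0]])   # head inside the default cone
    #   >>> typ, proba = cn.belong_seg(pta, phe)
    #   >>> typ[0] == 6                      # fully inside -> type 6
    #   True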
def above_seg(self):
"""
"""
vc = (self.u+self.v)/2
vcn = vc/np.sqrt(dot(vc,vc))
w = np.array([vcn[1],-vcn[0]])
self.pa = self.seg1[:,0].reshape(2,1)
self.pb = (self.seg1[:,0]+w).reshape(2,1)
def outside_point(self,p):
""" check if p is outside the cone
Parameters
----------
p : np.array (2xNp)
Returns
-------
~b1 & ~b2 : boolean (outside on the left) (,Np)
b1 & b2 : boolean (outside on the right) (,Np)
Examples
--------
Notes
-----
        If one of the two output booleans is True, the point is outside
        the cone. There are 2 output bits but only 3 distinct states due
        to the (u, v) orientation convention.
        Schematically, with u and v the two edges of the cone:
            p outside, beyond edge v :  lv & lu
            p inside the cone        : ~lv & lu
            p outside, beyond edge u : ~lu & ~lv
"""
a = self.apex[:,None]
# b = a + self.u.reshape(2,1)
# c = a + self.v.reshape(2,1)
b = a + self.u[:,None]
c = a + self.v[:,None]
p0a0 = p[0,:]-a[0,:]
p1a1 = p[1,:]-a[1,:]
lu = ((b[0,:]-a[0,:])* p1a1 - ((b[1,:]-a[1,:])* p0a0 ))>0
lv = ((c[0,:]-a[0,:])* p1a1 - ((c[1,:]-a[1,:])* p0a0 ))>0
return(~lu & ~lv , lu & lv)
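    # Quick sanity check (illustrative, default Cone assumed): for a point
    # inside the cone both returned flags are False; for a point beyond one
    # of the edges, one of the flags is True.
    #
    #   >>> cn = Cone()
    #   >>> p = np.array([[1.0, -1.0], [1.0, 2.0]])  # (1,1) inside, (-1,2) outside
    #   >>> cn.outside_point(p)
    #   (array([False, False]), array([False,  True]))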
def belong_point2(self,p):
"""
Parameters
----------
p : np.array (Ndim x Npoints)
"""
a = self.apex[:,np.newaxis]
b = a + self.u.reshape(2,1)
c = a + self.v.reshape(2,1)
p1a1 = p[1,:]-a[1,:]
p0a0 = p[0,:]-a[0,:]
b1 = ((b[0,:]-a[0,:])* p1a1 - ((b[1,:]-a[1,:])* p0a0 ))>0
b2 = ((c[0,:]-a[0,:])* p1a1 - ((c[1,:]-a[1,:])* p0a0 ))>0
return(b1^b2)
def belong_point(self, p):
""" test if p belongs to Cone
Parameters
----------
p : np.array (Ndim x Npoints)
Returns
-------
b : np.array boolean (1xNpoints)
"""
# Ndim x Npoints
if not self.degenerated:
pt = p - self.apex[:,np.newaxis]
#puv = np.sum(self.bv[:,:,np.newaxis]*pt[:,np.newaxis,:],axis=0)
#alpha = puv[0,:]-self.gamma*puv[1,:]
#beta = puv[1,:]-self.gamma*puv[0,:]
pu = np.sum(self.u[:,np.newaxis]*pt,axis=0)
pv = np.sum(self.v[:,np.newaxis]*pt,axis=0)
alpha = pu-self.dot*pv
beta = pv-self.dot*pu
b = (beta>0)&(alpha>0)
else:
a0 = self.seg0[:,0]
b0 = self.seg0[:,1]
            if self.u[0] != 0:
slope = self.u[1]/self.u[0]
y0 = a0[1]-slope*a0[0]
y1 = b0[1]-slope*b0[0]
b = (p[1,:] > slope*p[0,:] + min(y0,y1) ) & (p[1,:]<slope*p[0,:]+max(y0,y1) )
else:
b = (p[0,:] > min(a0[0],b0[0]) ) & (p[0,:]< max(a0[0],b0[0]) )
return(b)
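    # Illustrative check (default Cone assumed): the first column lies in
    # the quarter-plane spanned by u=[1,0] and v=[0,1], the second does not.
    #
    #   >>> cn = Cone()
    #   >>> p = np.array([[1.0, -1.0], [1.0, 1.0]])
    #   >>> cn.belong_point(p)
    #   array([ True, False])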
def above(self, p):
""" check if above
Parameters
----------
        p : np.array (Ndim x Npoints)
"""
        bo1 = self.belong_point(p)
        pb = p[:,bo1]
        if self.v[1] != 0:
            slope1 = self.v[1]/self.v[0]
            b1 = self.v[1] - slope1*self.v[0]
            bo2 = pb[1,:] > slope1*pb[0,:]+b1
else:
bo2 = pb[1,:] > self.seg1[1,0]
return(bo1,bo2)
def fromptseg(self,pt,seg):
""" creates a Cone from one point and one segment
Parameters
----------
pt : nd.array (,2)
seg : nd.array (2,2)
"""
self.apex = pt
a = seg[:,0]
b = seg[:,1]
v0 = b - pt
v1 = a - pt
v0n = v0/np.sqrt(np.dot(v0,v0))
v1n = v1/np.sqrt(np.dot(v1,v1))
if np.cross(v0n,v1n) > 0:
self.u = v0n
self.v = v1n
self.seg1 = seg
else:
self.u = v1n
self.v = v0n
self.seg1 = seg[:,::-1]
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
self.upd_angle()
def from2segs(self,seg0,seg1):
""" creates a Cone from 2 segments
Parameters
----------
seg0 : 2 x 2 (Ndim x Npoints)
seg1 : 2 x 2
Notes
-----
The only way for the cone to be degenerated is when the two segments are on the same line.
See Also
--------
pylayers.gis.layout.Layout.buildGi
"""
# bv : (4,1)
self.seg0 = seg0
self.seg1 = seg1
a0 = seg0[:,0]
b0 = seg0[:,1]
a1 = seg1[:,0]
b1 = seg1[:,1]
# check for connected segments (This could be determined earlier)
# a0 = a1 | b1
# b0 = a1 | b1
# check segment orientation (crossing)
if not (geu.ccw(a0,b0,b1) ^
geu.ccw(b0,b1,a1) ):
v0 = (b1 - a0)
v1 = (a1 - b0)
twisted = True
else:
v0 = (a1 - a0)
v1 = (b1 - b0)
twisted = False
v0n = v0/np.sqrt(np.dot(v0,v0))
v1n = v1/np.sqrt(np.dot(v1,v1))
if np.cross(v0n,v1n) > 0:
self.u = v0n
self.v = v1n
inversion = False
else:
self.u = v1n
self.v = v0n
inversion = True
if (not twisted) & (not inversion) :
#reverse seg1
#print "reverse seg1"
self.seg1 = self.seg1[:,::-1]
if (inversion) & (not twisted):
#reverse seg0
#print "reverse seg0"
self.seg0 = self.seg0[:,::-1]
if twisted & inversion:
#reverse seg0 and seg1
#print "reverse seg0"
#print "reverse seg1"
self.seg0 = self.seg0[:,::-1]
self.seg1 = self.seg1[:,::-1]
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
else:
a0u = np.dot(self.seg0[:,0],self.u)
a0v = np.dot(self.seg0[:,0],self.v)
b0u = np.dot(self.seg0[:,1],self.u)
b0v = np.dot(self.seg0[:,1],self.v)
kb = ((b0v-a0v)-self.dot*(b0u-a0u))/(self.dot*self.dot-1)
self.apex = self.seg0[:,1] + kb*self.v
self.upd_angle()
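    # Sketch of a typical call (segment values assumed for illustration):
    # two 2x2 arrays whose columns are the segment end points; the method
    # orients u and v and, for non-degenerated cones, places the apex.
    #
    #   >>> cn = Cone()
    #   >>> seg0 = np.array([[0., 1.], [0., 0.]])   # row 0: x-coords, row 1: y-coords
    #   >>> seg1 = np.array([[0., 2.], [1., 2.]])
    #   >>> cn.from2segs(seg0, seg1)
    #   >>> cn.apex.shape, cn.u.shape, cn.v.shape
    #   ((2,), (2,), (2,))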
def from2csegs(self,seg0,seg1):
""" creates a Cone from 2 connected segments
Parameters
----------
seg0 : 2 x 2 (Ndim x Npoints)
seg1 : 2 x 2
Notes
-----
The only way for the cone to be degenerated is when the two segments are on the same line.
Examples
--------
>>> from pylayers.util.cone import *
>>> import matplotlib.pyplot as plt
>>> cn = Cone()
>>> f,a = cn.show()
>>> plt.show()
"""
# bv : (4,1)
self.seg0 = seg0
self.seg1 = seg1
a0 = seg0[:,0]
b0 = seg0[:,1]
a1 = seg1[:,0]
b1 = seg1[:,1]
# determine common point
if (np.dot(a0-a1,a0-a1)<1e-8):
p = a0
u = b1-p
v = p-b0
elif (np.dot(a0-b1,a0-b1)<1e-8):
p = a0
u = a1-p
v = p-b0
self.seg1 = self.seg1[:,::-1]
elif (np.dot(b0-a1,b0-a1)<1e-8):
p = b0
self.seg0 = self.seg0[:,::-1]
u = b1-p
v = p-a0
elif (np.dot(b0-b1,b0-b1)<1e-8):
self.seg0 = self.seg0[:,::-1]
self.seg1 = self.seg1[:,::-1]
p = b0
u = a1-p
v = p-a0
else:
logging.critical('segment are not connected')
pdb.set_trace()
self.apex = p
self.v = v/np.sqrt(np.dot(v,v))
self.u = u/np.sqrt(np.dot(u,u))
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross<0:
self.u , self.v = self.v , self.u
self.dot = np.dot(self.u,self.v)
self.cross = np.cross(self.u,self.v)
if self.cross < 1e-15:
self.degenerated=True
self.upd_angle()
def show(self, **kwargs):
""" show cone
Parameters
----------
length : float
"""
defaults = {'length': 15.}
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
if 'seg1' not in self.__dict__:
verts = [tuple(self.apex),
tuple(self.apex + kwargs['length'] * self.u),
tuple(self.apex + kwargs['length'] * self.v),
tuple(self.apex)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
else:
a1 = self.seg1[:,0]
b1 = self.seg1[:,1]
if 'seg0' not in self.__dict__:
a0 = self.apex
b0 = self.apex
else:
a0 = self.seg0[:,0]
b0 = self.seg0[:,1]
if not(self.degenerated):
#verts = [tuple(self.apex),
# tuple(a1),
# tuple(b1),
# tuple(self.apex)
# ]
verts = [tuple(self.apex),
tuple(self.apex + kwargs['length'] * self.u),
tuple(self.apex + kwargs['length'] * self.v),
tuple(self.apex)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
else:
if (geu.ccw(a0,b0,b1) ^
geu.ccw(b0,b1,a1) ):
verts = [tuple(b0),
tuple(a1),
tuple(b1),
tuple(a0),
tuple(b0)
]
else:
verts = [tuple(b0),
tuple(b1),
tuple(a1),
tuple(a0),
tuple(b0)
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(verts, codes)
if 'fig' not in kwargs:
fig = plt.figure(figsize=(10,10))
else:
fig = kwargs['fig']
if 'ax' not in kwargs:
ax = fig.add_subplot(111)
else:
ax = kwargs['ax']
ax.plot([self.apex[0],self.apex[0]+kwargs['length']*self.u[0]],
[self.apex[1],self.apex[1]+kwargs['length']*self.u[1]],lw=1,color='b')
ax.plot([self.apex[0],self.apex[0]+kwargs['length']*self.v[0]],
[self.apex[1],self.apex[1]+kwargs['length']*self.v[1]],lw=1,color='r')
theta1 = np.arctan2(self.u[1],self.u[0])*180/np.pi
#print theta1
theta2 = np.arctan2(self.v[1],self.v[0])*180/np.pi
#print theta2
angle = self.angle*180/np.pi
#print angle
arc = patches.Arc((self.apex[0],self.apex[1]),kwargs['length'],kwargs['length'],theta1=theta1,theta2=theta2,linewidth=2)
ax.add_patch(arc)
if 'seg0' in self.__dict__:
ax.plot([a0[0],b0[0]],[a0[1],b0[1]],lw=2,color='b')
if 'seg1' in self.__dict__:
ax.plot([a1[0],b1[0]],[a1[1],b1[1]],lw=2,color='r')
patch = patches.PathPatch(path, facecolor='orange', lw=2, alpha=0.3)
ax.add_patch(patch)
ax.axis('equal')
# ax.set_xlim(-2,2)
# ax.set_ylim(-2,2)
return(fig, ax)
if __name__ == '__main__':
plt.ion()
doctest.testmod()
| lgpl-3.0 | 8,532,794,341,175,884,000 | 27.395251 | 128 | 0.4471 | false |
Bioto/Huuey-python | huuey/hue/scenes/scene.py | 1 | 1071 | from huuey.paths import Paths
class Scene:
name = None
lights = []
owner = None
recycle = None
locked = None
appdata = None
picture = None
lastupdated = None
version = None
_id = None
_parent = None
def __init__(self, obj, parent, _id):
self._parent = parent
self._id = _id
self._map(obj)
def get_id(self):
return self._id
def _map(self, obj):
for key in obj:
            setattr(self, key, obj[key])
@staticmethod
def create(name, lights, controller, recycle=False):
request = controller.request(Paths.SceneCREATE, data={
'name': name,
'lights': lights,
'recycle': recycle
})
return request[0]['success']['id']
def activate(self):
return self._parent.request(Paths.SceneGroup, data={
'scene': self._id
})
def delete(self):
self._parent.request(Paths.SceneDEL, additional={
'id': self._id
})
self._parent.remove_scene(self._id)
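# Illustrative usage (a sketch; `controller` is assumed to expose the same
# request()/remove_scene() interface used by the methods above):
#
#   scene_id = Scene.create('movie night', lights=['1', '2'], controller=controller)
#   scene = Scene({'name': 'movie night'}, controller, scene_id)
#   scene.activate()
#   scene.delete()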
| mit | 6,638,197,520,970,899,000 | 20 | 62 | 0.535948 | false |
MicroMagnum/MicroMagnum | src/magnum/evolver/cvode.py | 1 | 1850 | # Copyright 2012 by the Micromagnum authors.
#
# This file is part of MicroMagnum.
#
# MicroMagnum is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MicroMagnum is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MicroMagnum. If not, see <http://www.gnu.org/licenses/>.
from magnum.mesh import VectorField
from .evolver import Evolver
from magnum.llgDiffEq import *
import magnum.magneto as m
class Cvode(Evolver):
def __init__(self, mesh, eps_abs, eps_rel, step_size, newton_method):
super(Cvode, self).__init__(mesh)
self.eps_abs = eps_abs
self.eps_rel = eps_rel
self.step_size = step_size
self.initialized = False
self.newton_method = newton_method
def initialize(self, state):
self.llg = LlgDiffEq(state)
self.cvode = m.Cvode(self.llg, self.eps_abs, self.eps_rel, self.newton_method)
state.h = self.step_size
self.initialized = True
def evolve(self, state, t_max):
if not self.initialized:
self.initialize(state)
# But: Don't overshoot past t_max!
if state.t + state.h > t_max:
state.h = t_max - state.t # make h_try smaller.
if t_max == 1e100:
t_max = state.t + state.h
t = state.t
# call cvode
self.cvode.evolve(state.t, t_max)
state.t = t_max
state.step += 1
#print(state.substep)
state.substep = 0
state.flush_cache()
state.finish_step()
return state
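# Rough usage sketch (parameter values here are assumptions, not project
# defaults):
#
#   evolver = Cvode(mesh, eps_abs=1e-4, eps_rel=1e-4,
#                   step_size=1e-13, newton_method=False)
#   state = evolver.evolve(state, t_max=1e-9)
#
# The evolver lazily wraps the state in LlgDiffEq on the first call and then
# delegates the time integration to the native CVODE binding.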
| gpl-3.0 | -6,416,352,454,993,770,000 | 28.365079 | 82 | 0.685405 | false |
ChengeLi/VehicleTracking | utilities/embedding.py | 1 | 3427 | #### embedding
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, MDS
from mpl_toolkits.mplot3d import Axes3D
class embeddings(object):
    def __init__(self, model, data):
self.modelChoice = model
self.data = data
# self.data = FeatureMtx_norm
def PCA_embedding(self,n_components):
print 'PCA projecting...'
        self.pca = PCA(n_components=n_components, whiten=False)
        self.embedding_ = self.pca.fit_transform(self.data)
# self.pca = PCAembedding(self.data,50)
# FeatureAfterPCA = self.pca.transform(self.data)
def TSNE_embedding(self,n_components):
# tsne = TSNE(n_components=2, perplexity=30.0)
tsne3 = TSNE(n_components=n_components, perplexity=30.0)
# tsne_data = tsne.fit_transform(FeatureAfterPCA50)
tsne3_data = tsne3.fit_transform(FeatureAfterPCA50)
# pickle.dump(tsne_data,open(DataPathobj.DataPath+'/tsne_data.p','wb'))
# tsne_data = pickle.load(open(DataPathobj.DataPath+'/tsne_data.p','rb'))
self.embedding_ = tsne3_data
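    # Stand-alone sketch of the intended PCA -> t-SNE pipeline (variable
    # names here are illustrative, not taken from the original script):
    #
    #   pca = PCA(n_components=50, whiten=False)
    #   feature_pca50 = pca.fit_transform(feature_mtx)
    #   tsne = TSNE(n_components=2, perplexity=30.0)
    #   feature_tsne = tsne.fit_transform(feature_pca50)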
def MDS_embedding(self,n_components):
self.mds = MDS(n_components=n_components, max_iter=100, n_init=1)
MDS_data = self.mds.fit_transform(FeatureAfterPCA50)
def LLE_embedding(self):
"""locally linear embedding_"""
# self.lle = sklearn.manifold.LocallyLinearEmbedding(n_neighbors=5, n_components=self.n_dimension, reg=0.001, eigen_solver='auto', tol=1e-06, max_iter=100,
# method='standard', hessian_tol=0.0001, modified_tol=1e-12, neighbors_algorithm='auto', random_state=None)
# self.embedding_ = self.lle.fit_transform(data_sampl_*feature_)
"""use DPGMM or Spectral labels"""
sscfile = loadmat(DataPathobj.sscpath+'001.mat')
labels_DPGMM = csr_matrix(sscfile['labels_DPGMM_upup'], shape=sscfile['labels_DPGMM_upup'].shape).toarray()
labels_spectral = csr_matrix(sscfile['labels_spectral_upup'], shape=sscfile['labels_spectral_upup'].shape).toarray()
trjID = csr_matrix(sscfile['trjID_upup'], shape=sscfile['trjID_upup'].shape).toarray()
"""use connected_components labels"""
adjfile = loadmat(DataPathobj.adjpath+'20knn&thresh_Gaussian_diff_dir_001.mat')
labels_CC = csr_matrix(adjfile['c_upup'], shape=adjfile['c_upup'].shape).toarray()
"""use fake ground truth labels"""
arrange_index = pickle.load(open(DataPathobj.DataPath+'/arrange_index.p','rb'))
# labels_fakeGT = labels_CC[arrange_index]
labels_fakeGT = np.zeros_like(labels_CC)
for ii in range(0,int(labels_fakeGT.shape[1]/20),1):
labels_fakeGT[0,arrange_index[20*ii:min(20*(ii+1),labels_fakeGT.shape[1])]] = ii
# labels_fakeGT[0,5*ii:min(5*(ii+1),labels_fakeGT.shape[1])] = ii
def visEmbedding(self):
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# labels = labels_DPGMM
# labels = labels_spectral
# labels = labels_CC
labels = labels_fakeGT
# data = MDS_data
data = tsne_data
clustered_color = np.array([np.random.randint(0,255) for _ in range(3*int(len(np.unique(labels))))]).reshape(len(np.unique(labels)),3)
plt.figure()
for ii in range(labels.shape[1]):
plt.scatter(data[ii,0],data[ii,1],color=(clustered_color[int(labels[0,ii])].T/255.0))
plt.draw()
| mit | -6,324,826,729,370,016,000 | 40.289157 | 164 | 0.640502 | false |
ChameleonCloud/horizon | openstack_dashboard/test/unit/api/test_neutron.py | 1 | 76571 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from neutronclient.common import exceptions as neutron_exc
from oslo_utils import uuidutils
import six
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard import policy
from openstack_dashboard.test import helpers as test
class NeutronApiTests(test.APIMockTestCase):
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_list(self, mock_neutronclient):
networks = {'networks': self.api_networks.list()}
subnets = {'subnets': self.api_subnets.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_networks.return_value = networks
neutronclient.list_subnets.return_value = subnets
ret_val = api.neutron.network_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Network)
neutronclient.list_networks.assert_called_once_with()
neutronclient.list_subnets.assert_called_once_with()
@override_settings(OPENSTACK_NEUTRON_NETWORK={
'enable_auto_allocated_network': True})
@test.create_mocks({api.neutron: ('network_list',
'subnet_list')})
def _test_network_list_for_tenant(
self, include_external,
filter_params, should_called, **extra_kwargs):
"""Convenient method to test network_list_for_tenant.
:param include_external: Passed to network_list_for_tenant.
:param filter_params: Filters passed to network_list_for_tenant
        :param should_called: specifies which methods are expected to be
            called. Valid values are non_shared, shared, and external.
"""
filter_params = filter_params or {}
all_networks = self.networks.list()
tenant_id = '1'
tenant_networks = [n for n in all_networks
if n['tenant_id'] == tenant_id]
shared_networks = [n for n in all_networks if n['shared']]
external_networks = [n for n in all_networks if n['router:external']]
return_values = []
expected_calls = []
if 'non_shared' in should_called:
params = filter_params.copy()
params['shared'] = False
return_values.append(tenant_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), tenant_id=tenant_id, **params),
)
if 'shared' in should_called:
params = filter_params.copy()
params['shared'] = True
return_values.append(shared_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), **params),
)
if 'external' in should_called:
params = filter_params.copy()
params['router:external'] = True
return_values.append(external_networks)
expected_calls.append(
mock.call(test.IsHttpRequest(), **params),
)
self.mock_network_list.side_effect = return_values
extra_kwargs.update(filter_params)
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id,
include_external=include_external,
**extra_kwargs)
expected = []
if 'non_shared' in should_called:
expected += tenant_networks
if 'shared' in should_called:
expected += shared_networks
if 'external' in should_called and include_external:
expected += external_networks
self.assertEqual(set(n.id for n in expected),
set(n.id for n in ret_val))
self.mock_network_list.assert_has_calls(expected_calls)
# Ensure all three types of networks are not empty. This is required
# to check 'pre_auto_allocate' network is not included.
self.assertTrue(tenant_networks)
self.assertTrue(shared_networks)
self.assertTrue(external_networks)
self.assertNotIn(api.neutron.AUTO_ALLOCATE_ID,
[n.id for n in ret_val])
def test_network_list_for_tenant(self):
self._test_network_list_for_tenant(
include_external=False, filter_params=None,
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_external(self):
self._test_network_list_for_tenant(
include_external=True, filter_params=None,
should_called=['non_shared', 'shared', 'external'])
def test_network_list_for_tenant_with_filters_shared_false_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'shared': True},
should_called=['shared'])
def test_network_list_for_tenant_with_filters_shared_true_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'shared': True},
should_called=['shared', 'external'])
def test_network_list_for_tenant_with_filters_ext_false_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'router:external': False},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_true_wo_incext(self):
self._test_network_list_for_tenant(
include_external=False, filter_params={'router:external': True},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_false_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'router:external': False},
should_called=['non_shared', 'shared'])
def test_network_list_for_tenant_with_filters_ext_true_w_incext(self):
self._test_network_list_for_tenant(
include_external=True, filter_params={'router:external': True},
should_called=['non_shared', 'shared', 'external'])
def test_network_list_for_tenant_with_filters_both_shared_ext(self):
# To check 'shared' filter is specified in network_list
# to look up external networks.
self._test_network_list_for_tenant(
include_external=True,
filter_params={'router:external': True, 'shared': True},
should_called=['shared', 'external'])
def test_network_list_for_tenant_with_other_filters(self):
# To check filter parameters other than shared and
# router:external are passed as expected.
self._test_network_list_for_tenant(
include_external=True,
filter_params={'router:external': True, 'shared': False,
'foo': 'bar'},
should_called=['non_shared', 'external'])
def test_network_list_for_tenant_no_pre_auto_allocate_if_net_exists(self):
self._test_network_list_for_tenant(
include_external=True, filter_params=None,
should_called=['non_shared', 'shared', 'external'],
include_pre_auto_allocate=True)
@override_settings(OPENSTACK_NEUTRON_NETWORK={
'enable_auto_allocated_network': True})
@test.create_mocks({api.neutron: ['network_list',
'is_extension_supported'],
api.nova: ['is_feature_available']})
def test_network_list_for_tenant_with_pre_auto_allocate(self):
tenant_id = '1'
self.mock_network_list.return_value = []
self.mock_is_extension_supported.return_value = True
self.mock_is_feature_available.return_value = True
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id, include_pre_auto_allocate=True)
self.assertEqual(1, len(ret_val))
self.assertIsInstance(ret_val[0], api.neutron.PreAutoAllocateNetwork)
self.assertEqual(api.neutron.AUTO_ALLOCATE_ID, ret_val[0].id)
self.assertEqual(2, self.mock_network_list.call_count)
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(), tenant_id=tenant_id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True),
])
self.mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'auto-allocated-topology')
self.mock_is_feature_available.assert_called_once_with(
test.IsHttpRequest(),
('instance_description', 'auto_allocated_network'))
@test.create_mocks({api.neutron: ['network_list']})
def test_network_list_for_tenant_no_pre_auto_allocate_if_disabled(self):
tenant_id = '1'
self.mock_network_list.return_value = []
ret_val = api.neutron.network_list_for_tenant(
self.request, tenant_id, include_pre_auto_allocate=True)
self.assertEqual(0, len(ret_val))
self.assertEqual(2, self.mock_network_list.call_count)
self.mock_network_list.assert_has_calls([
mock.call(test.IsHttpRequest(), tenant_id=tenant_id,
shared=False),
mock.call(test.IsHttpRequest(), shared=True),
])
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_get(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
subnet = {'subnet': self.api_subnets.first()}
subnetv6 = {'subnet': self.api_subnets.list()[1]}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
subnetv6_id = self.api_networks.first()['subnets'][1]
neutronclient = mock_neutronclient.return_value
neutronclient.show_network.return_value = network
neutronclient.show_subnet.side_effect = [subnet, subnetv6]
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
self.assertEqual(2, len(ret_val['subnets']))
self.assertIsInstance(ret_val['subnets'][0], api.neutron.Subnet)
neutronclient.show_network.assert_called_once_with(network_id)
neutronclient.show_subnet.assert_has_calls([
mock.call(subnet_id),
mock.call(subnetv6_id),
])
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_get_with_subnet_get_notfound(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
subnet_id = self.api_networks.first()['subnets'][0]
neutronclient = mock_neutronclient.return_value
neutronclient.show_network.return_value = network
neutronclient.show_subnet.side_effect = neutron_exc.NotFound
ret_val = api.neutron.network_get(self.request, network_id)
self.assertIsInstance(ret_val, api.neutron.Network)
self.assertEqual(2, len(ret_val['subnets']))
self.assertNotIsInstance(ret_val['subnets'][0], api.neutron.Subnet)
self.assertIsInstance(ret_val['subnets'][0], str)
neutronclient.show_network.assert_called_once_with(network_id)
neutronclient.show_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_create(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
form_data = {'network': {'name': 'net1',
'tenant_id': self.request.user.project_id}}
neutronclient = mock_neutronclient.return_value
neutronclient.create_network.return_value = network
ret_val = api.neutron.network_create(self.request, name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
neutronclient.create_network.assert_called_once_with(body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_update(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
network_id = self.api_networks.first()['id']
neutronclient = mock_neutronclient.return_value
form_data = {'network': {'name': 'net1'}}
neutronclient.update_network.return_value = network
ret_val = api.neutron.network_update(self.request, network_id,
name='net1')
self.assertIsInstance(ret_val, api.neutron.Network)
neutronclient.update_network.assert_called_once_with(network_id,
body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_network_delete(self, mock_neutronclient):
network_id = self.api_networks.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_network.return_value = None
api.neutron.network_delete(self.request, network_id)
neutronclient.delete_network.assert_called_once_with(network_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_get_network_ip_availability(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = mock_neutronclient.return_value
neutronclient.show_network_ip_availability.return_value = \
mock_ip_availability
ret_val = api.neutron.show_network_ip_availability(self.request,
network)
self.assertIsInstance(ret_val, dict)
neutronclient.show_network_ip_availability.assert_called_once_with(
network)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_network_ip_availability(self, mock_neutronclient):
network = {'network': self.api_networks.first()}
mock_ip_availability = self.ip_availability.get()
neutronclient = mock_neutronclient.return_value
neutronclient.show_network_ip_availability.return_value = \
mock_ip_availability
ip_availability = api.neutron. \
show_network_ip_availability(self.request, network)
availabilities = ip_availability.get("network_ip_availability",
{})
ret_val = availabilities.get("subnet_ip_availability", [])
self.assertIsInstance(ret_val, list)
neutronclient.show_network_ip_availability.assert_called_once_with(
network)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_list(self, mock_neutronclient):
subnets = {'subnets': self.api_subnets.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_subnets.return_value = subnets
ret_val = api.neutron.subnet_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Subnet)
neutronclient.list_subnets.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_get(self, mock_neutronclient):
subnet = {'subnet': self.api_subnets.first()}
subnet_id = self.api_subnets.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_subnet.return_value = subnet
ret_val = api.neutron.subnet_get(self.request, subnet_id)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.show_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_create(self, mock_neutronclient):
subnet_data = self.api_subnets.first()
params = {'network_id': subnet_data['network_id'],
'tenant_id': subnet_data['tenant_id'],
'name': subnet_data['name'],
'cidr': subnet_data['cidr'],
'ip_version': subnet_data['ip_version'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_subnet.return_value = {'subnet': subnet_data}
ret_val = api.neutron.subnet_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.create_subnet.assert_called_once_with(
body={'subnet': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_update(self, mock_neutronclient):
subnet_data = self.api_subnets.first()
subnet_id = subnet_data['id']
params = {'name': subnet_data['name'],
'gateway_ip': subnet_data['gateway_ip']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_subnet.return_value = {'subnet': subnet_data}
ret_val = api.neutron.subnet_update(self.request, subnet_id, **params)
self.assertIsInstance(ret_val, api.neutron.Subnet)
neutronclient.update_subnet.assert_called_once_with(
subnet_id, body={'subnet': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnet_delete(self, mock_neutronclient):
subnet_id = self.api_subnets.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_subnet.return_value = None
api.neutron.subnet_delete(self.request, subnet_id)
neutronclient.delete_subnet.assert_called_once_with(subnet_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_list(self, mock_neutronclient):
subnetpools = {'subnetpools': self.api_subnetpools.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_subnetpools.return_value = subnetpools
ret_val = api.neutron.subnetpool_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.SubnetPool)
neutronclient.list_subnetpools.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_get(self, mock_neutronclient):
subnetpool = {'subnetpool': self.api_subnetpools.first()}
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_subnetpool.return_value = subnetpool
ret_val = api.neutron.subnetpool_get(self.request, subnetpool_id)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.show_subnetpool.assert_called_once_with(subnetpool_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_create(self, mock_neutronclient):
subnetpool_data = self.api_subnetpools.first()
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes'],
'tenant_id': subnetpool_data['tenant_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_subnetpool.return_value = {'subnetpool':
subnetpool_data}
ret_val = api.neutron.subnetpool_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.create_subnetpool.assert_called_once_with(
body={'subnetpool': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_update(self, mock_neutronclient):
subnetpool_data = self.api_subnetpools.first()
subnetpool_id = subnetpool_data['id']
params = {'name': subnetpool_data['name'],
'prefixes': subnetpool_data['prefixes']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_subnetpool.return_value = {'subnetpool':
subnetpool_data}
ret_val = api.neutron.subnetpool_update(self.request, subnetpool_id,
**params)
self.assertIsInstance(ret_val, api.neutron.SubnetPool)
neutronclient.update_subnetpool.assert_called_once_with(
subnetpool_id, body={'subnetpool': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_subnetpool_delete(self, mock_neutronclient):
subnetpool_id = self.api_subnetpools.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_subnetpool.return_value = None
api.neutron.subnetpool_delete(self.request, subnetpool_id)
neutronclient.delete_subnetpool.assert_called_once_with(subnetpool_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list(self, mock_neutronclient):
ports = {'ports': self.api_ports.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = ports
ret_val = api.neutron.port_list(self.request)
for p in ret_val:
self.assertIsInstance(p, api.neutron.Port)
neutronclient.list_ports.assert_called_once_with()
@mock.patch.object(api.neutron, 'is_extension_supported')
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list_with_trunk_types(
self, mock_neutronclient, mock_is_extension_supported):
ports = self.api_tp_ports.list()
trunks = self.api_tp_trunks.list()
# list_extensions is decorated with memoized_with_request, so
# neutronclient() is not called. We need to mock it separately.
mock_is_extension_supported.return_value = True # trunk
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = {'ports': ports}
neutronclient.list_trunks.return_value = {'trunks': trunks}
expected_parent_port_ids = set()
expected_subport_ids = set()
for trunk in trunks:
expected_parent_port_ids.add(trunk['port_id'])
expected_subport_ids |= set([p['port_id'] for p
in trunk['sub_ports']])
expected_normal_port_ids = ({p['id'] for p in ports} -
expected_parent_port_ids -
expected_subport_ids)
ret_val = api.neutron.port_list_with_trunk_types(self.request)
self.assertEqual(len(ports), len(ret_val))
parent_port_ids = {p.id for p in ret_val
if isinstance(p, api.neutron.PortTrunkParent)}
subport_ids = {p.id for p in ret_val
if isinstance(p, api.neutron.PortTrunkSubport)}
normal_port_ids = ({p.id for p in ret_val} -
parent_port_ids - subport_ids)
self.assertEqual(expected_parent_port_ids, parent_port_ids)
self.assertEqual(expected_subport_ids, subport_ids)
self.assertEqual(expected_normal_port_ids, normal_port_ids)
mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'trunk')
neutronclient.list_ports.assert_called_once_with()
neutronclient.list_trunks.assert_called_once_with()
@mock.patch.object(api.neutron, 'is_extension_supported')
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_list_with_trunk_types_without_trunk_extension(
self, mock_neutronclient, mock_is_extension_supported):
ports = self.api_tp_ports.list()
# list_extensions is decorated with memoized_with_request,
        # the simplest way is to mock it directly.
mock_is_extension_supported.return_value = False # trunk
neutronclient = mock_neutronclient.return_value
neutronclient.list_ports.return_value = {'ports': ports}
ret_val = api.neutron.port_list_with_trunk_types(self.request)
self.assertEqual(len(ports), len(ret_val))
self.assertEqual(set(p['id'] for p in ports),
set(p.id for p in ret_val))
# When trunk extension is disabled, all returned values should be
# instances of Port class.
self.assertTrue(all(isinstance(p, api.neutron.Port) for p in ret_val))
mock_is_extension_supported.assert_called_once_with(
test.IsHttpRequest(), 'trunk')
neutronclient.list_ports.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_get(self, mock_neutronclient):
port = {'port': self.api_ports.first()}
port_id = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_port.return_value = port
ret_val = api.neutron.port_get(self.request, port_id)
self.assertIsInstance(ret_val, api.neutron.Port)
neutronclient.show_port.assert_called_once_with(port_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_create(self, mock_neutronclient):
port = {'port': self.api_ports.first()}
params = {'network_id': port['port']['network_id'],
'tenant_id': port['port']['tenant_id'],
'name': port['port']['name'],
'device_id': port['port']['device_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_port.return_value = port
ret_val = api.neutron.port_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Port)
self.assertEqual(api.neutron.Port(port['port']).id, ret_val.id)
neutronclient.create_port.assert_called_once_with(
body={'port': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_update(self, mock_neutronclient):
port_data = self.api_ports.first()
port_id = port_data['id']
params = {'name': port_data['name'],
'device_id': port_data['device_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_port.return_value = {'port': port_data}
ret_val = api.neutron.port_update(self.request, port_id, **params)
self.assertIsInstance(ret_val, api.neutron.Port)
self.assertEqual(api.neutron.Port(port_data).id, ret_val.id)
neutronclient.update_port.assert_called_once_with(
port_id, body={'port': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_port_delete(self, mock_neutronclient):
port_id = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_port.return_value = None
api.neutron.port_delete(self.request, port_id)
neutronclient.delete_port.assert_called_once_with(port_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_list(self, mock_neutronclient):
trunks = {'trunks': self.api_trunks.list()}
neutron_client = mock_neutronclient.return_value
neutron_client.list_trunks.return_value = trunks
ret_val = api.neutron.trunk_list(self.request)
for t in ret_val:
self.assertIsInstance(t, api.neutron.Trunk)
neutron_client.list_trunks.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_show(self, mock_neutronclient):
trunk = {'trunk': self.api_trunks.first()}
trunk_id = self.api_trunks.first()['id']
neutron_client = mock_neutronclient.return_value
neutron_client.show_trunk.return_value = trunk
ret_val = api.neutron.trunk_show(self.request, trunk_id)
self.assertIsInstance(ret_val, api.neutron.Trunk)
neutron_client.show_trunk.assert_called_once_with(trunk_id)
def test_trunk_object(self):
trunk = self.api_trunks.first().copy()
obj = api.neutron.Trunk(trunk)
self.assertEqual(0, obj.subport_count)
trunk_dict = obj.to_dict()
self.assertIsInstance(trunk_dict, dict)
self.assertEqual(trunk['name'], trunk_dict['name_or_id'])
self.assertEqual(0, trunk_dict['subport_count'])
trunk['name'] = '' # to test name_or_id
trunk['sub_ports'] = [uuidutils.generate_uuid() for i in range(2)]
obj = api.neutron.Trunk(trunk)
self.assertEqual(2, obj.subport_count)
trunk_dict = obj.to_dict()
self.assertEqual(obj.name_or_id, trunk_dict['name_or_id'])
self.assertEqual(2, trunk_dict['subport_count'])
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_create(self, mock_neutronclient):
trunk = {'trunk': self.api_trunks.first()}
params = {'name': trunk['trunk']['name'],
'port_id': trunk['trunk']['port_id'],
'project_id': trunk['trunk']['project_id']}
neutronclient = mock_neutronclient.return_value
neutronclient.create_trunk.return_value = trunk
ret_val = api.neutron.trunk_create(self.request, **params)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk['trunk']).id, ret_val.id)
neutronclient.create_trunk.assert_called_once_with(
body={'trunk': params})
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_delete(self, mock_neutronclient):
trunk_id = self.api_trunks.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_trunk.return_value = None
api.neutron.trunk_delete(self.request, trunk_id)
neutronclient.delete_trunk.assert_called_once_with(trunk_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_details(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': 'foo',
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.update_trunk.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.id)
self.assertEqual(ret_val.name, new_trunk['name'])
neutronclient.update_trunk.assert_called_once_with(
trunk_id, body={'trunk': {'name': 'foo'}})
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_add_subports(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': trunk_data['sub_ports'],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [
{'port_id': 1,
'segmentation_id': 100,
'segmentation_type': 'vlan'}],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.trunk_add_subports.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.trunk['id'])
self.assertEqual(ret_val.trunk['sub_ports'], new_trunk['sub_ports'])
neutronclient.trunk_add_subports.assert_called_once_with(
trunk_id,
body={'sub_ports': [{'port_id': 1, 'segmentation_id': 100,
'segmentation_type': 'vlan'}]}
)
@mock.patch.object(api.neutron, 'neutronclient')
def test_trunk_update_remove_subports(self, mock_neutronclient):
trunk_data = self.api_trunks.first()
trunk_id = trunk_data['id']
old_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [
{'port_id': 1,
'segmentation_id': 100,
'segmentation_type': 'vlan'}],
'admin_state_up': trunk_data['admin_state_up']}
new_trunk = {'name': trunk_data['name'],
'description': trunk_data['description'],
'id': trunk_data['id'],
'port_id': trunk_data['port_id'],
'sub_ports': [],
'admin_state_up': trunk_data['admin_state_up']}
neutronclient = mock_neutronclient.return_value
neutronclient.trunk_remove_subports.return_value = {'trunk': new_trunk}
ret_val = api.neutron.trunk_update(self.request, trunk_id,
old_trunk, new_trunk)
self.assertIsInstance(ret_val, api.neutron.Trunk)
self.assertEqual(api.neutron.Trunk(trunk_data).id, ret_val.trunk['id'])
self.assertEqual(ret_val.trunk['sub_ports'], new_trunk['sub_ports'])
neutronclient.trunk_remove_subports.assert_called_once_with(
trunk_id,
body={'sub_ports': [{'port_id':
old_trunk['sub_ports'][0]['port_id']}]}
)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_list(self, mock_neutronclient):
routers = {'routers': self.api_routers.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_routers.return_value = routers
ret_val = api.neutron.router_list(self.request)
for n in ret_val:
self.assertIsInstance(n, api.neutron.Router)
neutronclient.list_routers.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_get(self, mock_neutronclient):
router = {'router': self.api_routers.first()}
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
ret_val = api.neutron.router_get(self.request, router_id)
self.assertIsInstance(ret_val, api.neutron.Router)
neutronclient.show_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_create(self, mock_neutronclient):
router = {'router': self.api_routers.first()}
neutronclient = mock_neutronclient.return_value
form_data = {'router': {'name': 'router1',
'tenant_id': self.request.user.project_id}}
neutronclient.create_router.return_value = router
ret_val = api.neutron.router_create(self.request, name='router1')
self.assertIsInstance(ret_val, api.neutron.Router)
neutronclient.create_router.assert_called_once_with(body=form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_delete(self, mock_neutronclient):
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.delete_router.return_value = None
api.neutron.router_delete(self.request, router_id)
neutronclient.delete_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_add_interface(self, mock_neutronclient):
subnet_id = self.api_subnets.first()['id']
router_id = self.api_routers.first()['id']
neutronclient = mock_neutronclient.return_value
form_data = {'subnet_id': subnet_id}
neutronclient.add_interface_router.return_value = None
api.neutron.router_add_interface(
self.request, router_id, subnet_id=subnet_id)
neutronclient.add_interface_router.assert_called_once_with(router_id,
form_data)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_remove_interface(self, mock_neutronclient):
router_id = self.api_routers.first()['id']
fake_port = self.api_ports.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.remove_interface_router.return_value = None
api.neutron.router_remove_interface(
self.request, router_id, port_id=fake_port)
neutronclient.remove_interface_router.assert_called_once_with(
router_id, {'port_id': fake_port})
# Mocking neutronclient() does not work because api.neutron.list_extensions
# is decorated with memoized_with_request, so we need to mock
# neutronclient.v2_0.client directly.
@mock.patch('neutronclient.v2_0.client.Client.list_extensions')
def test_is_extension_supported(self, mock_list_extensions):
extensions = self.api_extensions.list()
mock_list_extensions.return_value = {'extensions': extensions}
self.assertTrue(
api.neutron.is_extension_supported(self.request, 'quotas'))
self.assertFalse(
api.neutron.is_extension_supported(self.request, 'doesntexist'))
mock_list_extensions.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_list(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
ret_val = api.neutron.router_static_route_list(self.request, router_id)
self.assertIsInstance(ret_val[0], api.neutron.RouterStaticRoute)
neutronclient.show_router.assert_called_once_with(router_id)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_remove(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
post_router = copy.deepcopy(router)
route = api.neutron.RouterStaticRoute(post_router['router']
['routes'].pop())
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
neutronclient.update_router.return_value = post_router
api.neutron.router_static_route_remove(self.request,
router_id, route.id)
neutronclient.show_router.assert_called_once_with(router_id)
body = {'router': {'routes': post_router['router']['routes']}}
neutronclient.update_router.assert_called_once_with(
router_id, body=body)
@mock.patch.object(api.neutron, 'neutronclient')
def test_router_static_route_add(self, mock_neutronclient):
router = {'router': self.api_routers_with_routes.first()}
router_id = self.api_routers_with_routes.first()['id']
post_router = copy.deepcopy(router)
route = {'nexthop': '10.0.0.5', 'destination': '40.0.1.0/24'}
post_router['router']['routes'].insert(0, route)
body = {'router': {'routes': post_router['router']['routes']}}
neutronclient = mock_neutronclient.return_value
neutronclient.show_router.return_value = router
neutronclient.update_router.return_value = post_router
api.neutron.router_static_route_add(self.request, router_id, route)
neutronclient.show_router.assert_called_once_with(router_id)
neutronclient.update_router.assert_called_once_with(router_id,
body=body)
# NOTE(amotoki): "dvr" permission tests check most of
# get_feature_permission features.
# These tests are not specific to "dvr" extension.
# Please be careful if you drop "dvr" extension in future.
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION=None)
@test.create_mocks({api.neutron: ('is_extension_supported',)})
def _test_get_dvr_permission_dvr_supported(self, dvr_enabled):
self.mock_is_extension_supported.return_value = dvr_enabled
self.assertEqual(dvr_enabled,
api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
self.mock_is_extension_supported.assert_called_once_with(
self.request, 'dvr')
def test_get_dvr_permission_dvr_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=True)
def test_get_dvr_permission_dvr_not_supported(self):
self._test_get_dvr_permission_dvr_supported(dvr_enabled=False)
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
@test.create_mocks({api.neutron: ('is_extension_supported',),
policy: ('check',)})
def _test_get_dvr_permission_with_policy_check(self, policy_check_allowed,
operation):
if operation == "create":
role = (("network", "create_router:distributed"),)
elif operation == "get":
role = (("network", "get_router:distributed"),)
self.mock_check.return_value = policy_check_allowed
self.mock_is_extension_supported.return_value = policy_check_allowed
self.assertEqual(policy_check_allowed,
api.neutron.get_feature_permission(self.request,
'dvr', operation))
self.mock_check.assert_called_once_with(role, self.request)
if policy_check_allowed:
self.mock_is_extension_supported.assert_called_once_with(
self.request, 'dvr')
else:
self.mock_is_extension_supported.assert_not_called()
def test_get_dvr_permission_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "get")
def test_get_dvr_permission_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "get")
def test_get_dvr_permission_create_with_policy_check_allowed(self):
self._test_get_dvr_permission_with_policy_check(True, "create")
def test_get_dvr_permission_create_with_policy_check_disallowed(self):
self._test_get_dvr_permission_with_policy_check(False, "create")
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
False})
def test_get_dvr_permission_dvr_disabled_by_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_distributed_router':
True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
def test_get_dvr_permission_dvr_unsupported_operation(self):
self.assertRaises(ValueError,
api.neutron.get_feature_permission,
self.request, 'dvr', 'unSupported')
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_dvr_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'dvr', 'get'))
@override_settings(OPENSTACK_NEUTRON_NETWORK={})
def test_get_dvr_permission_router_ha_default_config(self):
self.assertFalse(api.neutron.get_feature_permission(self.request,
'l3-ha', 'get'))
    # NOTE(amotoki): Most of get_feature_permission is covered by the "dvr"
    # tests above. The l3-ha tests below only cover l3-ha specific code.
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_ha_router': True},
POLICY_CHECK_FUNCTION='openstack_auth.policy.check')
@test.create_mocks({api.neutron: ('is_extension_supported',),
policy: ('check',)})
def _test_get_router_ha_permission_with_policy_check(self, ha_enabled):
role = (("network", "create_router:ha"),)
self.mock_check.return_value = True
self.mock_is_extension_supported.return_value = ha_enabled
self.assertEqual(ha_enabled,
api.neutron.get_feature_permission(self.request,
'l3-ha', 'create'))
self.mock_check.assert_called_once_with(role, self.request)
self.mock_is_extension_supported.assert_called_once_with(self.request,
'l3-ha')
def test_get_router_ha_permission_with_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(True)
def test_get_router_ha_permission_without_l3_ha_extension(self):
self._test_get_router_ha_permission_with_policy_check(False)
@mock.patch.object(api.neutron, 'neutronclient')
def test_list_resources_with_long_filters(self, mock_neutronclient):
        # In this test, port_list is called with an id=[10 port IDs]
        # filter, which generates a URI roughly 40*10 characters long.
        # Each port ID is converted to "id=<UUID>&" in the URI,
        # i.e. about 40 chars per ID (len(UUID)=36).
        # With an excess length of 220, only 400-220=180 chars
        # can be sent in the first request, i.e. 4 IDs per request.
        # As a result three API calls with 4, 4 and 2 port IDs
        # are expected.
ports = [{'id': uuidutils.generate_uuid(),
'name': 'port%s' % i,
'admin_state_up': True}
for i in range(10)]
port_ids = tuple([port['id'] for port in ports])
neutronclient = mock_neutronclient.return_value
uri_len_exc = neutron_exc.RequestURITooLong(excess=220)
list_ports_retval = [uri_len_exc]
for i in range(0, 10, 4):
list_ports_retval.append({'ports': ports[i:i + 4]})
neutronclient.list_ports.side_effect = list_ports_retval
ret_val = api.neutron.list_resources_with_long_filters(
api.neutron.port_list, 'id', tuple(port_ids),
request=self.request)
self.assertEqual(10, len(ret_val))
self.assertEqual(port_ids, tuple([p.id for p in ret_val]))
expected_calls = []
expected_calls.append(mock.call(id=tuple(port_ids)))
for i in range(0, 10, 4):
expected_calls.append(mock.call(id=tuple(port_ids[i:i + 4])))
neutronclient.list_ports.assert_has_calls(expected_calls)
@mock.patch.object(api.neutron, 'neutronclient')
def test_qos_policies_list(self, mock_neutronclient):
exp_policies = self.qos_policies.list()
api_qos_policies = {'policies': self.api_qos_policies.list()}
neutronclient = mock_neutronclient.return_value
neutronclient.list_qos_policies.return_value = api_qos_policies
ret_val = api.neutron.policy_list(self.request)
self.assertEqual(len(ret_val), len(exp_policies))
self.assertIsInstance(ret_val[0], api.neutron.QoSPolicy)
self.assertEqual(exp_policies[0].name, ret_val[0].name)
neutronclient.list_qos_policies.assert_called_once_with()
@mock.patch.object(api.neutron, 'neutronclient')
def test_qos_policy_create(self, mock_neutronclient):
qos_policy = self.api_qos_policies.first()
post_data = {'policy': {'name': qos_policy['name']}}
neutronclient = mock_neutronclient.return_value
neutronclient.create_qos_policy.return_value = {'policy': qos_policy}
ret_val = api.neutron.policy_create(self.request,
name=qos_policy['name'])
self.assertIsInstance(ret_val, api.neutron.QoSPolicy)
self.assertEqual(qos_policy['name'], ret_val.name)
neutronclient.create_qos_policy.assert_called_once_with(body=post_data)
class NeutronApiSecurityGroupTests(test.APIMockTestCase):
def setUp(self):
super(NeutronApiSecurityGroupTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
self.sg_dict = dict([(sg['id'], sg['name']) for sg
in self.api_security_groups.list()])
def _cmp_sg_rule(self, exprule, retrule):
self.assertEqual(exprule['id'], retrule.id)
self.assertEqual(exprule['security_group_id'],
retrule.parent_group_id)
self.assertEqual(exprule['direction'],
retrule.direction)
self.assertEqual(exprule['ethertype'],
retrule.ethertype)
self.assertEqual(exprule['port_range_min'],
retrule.from_port)
self.assertEqual(exprule['port_range_max'],
retrule.to_port,)
if (exprule['remote_ip_prefix'] is None and
exprule['remote_group_id'] is None):
expcidr = ('::/0' if exprule['ethertype'] == 'IPv6'
else '0.0.0.0/0')
else:
expcidr = exprule['remote_ip_prefix']
self.assertEqual(expcidr, retrule.ip_range.get('cidr'))
self.assertEqual(self.sg_dict.get(exprule['remote_group_id']),
retrule.group.get('name'))
def _cmp_sg(self, exp_sg, ret_sg):
self.assertEqual(exp_sg['id'], ret_sg.id)
self.assertEqual(exp_sg['name'], ret_sg.name)
# When a SG has no rules, neutron API does not contain
# 'security_group_rules' field, so .get() method needs to be used.
exp_rules = exp_sg.get('security_group_rules', [])
self.assertEqual(len(exp_rules), len(ret_sg.rules))
for (exprule, retrule) in six.moves.zip(exp_rules, ret_sg.rules):
self._cmp_sg_rule(exprule, retrule)
def _test_security_group_list(self, **params):
sgs = self.api_security_groups.list()
q_params = {'tenant_id': self.request.user.tenant_id}
# if tenant_id is specified, the passed tenant_id should be sent.
q_params.update(params)
# use deepcopy to ensure self.api_security_groups is not modified.
self.qclient.list_security_groups.return_value = {'security_groups':
copy.deepcopy(sgs)}
rets = api.neutron.security_group_list(self.request, **params)
self.assertEqual(len(sgs), len(rets))
for (exp, ret) in six.moves.zip(sgs, rets):
self._cmp_sg(exp, ret)
self.qclient.list_security_groups.assert_called_once_with(**q_params)
def test_security_group_list(self):
self._test_security_group_list()
def test_security_group_list_with_params(self):
self._test_security_group_list(name='sg1')
def test_security_group_list_with_tenant_id(self):
self._test_security_group_list(tenant_id='tenant1', name='sg1')
def test_security_group_get(self):
secgroup = self.api_security_groups.first()
sg_ids = set([secgroup['id']] +
[rule['remote_group_id'] for rule
in secgroup['security_group_rules']
if rule['remote_group_id']])
related_sgs = [sg for sg in self.api_security_groups.list()
if sg['id'] in sg_ids]
# use deepcopy to ensure self.api_security_groups is not modified.
self.qclient.show_security_group.return_value = \
{'security_group': copy.deepcopy(secgroup)}
self.qclient.list_security_groups.return_value = \
{'security_groups': related_sgs}
ret = api.neutron.security_group_get(self.request, secgroup['id'])
self._cmp_sg(secgroup, ret)
self.qclient.show_security_group.assert_called_once_with(
secgroup['id'])
self.qclient.list_security_groups.assert_called_once_with(
id=sg_ids, fields=['id', 'name'])
def test_security_group_create(self):
secgroup = self.api_security_groups.list()[1]
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description'],
'tenant_id': self.request.user.project_id}}
self.qclient.create_security_group.return_value = \
{'security_group': copy.deepcopy(secgroup)}
ret = api.neutron.security_group_create(self.request, secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
self.qclient.create_security_group.assert_called_once_with(body)
def test_security_group_update(self):
secgroup = self.api_security_groups.list()[1]
secgroup = copy.deepcopy(secgroup)
secgroup['name'] = 'newname'
secgroup['description'] = 'new description'
body = {'security_group':
{'name': secgroup['name'],
'description': secgroup['description']}}
self.qclient.update_security_group.return_value = {'security_group':
secgroup}
ret = api.neutron.security_group_update(self.request,
secgroup['id'],
secgroup['name'],
secgroup['description'])
self._cmp_sg(secgroup, ret)
self.qclient.update_security_group.assert_called_once_with(
secgroup['id'], body)
def test_security_group_delete(self):
secgroup = self.api_security_groups.first()
self.qclient.delete_security_group.return_value = None
api.neutron.security_group_delete(self.request, secgroup['id'])
self.qclient.delete_security_group.assert_called_once_with(
secgroup['id'])
def test_security_group_rule_create(self):
self._test_security_group_rule_create(with_desc=True)
def test_security_group_rule_create_without_desc(self):
self._test_security_group_rule_create(with_desc=False)
def test_security_group_rule_create_with_custom_protocol(self):
self._test_security_group_rule_create(custom_ip_proto=True)
def _test_security_group_rule_create(self, with_desc=False,
custom_ip_proto=False):
if custom_ip_proto:
sg_rule = [r for r in self.api_security_group_rules.list()
if r['protocol'] == '99'][0]
else:
sg_rule = [r for r in self.api_security_group_rules.list()
if r['protocol'] == 'tcp' and r['remote_ip_prefix']][0]
sg_id = sg_rule['security_group_id']
secgroup = [sg for sg in self.api_security_groups.list()
if sg['id'] == sg_id][0]
post_rule = copy.deepcopy(sg_rule)
del post_rule['id']
del post_rule['tenant_id']
if not with_desc:
del post_rule['description']
post_body = {'security_group_rule': post_rule}
self.qclient.create_security_group_rule.return_value = \
{'security_group_rule': copy.deepcopy(sg_rule)}
self.qclient.list_security_groups.return_value = \
{'security_groups': [copy.deepcopy(secgroup)]}
if with_desc:
description = sg_rule['description']
else:
description = None
ret = api.neutron.security_group_rule_create(
self.request, sg_rule['security_group_id'],
sg_rule['direction'], sg_rule['ethertype'], sg_rule['protocol'],
sg_rule['port_range_min'], sg_rule['port_range_max'],
sg_rule['remote_ip_prefix'], sg_rule['remote_group_id'],
description)
self._cmp_sg_rule(sg_rule, ret)
self.qclient.create_security_group_rule.assert_called_once_with(
post_body)
self.qclient.list_security_groups.assert_called_once_with(
id=set([sg_id]), fields=['id', 'name'])
def test_security_group_rule_delete(self):
sg_rule = self.api_security_group_rules.first()
self.qclient.delete_security_group_rule.return_value = None
api.neutron.security_group_rule_delete(self.request, sg_rule['id'])
self.qclient.delete_security_group_rule.assert_called_once_with(
sg_rule['id'])
def _get_instance(self, cur_sg_ids):
instance_port = [p for p in self.api_ports.list()
if p['device_owner'].startswith('compute:')][0]
instance_id = instance_port['device_id']
# Emulate an instance with two ports
instance_ports = []
for _i in range(2):
p = copy.deepcopy(instance_port)
p['id'] = uuidutils.generate_uuid()
p['security_groups'] = cur_sg_ids
instance_ports.append(p)
return (instance_id, instance_ports)
def test_server_security_groups(self):
cur_sg_ids = [sg['id'] for sg in self.api_security_groups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports.return_value = {'ports': instance_ports}
secgroups = copy.deepcopy(self.api_security_groups.list())
self.qclient.list_security_groups.return_value = \
{'security_groups': secgroups}
api.neutron.server_security_groups(self.request, instance_id)
self.qclient.list_ports.assert_called_once_with(device_id=instance_id)
self.qclient.list_security_groups.assert_called_once_with(
id=set(cur_sg_ids))
def test_server_update_security_groups(self):
cur_sg_ids = [self.api_security_groups.first()['id']]
new_sg_ids = [sg['id'] for sg in self.api_security_groups.list()[:2]]
instance_id, instance_ports = self._get_instance(cur_sg_ids)
self.qclient.list_ports.return_value = {'ports': instance_ports}
self.qclient.update_port.side_effect = \
[{'port': p} for p in instance_ports]
api.neutron.server_update_security_groups(
self.request, instance_id, new_sg_ids)
self.qclient.list_ports.assert_called_once_with(device_id=instance_id)
body = {'port': {'security_groups': new_sg_ids}}
expected_calls = [mock.call(p['id'], body=body)
for p in instance_ports]
self.qclient.update_port.assert_has_calls(expected_calls)
class NeutronApiFloatingIpTests(test.APIMockTestCase):
def setUp(self):
super(NeutronApiFloatingIpTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_floating_ip_supported(self):
self.assertTrue(api.neutron.floating_ip_supported(self.request))
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_floating_ip_supported_false(self):
self.assertFalse(api.neutron.floating_ip_supported(self.request))
def test_floating_ip_pools_list(self):
search_opts = {'router:external': True}
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
self.qclient.list_networks.return_value = {'networks': ext_nets}
rets = api.neutron.floating_ip_pools_list(self.request)
for attr in ['id', 'name']:
self.assertEqual([p[attr] for p in ext_nets],
[getattr(p, attr) for p in rets])
self.qclient.list_networks.assert_called_once_with(**search_opts)
def test_floating_ip_list(self):
fips = self.api_floating_ips.list()
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_floatingips.return_value = {'floatingips': fips}
self.qclient.list_ports.return_value = {'ports': self.api_ports.list()}
rets = api.neutron.tenant_floating_ip_list(self.request)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(exp[attr], getattr(ret, attr))
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.list_floatingips.assert_called_once_with(**filters)
self.qclient.list_ports.assert_called_once_with(**filters)
def test_floating_ip_list_all_tenants(self):
fips = self.api_floating_ips.list()
self.qclient.list_floatingips.return_value = {'floatingips': fips}
self.qclient.list_ports.return_value = {'ports': self.api_ports.list()}
fip_manager = api.neutron.FloatingIpManager(self.request)
rets = fip_manager.list(all_tenants=True)
assoc_port = self.api_ports.list()[1]
self.assertEqual(len(fips), len(rets))
for ret, exp in zip(rets, fips):
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(getattr(ret, attr), exp[attr])
if exp['port_id']:
dev_id = assoc_port['device_id'] if exp['port_id'] else None
self.assertEqual(dev_id, ret.instance_id)
self.assertEqual('compute', ret.instance_type)
else:
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.list_floatingips.assert_called_once_with()
self.qclient.list_ports.assert_called_once_with()
def _test_floating_ip_get_associated(self, assoc_port, exp_instance_type):
fip = self.api_floating_ips.list()[1]
self.qclient.show_floatingip.return_value = {'floatingip': fip}
self.qclient.show_port.return_value = {'port': assoc_port}
ret = api.neutron.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertEqual(assoc_port['device_id'], ret.instance_id)
self.assertEqual(exp_instance_type, ret.instance_type)
self.qclient.show_floatingip.assert_called_once_with(fip['id'])
self.qclient.show_port.assert_called_once_with(assoc_port['id'])
def test_floating_ip_get_associated(self):
assoc_port = self.api_ports.list()[1]
self._test_floating_ip_get_associated(assoc_port, 'compute')
def test_floating_ip_get_associated_with_loadbalancer_vip(self):
assoc_port = copy.deepcopy(self.api_ports.list()[1])
assoc_port['device_owner'] = 'neutron:LOADBALANCER'
assoc_port['device_id'] = uuidutils.generate_uuid()
assoc_port['name'] = 'vip-' + uuidutils.generate_uuid()
self._test_floating_ip_get_associated(assoc_port, 'loadbalancer')
def test_floating_ip_get_unassociated(self):
fip = self.api_floating_ips.list()[0]
self.qclient.show_floatingip.return_value = {'floatingip': fip}
ret = api.neutron.tenant_floating_ip_get(self.request, fip['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.show_floatingip.assert_called_once_with(fip['id'])
def test_floating_ip_allocate(self):
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
ext_net = ext_nets[0]
fip = self.api_floating_ips.first()
self.qclient.create_floatingip.return_value = {'floatingip': fip}
ret = api.neutron.tenant_floating_ip_allocate(self.request,
ext_net['id'])
for attr in ['id', 'ip', 'pool', 'fixed_ip', 'port_id']:
self.assertEqual(fip[attr], getattr(ret, attr))
self.assertIsNone(ret.instance_id)
self.assertIsNone(ret.instance_type)
self.qclient.create_floatingip.assert_called_once_with(
{'floatingip': {'floating_network_id': ext_net['id'],
'tenant_id': self.request.user.project_id}})
def test_floating_ip_release(self):
fip = self.api_floating_ips.first()
self.qclient.delete_floatingip.return_value = None
api.neutron.tenant_floating_ip_release(self.request, fip['id'])
self.qclient.delete_floatingip.assert_called_once_with(fip['id'])
def test_floating_ip_associate(self):
fip = self.api_floating_ips.list()[1]
assoc_port = self.api_ports.list()[1]
ip_address = assoc_port['fixed_ips'][0]['ip_address']
target_id = '%s_%s' % (assoc_port['id'], ip_address)
params = {'port_id': assoc_port['id'],
'fixed_ip_address': ip_address}
self.qclient.update_floatingip.return_value = None
api.neutron.floating_ip_associate(self.request, fip['id'], target_id)
self.qclient.update_floatingip.assert_called_once_with(
fip['id'], {'floatingip': params})
def test_floating_ip_disassociate(self):
fip = self.api_floating_ips.list()[1]
self.qclient.update_floatingip.return_value = None
api.neutron.floating_ip_disassociate(self.request, fip['id'])
self.qclient.update_floatingip.assert_called_once_with(
fip['id'], {'floatingip': {'port_id': None}})
def _get_target_id(self, port, ip=None, index=0):
param = {'id': port['id'],
'addr': ip or port['fixed_ips'][index]['ip_address']}
return '%(id)s_%(addr)s' % param
def _get_target_name(self, port, ip=None):
param = {'svrid': port['device_id'],
'addr': ip or port['fixed_ips'][0]['ip_address']}
return 'server_%(svrid)s: %(addr)s' % param
@override_settings(
OPENSTACK_NEUTRON_NETWORK={
'enable_fip_topology_check': True,
}
)
@mock.patch.object(api._nova, 'novaclient')
def test_floating_ip_target_list(self, mock_novaclient):
ports = self.api_ports.list()
# Port on the first subnet is connected to a router
# attached to external network in neutron_data.
subnet_id = self.subnets.first().id
shared_nets = [n for n in self.api_networks.list() if n['shared']]
shared_subnet_ids = [s for n in shared_nets for s in n['subnets']]
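        # Build the expected (target_id, target_name) pairs below: skip ports
        # owned by network services, keep only ports on the router-connected
        # subnet or on shared subnets, and only their IPv4 addresses.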
target_ports = []
for p in ports:
if p['device_owner'].startswith('network:'):
continue
port_subnets = [ip['subnet_id'] for ip in p['fixed_ips']]
if not (subnet_id in port_subnets or
(set(shared_subnet_ids) & set(port_subnets))):
continue
for ip in p['fixed_ips']:
if netaddr.IPAddress(ip['ip_address']).version != 4:
continue
target_ports.append((
self._get_target_id(p, ip['ip_address']),
self._get_target_name(p, ip['ip_address'])))
filters = {'tenant_id': self.request.user.tenant_id}
self.qclient.list_ports.return_value = {'ports': ports}
servers = self.servers.list()
novaclient = mock_novaclient.return_value
ver = mock.Mock(min_version='2.1', version='2.45')
novaclient.versions.get_current.return_value = ver
novaclient.servers.list.return_value = servers
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
list_networks_retvals = [{'networks': ext_nets},
{'networks': shared_nets}]
self.qclient.list_networks.side_effect = list_networks_retvals
self.qclient.list_routers.return_value = {'routers':
self.api_routers.list()}
shared_subs = [s for s in self.api_subnets.list()
if s['id'] in shared_subnet_ids]
self.qclient.list_subnets.return_value = {'subnets': shared_subs}
rets = api.neutron.floating_ip_target_list(self.request)
self.assertEqual(len(target_ports), len(rets))
for ret, exp in zip(rets, target_ports):
pid, ip_address = ret.id.split('_', 1)
            self.assertEqual(4, netaddr.IPAddress(ip_address).version)
self.assertEqual(exp[0], ret.id)
self.assertEqual(exp[1], ret.name)
self.qclient.list_ports.assert_called_once_with(**filters)
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.list.assert_called_once_with(
False, {'project_id': self.request.user.tenant_id})
self.qclient.list_networks.assert_has_calls([
mock.call(**{'router:external': True}),
mock.call(shared=True),
])
self.qclient.list_routers.assert_called_once_with()
self.qclient.list_subnets.assert_called_once_with()
@mock.patch.object(api._nova, 'novaclient')
def _test_target_floating_ip_port_by_instance(self, server, ports,
candidates, mock_novaclient):
# list_ports and list_networks are called multiple times,
# we prepare a list for return values.
list_ports_retvals = []
self.qclient.list_ports.side_effect = list_ports_retvals
list_nets_retvals = []
self.qclient.list_networks.side_effect = list_nets_retvals
# _target_ports_by_instance()
list_ports_retvals.append({'ports': candidates})
# _get_reachable_subnets()
ext_nets = [n for n in self.api_networks.list()
if n['router:external']]
list_nets_retvals.append({'networks': ext_nets})
self.qclient.list_routers.side_effect = [{'routers':
self.api_routers.list()}]
rinfs = [p for p in ports
if p['device_owner'] in api.neutron.ROUTER_INTERFACE_OWNERS]
list_ports_retvals.append({'ports': rinfs})
shared_nets = [n for n in self.api_networks.list() if n['shared']]
list_nets_retvals.append({'networks': shared_nets})
shared_subnet_ids = [s for n in shared_nets for s in n['subnets']]
shared_subs = [s for s in self.api_subnets.list()
if s['id'] in shared_subnet_ids]
self.qclient.list_subnets.side_effect = [{'subnets': shared_subs}]
# _get_server_name()
novaclient = mock_novaclient.return_value
ver = mock.Mock(min_version='2.1', version='2.45')
novaclient.versions.get_current.return_value = ver
novaclient.servers.get.return_value = server
ret_val = api.neutron.floating_ip_target_list_by_instance(self.request,
server.id)
self.qclient.list_ports.assert_has_calls([
mock.call(device_id=server.id),
mock.call(device_owner=api.neutron.ROUTER_INTERFACE_OWNERS),
])
self.qclient.list_networks.assert_has_calls([
mock.call(**{'router:external': True}),
mock.call(shared=True),
])
self.qclient.list_routers.assert_called_once_with()
self.qclient.list_subnets.assert_called_once_with()
novaclient.versions.get_current.assert_called_once_with()
novaclient.servers.get.assert_called_once_with(server.id)
return ret_val
def test_target_floating_ip_port_by_instance(self):
server = self.servers.first()
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == server.id]
ret = self._test_target_floating_ip_port_by_instance(server, ports,
candidates)
self.assertEqual(1, len(ret))
ret_val = ret[0]
self.assertEqual(self._get_target_id(candidates[0]), ret_val.id)
self.assertEqual(candidates[0]['id'], ret_val.port_id)
self.assertEqual(candidates[0]['device_id'], ret_val.instance_id)
def test_target_floating_ip_port_by_instance_with_ipv6(self):
server = self.servers.first()
ports = self.api_ports.list()
candidates = [p for p in ports if p['device_id'] == server.id]
# Move the IPv6 entry first
fixed_ips = candidates[0]['fixed_ips']
candidates[0]['fixed_ips'] = [fixed_ips[1], fixed_ips[0]]
# Check the first IP address is IPv6
first_ip = candidates[0]['fixed_ips'][0]['ip_address']
self.assertEqual(6, netaddr.IPAddress(first_ip).version)
ret = self._test_target_floating_ip_port_by_instance(server, ports,
candidates)
self.assertEqual(1, len(ret))
ret_val = ret[0]
self.assertEqual(self._get_target_id(candidates[0], index=1),
ret_val.id)
self.assertEqual(candidates[0]['id'], ret_val.port_id)
self.assertEqual(candidates[0]['device_id'], ret_val.instance_id)
def _get_preloaded_targets(self):
return [
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name11', 'id': 'id11',
'device_id': 'id-vm1'}),
'192.168.1.1', 'vm1'),
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name21', 'id': 'id21',
'device_id': 'id-vm2'}),
'172.16.1.1', 'vm2'),
api.neutron.FloatingIpTarget(
api.neutron.Port({'name': 'name22', 'id': 'id22',
'device_id': 'id-vm2'}),
'10.11.12.13', 'vm3'),
]
def test_target_floating_ip_port_by_instance_with_preloaded_target(self):
target_list = self._get_preloaded_targets()
ret = api.neutron.floating_ip_target_list_by_instance(
self.request, 'id-vm2', target_list)
self.assertEqual(['id21', 'id22'], [r.port_id for r in ret])
| apache-2.0 | -2,582,173,321,856,126,000 | 44.094817 | 79 | 0.608755 | false |
bigzhao/flask-projects-manage | app/auth/views.py | 1 | 2830 | # -*- coding: utf-8 -*-
from flask import render_template, redirect, request, url_for, flash
from flask.ext.login import login_user, logout_user, login_required, current_user
from . import auth
from ..models import User
from .forms import RegisterForm, EditForm, ChangePasswdForm
from .. import db
@auth.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for("main.index"))
if request.method == 'POST':
user = User.query.filter_by(id=request.form.get('uid')).first()
if user is not None and user.verify_password(request.form.get('password')):
login_user(user, request.form.get('remember_me'))
return redirect(request.args.get('next') or url_for('main.index'))
        flash(u'Invalid username or password.')
return render_template('auth/login.html')
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['POST', 'GET'])
def register():
if current_user.is_authenticated:
return redirect(url_for("main.index"))
form = RegisterForm()
if form.validate_on_submit():
user = User(id=form.uid.data,
name=form.username.data.strip(),
password=form.password.data)
db.session.add(user)
db.session.commit()
        flash(u'Registration successful!')
return redirect(url_for(".login"))
return render_template('auth/register.html', form=form)
@auth.route('/edit_profile', methods=['POST', 'GET'])
@login_required
def edit_profile():
form = EditForm()
if form.validate_on_submit():
user = current_user._get_current_object()
user.name = form.username.data
db.session.add(user)
db.session.commit()
        flash(u'Username updated successfully')
return redirect(url_for('main.index'))
form.uid.data = current_user.id
form.username.data = current_user.name
return render_template('auth/edit_profile.html', form=form)
@auth.route('/changepasswd', methods=['POST', 'GET'])
@login_required
def change_passwd():
form = ChangePasswdForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
user = current_user._get_current_object()
user.password = form.password.data
db.session.add(user)
db.session.commit()
return redirect(url_for('auth.login'))
else:
            flash(u'Incorrect password')
return render_template('auth/change_passwd.html', form=form)
def allowed_file(filename):
'''
    Check whether the filename has an allowed image extension.
'''
return '.' in filename and \
filename.rsplit('.', 1)[1] in set(['png', 'jpg', 'jpeg', 'gif'])
| mit | 6,027,085,668,962,736,000 | 29.43956 | 83 | 0.625632 | false |
qtproject/pyside-pyside | tests/QtCore/bug_1031.py | 1 | 1382 | #############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
from PySide2.QtCore import QStateMachine, QState
mach = QStateMachine()
state = QState(mach)
print(state.machine())
| lgpl-2.1 | 1,422,556,043,182,802,200 | 40.878788 | 77 | 0.670767 | false |
ZhuangER/Search-Engine | search_engine.py | 1 | 1983 | import requests
from bs4 import BeautifulSoup
import logging
import time
import re
import os.path
from base64 import b16encode
logging.getLogger().setLevel(logging.DEBUG)
_r_learnprogramming_url = re.compile(r'http://(www.)?reddit.com/r/learnprogramming')
def downloadRedditUrl(url):
logging.debug("Downloading url: {}".format(url))
assert _r_learnprogramming_url.match(url)
header = {'User-Agent': 'SeachingReddit bot version 0.1',}
r = requests.get(url, headers=header)
if r.status_code != 200:
raise Exception("Non-OK status code: {}".format(r.status_code))
print r.status_code
return r.text
def parseRedditPost(html):
bs = BeautifulSoup(html)
return bs.select('div.usertext-body')[1].text
class Crawler(object):
def __init__ (self, start_url, storage_dir):
self.start_url = start_url
self.storage_dir = storage_dir
@staticmethod
def _make_absolute_url(url):
return 'http://reddit.com' + url
def crawl(self):
logging.debug("Starting to crawl from page {}".format(self.start_url))
current_page_url = self.start_url
while True:
current_page = downloadRedditUrl(current_page_url)
bs = BeautifulSoup(current_page)
all_posts_links = bs.findAll('a', attrs={'class': 'title'})
post_links = [Crawler._make_absolute_url(link['href']) for link in all_posts_links]
for post_link in post_links:
html = downloadRedditUrl(post_link)
                # truncate the encoded name so it stays within filename length limits
                # TODO: add a timestamp, because the first characters of the encoding may collide
stored_text_file_name = os.path.join(self.storage_dir, b16encode(post_link)[:10])
stored_text_file = open(stored_text_file_name, 'w')
stored_text_file.write(html.encode('utf8')) # save unicode characters
time.sleep(2)
next_page = bs.find('a', attrs={'rel':'next'})['href']
logging.debug('First post is {}'.format(post_links[0]))
            current_page_url = next_page
time.sleep(2)
#logging.debug(post_links)
#print all_posts_links
#print 'hello'
#print current_page.text
| mit | -2,783,316,346,907,406,300 | 31.508197 | 86 | 0.70701 | false |
dongqunxi/GrangerCausality | Preprocessing/CTPS_identifation_BrainComponents.py | 1 | 4218 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 21 10:42:55 2014
@author: imenb101
"""
import numpy as np
import matplotlib.pylab as pl
import mne, sys, os
from mne.viz import tight_layout
from mne.fiff import Raw
from mne.preprocessing import ICA
from ctps import compute_ctps
from ctps import plot_ctps_panel
try:
subject = sys.argv[1]
    trigger = sys.argv[2]  # Get the trigger type: 'stim' or 'resp'
except:
print "Please run with input file provided. Exiting"
sys.exit()
res_ch_name = 'STI 013'
sti_ch_name = 'STI 014'
n_components=0.99
n_pca_components=None
max_pca_components=None
subjects_dir = '/home/qdong/data/'
subject_path = subjects_dir + subject#Set the data path of the subject
#raw_fname = subject_path + '/MEG/ssp_cleaned_%s_audi_cued-raw_cle.fif' %subject
raw_fname = subject_path + '/MEG/%s_audi_cued-raw_cle.fif' %subject
raw_basename = os.path.splitext(os.path.basename(raw_fname))[0]
raw = Raw(raw_fname, preload=True)
picks = mne.fiff.pick_types(raw.info, meg=True, eeg=False, eog=False,
stim=False, exclude='bads')
ica = ICA(n_components=n_components, n_pca_components=n_pca_components, max_pca_components=max_pca_components, random_state=0)
ica.decompose_raw(raw, picks=picks, decim=3)
if trigger == 'resp':  # 'resp' selects the response channel
add_from_raw = mne.fiff.pick_types(raw.info, meg=False, resp=True, exclude='bads')
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
events = mne.find_events(sources_add, stim_channel=res_ch_name)
raw_basename += '_resp'
elif trigger == 'stim':  # 'stim' selects the stimulus channel
add_from_raw = mne.fiff.pick_types(raw.info, meg=False, stim=True, exclude='bads')
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
events = mne.find_events(sources_add, stim_channel=sti_ch_name)
raw_basename += '_stim'
else:
print "Please select the triger channel '1' for response channel or '0' for stimilus channel."
sys.exit()
# drop non-data channels (ICA sources are type misc)
#ica.n_pca_components=None
picks = mne.fiff.pick_types(sources_add.info, meg=False, misc=True, exclude='bads')
# Compare different bandwidths of ICA components: 2-4, 4-8, 8-12 and 12-16 Hz
l_f = 2
Brain_idx1=[]#The index of ICA related with trigger channels
axes_band = [221, 222, 223, 224]
ax_index = 0
for i in [4, 8, 12, 16]:
h_f = i
get_ylim = True
if l_f != 2:
get_ylim = False
sources_add = ica.sources_as_raw(raw, picks=add_from_raw)
#sources_add.filter(l_freq=l_f, h_freq=h_f, method='iir', n_jobs=4)
sources_add.filter(l_freq=l_f, h_freq=h_f, n_jobs=4, method='iir')
this_band = '%i-%iHz' % (l_f, h_f)
temp = l_f
l_f = h_f
# Epochs at R peak onset, from stim_eve.
ica_epochs_events = mne.Epochs(sources_add, events, event_id=1, tmin=-0.3, tmax=0.3,
picks=picks, preload=True, proj=False)
x_length = len(ica_epochs_events.ch_names)
# Compute phase values and statistics (significance values pK)
#phase_trial_ecg, pk_dyn_ecg, _ = compute_ctps(ica_epochs_ecg.get_data())
_ , pk_dyn_stim, phase_trial = compute_ctps(ica_epochs_events.get_data())
# Get kuiper maxima
pk_max = pk_dyn_stim.max(axis=1)
Brain_sources = pk_max > 0.1 # bool array, get the prominient components related with trigger
Brain_ind = np.where(Brain_sources)[0].tolist() # indices
#skip the null idx related with response
    Brain_idx1 += Brain_ind  # collect the prominent sources related to the trigger
#Plot the bar
#ax = pl.subplot(axes_band[ax_index])
#pk_max.plot(axes=ax_index, ylim=ylim_ecg, xlim=xlim1)
pl.subplot(axes_band[ax_index])
x_bar = np.arange(x_length)
pl.bar(x_bar, pk_max)
for x in Brain_ind:
pl.bar(x, pk_max[x], facecolor='r')
pl.axhline(0.1, color='k', label='threshod')
pl.xlabel('%s' %this_band)
pl.ylim(0, 0.5)
ax_index += 1
pl.tight_layout()
pl.show()
#pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s_withoutSSP.png'%(subject, trigger))
pl.savefig(subject_path+'/MEG/ctps_distribution_%s_%s.png'%(subject, trigger))
Brain_idx = list(set(Brain_idx1))
print '%s has been identified as trigger components' %(Brain_idx)
| bsd-3-clause | -859,910,584,621,222,400 | 38.055556 | 127 | 0.672357 | false |
hakanozadam/bal | bal/reference/prepare.py | 1 | 5583 | #!/bin/env python3
# AUTHORS:
# Hakan Ozadam
# Rachel Brown
#
# Moore Laboratory
# UMASS Medical School / HHMI
# RNA Therapeutics Institute
# Albert Sherman Center, ASC4-1009
# 368 Plantation Street
# Worcester, MA 01605
# USA
#
#################################################################
import argparse
import os
from shutil import which
from sys import platform as _os
#################################################################
def get_commandline_arguments():
''' Parse and return the command line arguments'''
parser = argparse.ArgumentParser(description=
'''
BAL Reference Prepare
This script creates bowtie2 and HISAT references for BAL.
    In order to prepare HISAT and bowtie2 references,
    BAL needs a whole genome reference, in fasta format, with exon annotation in a GTF file.
    BAL locally aligns the reads against the first N nucleotides of the introns.
By default, N = 20 but this can be modified in the N parameter.
''')
parser.add_argument("-g" ,
metavar = 'gtf file' ,
help = "GTF file annotating the exons in the genome of interest." ,
required = True ,
type = str)
parser.add_argument("-f" ,
metavar = 'Genomic Fasta File' ,
help = "The fasta file that contains the genomic sequence" ,
required = True ,
type = str)
parser.add_argument("-N" ,
metavar = 'Number of five prime intron nucleotides' ,
help = "This is the number of five prime nucleotides in the intron where the reaqds are going to "
"be locally aligned against." ,
required = False ,
default = 20,
type = int)
parser.add_argument("-o" ,
metavar = 'Output Directory' ,
help = "Output directory" ,
required = True ,
type = str)
return parser.parse_args()
#################################################################################
def check_HISAT_files(ref_base):
''' TODO: Check for the existence of other files as well'''
result = list()
suffixes = ('.1.bt2', '.2.bt2', '.3.bt2', '.4.bt2', '.rev.1.bt2', '.rev.2.bt2')
for suffix in suffixes:
if (not os.path.isfile(ref_base + suffix) ) and\
(not os.path.isfile(ref_base + suffix + "l")):
result.append("Couldn't find the HISAT reference: " + ref_base + suffix + " or " +
ref_base + suffix + "l")
return result
#################################################################################
def process_commandline_arguments(cmd_args):
''' Check if the input files exist or not and do some consistency checks '''
error_messages = list()
if not os.path.isfile(cmd_args.f):
error_messages.append("Couldn't find the fasta file " + cmd_args.f)
if not os.path.isfile(cmd_args.g):
error_messages.append("Couldn't find the gtf file " + cmd_args.g)
if error_messages:
print("Error!\nThe following error(s) occurred:")
for error in enumerate(error_messages):
print("{n}) {e}".format(n = error[0] + 1, e = error[1]))
exit(1)
return cmd_args
##################################################################################
def get_arguments():
return process_commandline_arguments(get_commandline_arguments())
###################################################################################
###################################################################################
def get_executables(bin_directory):
''' Check the existence of executables: hisat, bowtie2
Put their paths in a dictionary and return it'''
#check the os and define bin variables for executables accordingly
if _os == "linux" or _os == "linux2":
hisat_relative_path = 'bal/bin/hisat/linux_x86_64'
bowtie2_relative_path = 'bal/bin/bowtie2/linux_x86_64'
bowtie2_build_relative_path = 'bal/bin/bowtie2/linux_x86_64/bowtie2-build'
elif _os == "darwin":
hisat_relative_path = 'bal/bin/hisat/mac_os_x_x86_64'
bowtie2_relative_path = 'bal/bin/bowtie2/mac_os_x_x86_64'
bowtie2_build_relative_path = 'bal/bin/bowtie2/mac_os_x_x86_64/bowtie2-build'
print(bowtie2_build_relative_path)
executables = dict()
error_messages = list()
executables['hisat'] = os.path.join(bin_directory, hisat_relative_path, 'hisat')
executables['hisat-build'] = os.path.join(bin_directory, hisat_relative_path, 'hisat-build')
executables['hisat_extract_splice_sites'] = os.path.join(bin_directory, hisat_relative_path,\
'extract_splice_sites.py')
executables['bowtie2'] = os.path.join(bin_directory, bowtie2_relative_path,'bowtie2')
executables['bowtie2-build'] = os.path.join(bin_directory, bowtie2_build_relative_path)
for executable, path in executables.items():
if not which(path):
error_messages.append("Couldn't find the {executable} executable at {path}"\
.format(executable = executable, path = path))
if(error_messages):
        print('The following executable(s) are missing. If you have the files in the indicated path, '
'make sure that the files are executable.')
print("\n".join(error_messages))
exit(1)
return executables
| gpl-2.0 | 3,486,771,046,450,948,000 | 37.770833 | 118 | 0.547376 | false |
p473lr/i-urge-mafia-gear | HP Code Wars Documents/2014/Solutions/prob02_CheckDigit.py | 1 | 2339 | #!/usr/bin/env python
#CodeWars 2014
#
#Check Digits
#
# There are many situations where we exchange a number with someone. In some cases we need
# to be sure that the number we gave them was received correctly. This is especially
# important for credit cards, serial numbers, and product bar code numbers.
# A check digit is used to ensure that a sequence of numbers was transmitted or
# entered correctly without human error. This extra digit helps verify that a tired
# programmer didn't switch numbers (ex. 12 -> 15), reverse a pair of numbers
# (ex. 34 -> 43) or otherwise alter the sequence. The different algorithms used
# to calculate a check digit determine what types of errors it will catch.
#
# For UPC there's a specific algorithm that's used to catch 100% of single digit errors
# and 89% of transposition errors. Your task is to calculate the missing check digit for
# the given list of UPCs.
#
# First, add all the digits in the odd-numbered positions together and multiply the
# result by three. Then add the digits in the even-numbered positions to the result.
# Next, find the modulo 10 of the sum. The modulo operation calculates the remainder
# after dividing the sum by 10. Finally subtract if from 10 to obtain the check digit.
#
# The first line of the input will contain the number of partial UPCs that follow.
# Each UPC will be on it's own line with spaces between all the digits.
#
# 7
# 0 3 6 0 0 0 2 9 1 4 5
# 0 7 3 8 5 2 0 0 9 3 8
# 0 4 1 2 2 0 1 8 9 0 4
# 0 3 7 0 0 0 2 0 2 1 4
# 7 6 5 6 6 8 2 0 2 0 2
# 0 4 1 2 2 0 6 7 0 4 0
# 0 4 1 2 2 0 6 7 0 0 0
#
#
# 0 3 6 0 0 0 2 9 1 4 5 2
# 0 7 3 8 5 2 0 0 9 3 8 5
# 0 4 1 2 2 0 1 8 9 0 4 5
# 0 3 7 0 0 0 2 0 2 1 4 1
# 7 6 5 6 6 8 2 0 2 0 2 8
# 0 4 1 2 2 0 6 7 0 4 0 6
# 0 4 1 2 2 0 6 7 0 0 0 0
#
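# A minimal standalone sketch of the computation described above (the digits
# and expected check digit come from the first sample line; the variable names
# are illustrative only):
#
#   digits = [0, 3, 6, 0, 0, 0, 2, 9, 1, 4, 5]
#   total = 3 * sum(digits[0::2]) + sum(digits[1::2])
#   check_digit = (10 - total % 10) % 10   # -> 2
#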
import sys
print ("Enter number of lines. Then 11 digits for each line.")
count = int(sys.stdin.readline())
while (count > 0):
count -= 1
line = sys.stdin.readline().rstrip('\n')
currentDigit=1
checkDigit=0
for c in line:
if (c.isdigit()):
value = int(c)
checkDigit += value
if (currentDigit % 2 == 1):
checkDigit += value+value # Add odd positions a total of 3 times.
currentDigit += 1
checkDigit = checkDigit % 10
print (line, (10-checkDigit)%10)
| apache-2.0 | -2,976,268,853,443,760,600 | 34.984615 | 91 | 0.666524 | false |
pibroch/ocfs2-test | programs/write_torture/write_torture.py | 1 | 4399 | #!/usr/bin/env python
#
#
# Copyright (C) 2006 Oracle. All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 021110-1307, USA.
#
# XXX: Future improvements:
#
# Program : write_torture.py
# Description : Interface to run write_torture. Will validate parameters and
# properly configure LAM/MPI and start it before starting
#               the write_torture program. This program will run on each
# node.
# Author : Marcos E. Matsunaga
#
import os, stat, sys, time, optparse, socket, string, o2tf, pdb, timing, config
import random
#
#pdb.set_trace()
#
#args = sys.argv[1:]
#
MINBLOCKSIZE = 512
MAXBLOCKSIZE = 8192
#
DEBUGON = os.getenv('DEBUG',0)
#
EXECPGM = os.path.join(config.BINDIR,'write_torture')
#
uname = os.uname()
lhostname = str(socket.gethostname())
numnodes = 0
logfile = config.LOGFILE
blocksize = '512,4096'
seconds = 60
#
Usage = '\n %prog [-b|--blocksize] \
[-f | --filename <fullpath filename>] \
[-l | --logfile logfilename] \
[-s | --seconds seconds] \
[-u | --uniquefile] \
[-h|--help]'
#
# FUNCTIONS
#
#
# MAIN
#
if __name__=='__main__':
parser = optparse.OptionParser(usage=Usage)
#
parser.add_option('-b',
'--blocksize',
dest='blocksize',
type='string',
help='Blocksize interval that will be during test. \
Range from 512 to 8192 bytes (Format:xxx,yyy).')
#
parser.add_option('-f',
'--filename',
dest='filename',
type='string',
help='Filename that will be used during test.')
#
parser.add_option('-l',
'--logfile',
dest='logfile',
type='string',
help='Logfile used by the process.')
#
parser.add_option('-s',
'--seconds',
dest='seconds',
type='int',
help='Number of seconds the test will run (def. 60).')
#
parser.add_option('-u',
'--uniquefile',
action="store_true",
dest='uniquefile',
default=False)
#
(options, args) = parser.parse_args()
if len(args) != 0:
o2tf.printlog('args left %s' % len(args), logfile, 0, '')
parser.error('incorrect number of arguments')
#
if options.blocksize:
blocksize = options.blocksize
blockvalues = blocksize.split(',')
if len(blockvalues) != 2:
o2tf.printlog('Blocksize must be specified in format xxx,yyy\n\n',
logfile,
0,
'')
parser.error('Invalid format.')
else:
parser.error('Blocksize parameter needs to be specified.')
if int(blockvalues[0]) < MINBLOCKSIZE or int(blockvalues[1]) > MAXBLOCKSIZE:
o2tf.printlog('Blocksize must be between %s and %s\n\n' % \
(MINBLOCKSIZE, MAXBLOCKSIZE),
logfile,
0,
'')
parser.error('Invalid range.')
if DEBUGON:
o2tf.printlog('Blocksize range from %s to %s\n\n' % \
(str(blockvalues[0]), str(blockvalues[1])),
logfile,
0,
'')
#
if options.filename:
filename = options.filename
else:
parser.error('filename parameter needs to be specified.')
#
if options.logfile:
logfile = options.logfile
#
if options.seconds:
seconds = options.seconds
#
print options.uniquefile
if not options.uniquefile:
filename = options.filename + '_' + lhostname + '_' + str(os.getpid())
#
BLKSZ = random.randint(int(blockvalues[0]), int(blockvalues[1]))
cmd = (EXECPGM + ' -s %s -b %s %s 2>&1 | tee -a %s' %
(seconds, BLKSZ, filename, logfile))
if DEBUGON:
o2tf.printlog('write_torture: main - current directory %s' % os.getcwd(),
logfile,
0,
'')
o2tf.printlog('write_torture: main - filename = %s' % filename,
logfile,
0,
'')
o2tf.printlog('write_torture: main - BLKSZ = %s' %
BLKSZ,
logfile,
0,
'')
t1 = time.time()
if DEBUGON:
o2tf.printlog('write_torture: main - cmd = %s' % cmd,
logfile,
0,
'')
RC = os.system(cmd)
t2 = time.time()
if DEBUGON:
o2tf.printlog('write_torture: elapsed time = %s - RC = %s' %
((t2 - t1), RC),
logfile,
0,
'')
#
sys.exit(RC)
| gpl-2.0 | 5,714,742,131,041,537,000 | 23.713483 | 79 | 0.661287 | false |
rven/odoo | addons/im_livechat/models/im_livechat_channel.py | 1 | 15553 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import base64
import random
import re
from odoo import api, fields, models, modules, _
class ImLivechatChannel(models.Model):
""" Livechat Channel
Define a communication channel, which can be accessed with 'script_external' (script tag to put on
external website), 'script_internal' (code to be integrated with odoo website) or via 'web_page' link.
It provides rating tools, and access rules for anonymous people.
"""
_name = 'im_livechat.channel'
_inherit = ['rating.parent.mixin']
_description = 'Livechat Channel'
_rating_satisfaction_days = 7 # include only last 7 days to compute satisfaction
def _default_image(self):
image_path = modules.get_module_resource('im_livechat', 'static/src/img', 'default.png')
return base64.b64encode(open(image_path, 'rb').read())
def _default_user_ids(self):
return [(6, 0, [self._uid])]
# attribute fields
name = fields.Char('Name', required=True, help="The name of the channel")
button_text = fields.Char('Text of the Button', default='Have a Question? Chat with us.',
help="Default text displayed on the Livechat Support Button")
default_message = fields.Char('Welcome Message', default='How may I help you?',
help="This is an automated 'welcome' message that your visitor will see when they initiate a new conversation.")
input_placeholder = fields.Char('Chat Input Placeholder', help='Text that prompts the user to initiate the chat.')
header_background_color = fields.Char(default="#875A7B", help="Default background color of the channel header once open")
title_color = fields.Char(default="#FFFFFF", help="Default title color of the channel once open")
button_background_color = fields.Char(default="#878787", help="Default background color of the Livechat button")
button_text_color = fields.Char(default="#FFFFFF", help="Default text color of the Livechat button")
# computed fields
web_page = fields.Char('Web Page', compute='_compute_web_page_link', store=False, readonly=True,
help="URL to a static page where you client can discuss with the operator of the channel.")
are_you_inside = fields.Boolean(string='Are you inside the matrix?',
compute='_are_you_inside', store=False, readonly=True)
script_external = fields.Text('Script (external)', compute='_compute_script_external', store=False, readonly=True)
nbr_channel = fields.Integer('Number of conversation', compute='_compute_nbr_channel', store=False, readonly=True)
image_128 = fields.Image("Image", max_width=128, max_height=128, default=_default_image)
# relationnal fields
user_ids = fields.Many2many('res.users', 'im_livechat_channel_im_user', 'channel_id', 'user_id', string='Operators', default=_default_user_ids)
channel_ids = fields.One2many('mail.channel', 'livechat_channel_id', 'Sessions')
rule_ids = fields.One2many('im_livechat.channel.rule', 'channel_id', 'Rules')
def _are_you_inside(self):
for channel in self:
channel.are_you_inside = bool(self.env.uid in [u.id for u in channel.user_ids])
def _compute_script_external(self):
view = self.env['ir.model.data'].get_object('im_livechat', 'external_loader')
values = {
"url": self.env['ir.config_parameter'].sudo().get_param('web.base.url'),
"dbname": self._cr.dbname,
}
for record in self:
values["channel_id"] = record.id
record.script_external = view._render(values) if record.id else False
def _compute_web_page_link(self):
base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
for record in self:
record.web_page = "%s/im_livechat/support/%i" % (base_url, record.id) if record.id else False
@api.depends('channel_ids')
def _compute_nbr_channel(self):
data = self.env['mail.channel'].read_group([
('livechat_channel_id', 'in', self._ids),
('channel_message_ids', '!=', False)], ['__count'], ['livechat_channel_id'], lazy=False)
channel_count = {x['livechat_channel_id'][0]: x['__count'] for x in data}
for record in self:
record.nbr_channel = channel_count.get(record.id, 0)
# --------------------------
# Action Methods
# --------------------------
def action_join(self):
self.ensure_one()
return self.write({'user_ids': [(4, self._uid)]})
def action_quit(self):
self.ensure_one()
return self.write({'user_ids': [(3, self._uid)]})
def action_view_rating(self):
""" Action to display the rating relative to the channel, so all rating of the
sessions of the current channel
:returns : the ir.action 'action_view_rating' with the correct domain
"""
self.ensure_one()
action = self.env['ir.actions.act_window']._for_xml_id('im_livechat.rating_rating_action_view_livechat_rating')
action['domain'] = [('parent_res_id', '=', self.id), ('parent_res_model', '=', 'im_livechat.channel')]
return action
# --------------------------
# Channel Methods
# --------------------------
def _get_available_users(self):
""" get available user of a given channel
:retuns : return the res.users having their im_status online
"""
self.ensure_one()
return self.user_ids.filtered(lambda user: user.im_status == 'online')
def _get_livechat_mail_channel_vals(self, anonymous_name, operator, user_id=None, country_id=None):
# partner to add to the mail.channel
operator_partner_id = operator.partner_id.id
channel_partner_to_add = [(4, operator_partner_id)]
visitor_user = False
if user_id:
visitor_user = self.env['res.users'].browse(user_id)
if visitor_user and visitor_user.active: # valid session user (not public)
channel_partner_to_add.append((4, visitor_user.partner_id.id))
return {
'channel_partner_ids': channel_partner_to_add,
'livechat_active': True,
'livechat_operator_id': operator_partner_id,
'livechat_channel_id': self.id,
'anonymous_name': False if user_id else anonymous_name,
'country_id': country_id,
'channel_type': 'livechat',
'name': ' '.join([visitor_user.display_name if visitor_user else anonymous_name, operator.livechat_username if operator.livechat_username else operator.name]),
'public': 'private',
'email_send': False,
}
def _open_livechat_mail_channel(self, anonymous_name, previous_operator_id=None, user_id=None, country_id=None):
""" Return a mail.channel given a livechat channel. It creates one with a connected operator, or return false otherwise
:param anonymous_name : the name of the anonymous person of the channel
:param previous_operator_id : partner_id.id of the previous operator that this visitor had in the past
:param user_id : the id of the logged in visitor, if any
:param country_code : the country of the anonymous person of the channel
:type anonymous_name : str
:return : channel header
:rtype : dict
If this visitor already had an operator within the last 7 days (information stored with the 'im_livechat_previous_operator_pid' cookie),
the system will first try to assign that operator if he's available (to improve user experience).
"""
self.ensure_one()
operator = False
if previous_operator_id:
available_users = self._get_available_users()
# previous_operator_id is the partner_id of the previous operator, need to convert to user
if previous_operator_id in available_users.mapped('partner_id').ids:
operator = next(available_user for available_user in available_users if available_user.partner_id.id == previous_operator_id)
if not operator:
operator = self._get_random_operator()
if not operator:
# no one available
return False
# create the session, and add the link with the given channel
mail_channel_vals = self._get_livechat_mail_channel_vals(anonymous_name, operator, user_id=user_id, country_id=country_id)
mail_channel = self.env["mail.channel"].with_context(mail_create_nosubscribe=False).sudo().create(mail_channel_vals)
mail_channel._broadcast([operator.partner_id.id])
return mail_channel.sudo().channel_info()[0]
def _get_random_operator(self):
""" Return a random operator from the available users of the channel that have the lowest number of active livechats.
        A livechat is considered 'active' if it has at least one message within the last 30 minutes.
(Some annoying conversions have to be made on the fly because this model holds 'res.users' as available operators
and the mail_channel model stores the partner_id of the randomly selected operator)
:return : user
:rtype : res.users
"""
operators = self._get_available_users()
if len(operators) == 0:
return False
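        # Count, per operator, the distinct livechat channels that received a
        # message in the last 30 minutes (the "active" conversations mentioned
        # in the docstring), ordered from least to most busy.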
self.env.cr.execute("""SELECT COUNT(DISTINCT c.id), c.livechat_operator_id
FROM mail_channel c
LEFT OUTER JOIN mail_message_mail_channel_rel r ON c.id = r.mail_channel_id
LEFT OUTER JOIN mail_message m ON r.mail_message_id = m.id
WHERE c.channel_type = 'livechat'
AND c.livechat_operator_id in %s
AND m.create_date > ((now() at time zone 'UTC') - interval '30 minutes')
GROUP BY c.livechat_operator_id
ORDER BY COUNT(DISTINCT c.id) asc""", (tuple(operators.mapped('partner_id').ids),))
active_channels = self.env.cr.dictfetchall()
# If inactive operator(s), return one of them
active_channel_operator_ids = [active_channel['livechat_operator_id'] for active_channel in active_channels]
inactive_operators = [operator for operator in operators if operator.partner_id.id not in active_channel_operator_ids]
if inactive_operators:
return random.choice(inactive_operators)
# If no inactive operator, active_channels is not empty as len(operators) > 0 (see above).
# Get the less active operator using the active_channels first element's count (since they are sorted 'ascending')
lowest_number_of_conversations = active_channels[0]['count']
less_active_operator = random.choice([
active_channel['livechat_operator_id'] for active_channel in active_channels
if active_channel['count'] == lowest_number_of_conversations])
# convert the selected 'partner_id' to its corresponding res.users
return next(operator for operator in operators if operator.partner_id.id == less_active_operator)
def _get_channel_infos(self):
self.ensure_one()
return {
'header_background_color': self.header_background_color,
'button_background_color': self.button_background_color,
'title_color': self.title_color,
'button_text_color': self.button_text_color,
'button_text': self.button_text,
'input_placeholder': self.input_placeholder,
'default_message': self.default_message,
"channel_name": self.name,
"channel_id": self.id,
}
def get_livechat_info(self, username='Visitor'):
self.ensure_one()
if username == 'Visitor':
username = _('Visitor')
info = {}
info['available'] = len(self._get_available_users()) > 0
info['server_url'] = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
if info['available']:
info['options'] = self._get_channel_infos()
info['options']['current_partner_id'] = self.env.user.partner_id.id
info['options']["default_username"] = username
return info
class ImLivechatChannelRule(models.Model):
""" Channel Rules
Rules defining access to the channel (countries, and url matching). It also provide the 'auto pop'
option to open automatically the conversation.
"""
_name = 'im_livechat.channel.rule'
_description = 'Livechat Channel Rules'
_order = 'sequence asc'
regex_url = fields.Char('URL Regex',
help="Regular expression specifying the web pages this rule will be applied on.")
action = fields.Selection([('display_button', 'Display the button'), ('auto_popup', 'Auto popup'), ('hide_button', 'Hide the button')],
string='Action', required=True, default='display_button',
help="* 'Display the button' displays the chat button on the pages.\n"\
"* 'Auto popup' displays the button and automatically open the conversation pane.\n"\
"* 'Hide the button' hides the chat button on the pages.")
auto_popup_timer = fields.Integer('Auto popup timer', default=0,
help="Delay (in seconds) to automatically open the conversation window. Note: the selected action must be 'Auto popup' otherwise this parameter will not be taken into account.")
channel_id = fields.Many2one('im_livechat.channel', 'Channel',
help="The channel of the rule")
country_ids = fields.Many2many('res.country', 'im_livechat_channel_country_rel', 'channel_id', 'country_id', 'Country',
help="The rule will only be applied for these countries. Example: if you select 'Belgium' and 'United States' and that you set the action to 'Hide Button', the chat button will be hidden on the specified URL from the visitors located in these 2 countries. This feature requires GeoIP installed on your server.")
sequence = fields.Integer('Matching order', default=10,
help="Given the order to find a matching rule. If 2 rules are matching for the given url/country, the one with the lowest sequence will be chosen.")
def match_rule(self, channel_id, url, country_id=False):
""" determine if a rule of the given channel matches with the given url
:param channel_id : the identifier of the channel_id
:param url : the url to match with a rule
:param country_id : the identifier of the country
:returns the rule that matches the given condition. False otherwise.
:rtype : im_livechat.channel.rule
"""
def _match(rules):
for rule in rules:
# url might not be set because it comes from referer, in that
# case match the first rule with no regex_url
if re.search(rule.regex_url or '', url or ''):
return rule
return False
# first, search the country specific rules (the first match is returned)
if country_id: # don't include the country in the research if geoIP is not installed
domain = [('country_ids', 'in', [country_id]), ('channel_id', '=', channel_id)]
rule = _match(self.search(domain))
if rule:
return rule
# second, fallback on the rules without country
domain = [('country_ids', '=', False), ('channel_id', '=', channel_id)]
return _match(self.search(domain))
| agpl-3.0 | -6,630,059,679,341,392,000 | 52.816609 | 319 | 0.641034 | false |
pikamar/scoop | config/settings/production.py | 1 | 7423 | # -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
- Use sentry for error logging
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
import logging
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
# raven sentry client
# See https://docs.getsentry.com/hosted/clients/python/integrations/django/
INSTALLED_APPS += ('raven.contrib.django.raven_compat', )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
RAVEN_MIDDLEWARE = ('raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',
'raven.contrib.django.raven_compat.middleware.SentryResponseErrorIdMiddleware',)
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + \
RAVEN_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE
STATIC_URL = MEDIA_URL
# See: https://github.com/antonagestam/collectfast
# For Django 1.7+, 'collectfast' should come before
# 'django.contrib.staticfiles'
AWS_PRELOAD_METADATA = True
INSTALLED_APPS = ('collectfast', ) + INSTALLED_APPS
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Scoop <noreply@example.com>')
EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[Scoop] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
}
}
}
# Sentry Configuration
SENTRY_DSN = env('DJANGO_SENTRY_DSN')
SENTRY_CLIENT = env('DJANGO_SENTRY_CLIENT', default='raven.contrib.django.raven_compat.DjangoClient')
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['sentry'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'sentry'],
'propagate': False,
},
},
}
SENTRY_CELERY_LOGLEVEL = env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO)
RAVEN_CONFIG = {
'CELERY_LOGLEVEL': env.int('DJANGO_SENTRY_LOG_LEVEL', logging.INFO),
'DSN': SENTRY_DSN
}
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL')
# Your production stuff: Below this line define 3rd party library settings
| bsd-3-clause | -8,161,419,392,009,591,000 | 34.014151 | 117 | 0.605011 | false |
Samsung/skia | bench/gen_bench_expectations.py | 2 | 5049 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generate bench_expectations file from a given set of bench data files. """
import argparse
import bench_util
import os
import re
import sys
# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.5 # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 2.0 # Ratio of range for lower bounds.
ERR_RATIO = 0.08 # Further widens the range by the ratio of average value.
ERR_UB = 1.0 # Adds an absolute upper error to cope with small benches.
ERR_LB = 1.5   # Adds an absolute lower error to cope with small benches.
# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
'simple_viewport_1000x1000_angle',
'simple_viewport_1000x1000_gpu',
'simple_viewport_1000x1000_scalar_1.100000',
'simple_viewport_1000x1000_scalar_1.100000_gpu',
]
# List of flaky entries that should be excluded. Each entry is defined by a list
# of 3 strings, corresponding to the substrings of [bench, config, builder] to
# search for. A bench expectations line is excluded when each of the 3 strings
# in the list is a substring of the corresponding element of the given line. For
# instance, ['desk_yahooanswers', 'gpu', 'Ubuntu'] will skip expectation entries
# of SKP benchs whose name contains 'desk_yahooanswers' on all gpu-related
# configs of all Ubuntu builders.
ENTRIES_TO_EXCLUDE = [
]
def compute_ranges(benches):
"""Given a list of bench numbers, calculate the alert range.
Args:
benches: a list of float bench values.
Returns:
a list of float [lower_bound, upper_bound].
"""
minimum = min(benches)
maximum = max(benches)
diff = maximum - minimum
avg = sum(benches) / len(benches)
return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_LB,
maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_UB]
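# Worked example (illustrative, not part of the original script): for
# benches = [10.0, 12.0] we get minimum=10, maximum=12, diff=2, avg=11, so the
# returned range is [10 - 2*2.0 - 11*0.08 - 1.5, 12 + 2*1.5 + 11*0.08 + 1.0],
# i.e. [3.62, 16.88].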
def create_expectations_dict(revision_data_points, builder):
"""Convert list of bench data points into a dictionary of expectations data.
Args:
revision_data_points: a list of BenchDataPoint objects.
builder: string of the corresponding buildbot builder name.
Returns:
a dictionary of this form:
keys = tuple of (config, bench) strings.
values = list of float [expected, lower_bound, upper_bound] for the key.
"""
bench_dict = {}
for point in revision_data_points:
if (point.time_type or # Not walltime which has time_type ''
not point.config in CONFIGS_TO_INCLUDE):
continue
to_skip = False
for bench_substr, config_substr, builder_substr in ENTRIES_TO_EXCLUDE:
if (bench_substr in point.bench and config_substr in point.config and
builder_substr in builder):
to_skip = True
break
if to_skip:
continue
key = (point.config, point.bench)
if key in bench_dict:
raise Exception('Duplicate bench entry: ' + str(key))
bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)
return bench_dict
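# Example of the resulting dictionary shape (key and values are made up for
# illustration; real entries come from the parsed bench data):
#   {('simple_viewport_1000x1000', 'desk_amazon.skp'): [5.2, 3.6, 16.9]}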
def main():
"""Reads bench data points, then calculate and export expectations.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--representation_alg', default='25th',
help='bench representation algorithm to use, see bench_util.py.')
parser.add_argument(
'-b', '--builder', required=True,
help='name of the builder whose bench ranges we are computing.')
parser.add_argument(
'-d', '--input_dir', required=True,
help='a directory containing bench data files.')
parser.add_argument(
'-o', '--output_file', required=True,
help='file path and name for storing the output bench expectations.')
parser.add_argument(
'-r', '--git_revision', required=True,
help='the git hash to indicate the revision of input data to use.')
args = parser.parse_args()
builder = args.builder
data_points = bench_util.parse_skp_bench_data(
args.input_dir, args.git_revision, args.representation_alg)
expectations_dict = create_expectations_dict(data_points, builder)
out_lines = []
keys = expectations_dict.keys()
keys.sort()
for (config, bench) in keys:
(expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
'%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
'bench': bench,
'config': config,
'builder': builder,
'representation': args.representation_alg,
'expected': expected,
'lower_bound': lower_bound,
'upper_bound': upper_bound})
with open(args.output_file, 'w') as file_handle:
file_handle.write('\n'.join(out_lines))
if __name__ == "__main__":
main()
| bsd-3-clause | -3,046,671,847,493,979,000 | 35.323741 | 80 | 0.653001 | false |
trbs/pid | pid/base.py | 1 | 7603 | import os
import sys
import errno
import atexit
import signal
import logging
import tempfile
from .utils import (
determine_pid_directory,
effective_access,
)
try:
from contextlib import ContextDecorator as BaseObject
except ImportError:
BaseObject = object
DEFAULT_PID_DIR = determine_pid_directory()
DEFAULT_CHMOD = 0o644
PID_CHECK_EMPTY = "PID_CHECK_EMPTY"
PID_CHECK_NOFILE = "PID_CHECK_NOFILE"
PID_CHECK_SAMEPID = "PID_CHECK_SAMEPID"
PID_CHECK_NOTRUNNING = "PID_CHECK_NOTRUNNING"
class PidFileError(Exception):
pass
class PidFileConfigurationError(Exception):
pass
class PidFileUnreadableError(PidFileError):
pass
class PidFileAlreadyRunningError(PidFileError):
def __init__(self, message, pid=None):
self.message = message
self.pid = pid
class PidFileAlreadyLockedError(PidFileError):
pass
class PidFileBase(BaseObject):
__slots__ = (
"pid", "pidname", "piddir", "enforce_dotpid_postfix",
"register_term_signal_handler", "register_atexit", "filename",
"fh", "lock_pidfile", "chmod", "uid", "gid", "force_tmpdir",
"allow_samepid", "_logger", "_is_setup", "_need_cleanup",
)
def __init__(self, pidname=None, piddir=None, enforce_dotpid_postfix=True,
register_term_signal_handler="auto", register_atexit=True,
lock_pidfile=True, chmod=DEFAULT_CHMOD, uid=-1, gid=-1, force_tmpdir=False,
allow_samepid=False):
self.pidname = pidname
self.piddir = piddir
self.enforce_dotpid_postfix = enforce_dotpid_postfix
self.register_term_signal_handler = register_term_signal_handler
self.register_atexit = register_atexit
self.lock_pidfile = lock_pidfile
self.chmod = chmod
self.uid = uid
self.gid = gid
self.force_tmpdir = force_tmpdir
self.allow_samepid = allow_samepid
self.fh = None
self.filename = None
self.pid = None
self._logger = None
self._is_setup = False
self._need_cleanup = False
@property
def logger(self):
if not self._logger:
self._logger = logging.getLogger("PidFile")
return self._logger
def setup(self):
if not self._is_setup:
self.logger.debug("%r entering setup", self)
if self.filename is None:
self.pid = os.getpid()
self.filename = self._make_filename()
self._register_term_signal()
if self.register_atexit:
atexit.register(self.close)
# setup should only be performed once
self._is_setup = True
def _make_filename(self):
pidname = self.pidname
piddir = self.piddir
if pidname is None:
pidname = "%s.pid" % os.path.basename(sys.argv[0])
if self.enforce_dotpid_postfix and not pidname.endswith(".pid"):
pidname = "%s.pid" % pidname
if piddir is None:
if os.path.isdir(DEFAULT_PID_DIR) and self.force_tmpdir is False:
piddir = DEFAULT_PID_DIR
else:
piddir = tempfile.gettempdir()
if os.path.exists(piddir) and not os.path.isdir(piddir):
raise IOError("Pid file directory '%s' exists but is not a directory" % piddir)
if not os.path.isdir(piddir):
os.makedirs(piddir)
if not effective_access(piddir, os.R_OK):
raise IOError("Pid file directory '%s' cannot be read" % piddir)
if not effective_access(piddir, os.W_OK | os.X_OK):
raise IOError("Pid file directory '%s' cannot be written to" % piddir)
filename = os.path.abspath(os.path.join(piddir, pidname))
return filename
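    # Illustrative default (not part of the original source): with no arguments
    # and sys.argv[0] == '/usr/bin/myapp', the pid file path becomes
    # '<piddir>/myapp.pid', where <piddir> is DEFAULT_PID_DIR when it exists and
    # is usable, otherwise tempfile.gettempdir().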
def _register_term_signal(self):
register_term_signal_handler = self.register_term_signal_handler
if register_term_signal_handler == "auto":
if signal.getsignal(signal.SIGTERM) == signal.SIG_DFL:
register_term_signal_handler = True
else:
register_term_signal_handler = False
if callable(register_term_signal_handler):
signal.signal(signal.SIGTERM, register_term_signal_handler)
elif register_term_signal_handler:
# Register TERM signal handler to make sure atexit runs on TERM signal
def sigterm_noop_handler(*args, **kwargs):
raise SystemExit(1)
signal.signal(signal.SIGTERM, sigterm_noop_handler)
def _inner_check(self, fh):
try:
fh.seek(0)
pid_str = fh.read(16).split("\n", 1)[0].strip()
if not pid_str:
return PID_CHECK_EMPTY
pid = int(pid_str)
except (IOError, ValueError) as exc:
self.close(fh=fh)
raise PidFileUnreadableError(exc)
else:
if self.allow_samepid and self.pid == pid:
return PID_CHECK_SAMEPID
try:
if self._pid_exists(pid):
raise PidFileAlreadyRunningError("Program already running with pid: %d" % pid, pid=pid)
else:
return PID_CHECK_NOTRUNNING
except PidFileAlreadyRunningError:
self.close(fh=fh, cleanup=False)
raise
def _pid_exists(self, pid):
raise NotImplementedError()
def _flock(self, fileno):
raise NotImplementedError()
def _chmod(self):
raise NotImplementedError()
def _chown(self):
raise NotImplementedError()
def check(self):
self.setup()
self.logger.debug("%r check pidfile: %s", self, self.filename)
if self.fh is None:
if self.filename and os.path.isfile(self.filename):
with open(self.filename, "r") as fh:
return self._inner_check(fh)
return PID_CHECK_NOFILE
return self._inner_check(self.fh)
def create(self):
self.setup()
self.logger.debug("%r create pidfile: %s", self, self.filename)
self.fh = open(self.filename, "a+")
if self.lock_pidfile:
try:
self._flock(self.fh.fileno())
except IOError as exc:
if not self.allow_samepid:
self.close(cleanup=False)
raise PidFileAlreadyLockedError(exc)
check_result = self.check()
if check_result == PID_CHECK_SAMEPID:
return
self._chmod()
self._chown()
self.fh.seek(0)
self.fh.truncate()
# pidfile must be composed of the pid number and a newline character
self.fh.write("%d\n" % self.pid)
self.fh.flush()
self.fh.seek(0)
self._need_cleanup = True
def close(self, fh=None, cleanup=None):
self.logger.debug("%r closing pidfile: %s", self, self.filename)
cleanup = self._need_cleanup if cleanup is None else cleanup
if not fh:
fh = self.fh
try:
if fh is None:
return
fh.close()
except IOError as exc:
# ignore error when file was already closed
if exc.errno != errno.EBADF:
raise
finally:
if self.filename and os.path.isfile(self.filename) and cleanup:
os.remove(self.filename)
self._need_cleanup = False
def __enter__(self):
self.create()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
self.close()
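# Typical usage sketch (illustrative; PidFileBase is abstract, so a concrete
# subclass such as pid.PidFile from this package would be used in practice):
#
#     from pid import PidFile
#     with PidFile('myapp'):
#         main()  # raises PidFileAlreadyLockedError if another instance holds the lock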
| apache-2.0 | 2,437,433,229,364,245,500 | 30.288066 | 103 | 0.585032 | false |
kimhc6028/pathnet-pytorch | plotter.py | 1 | 2461 | import argparse
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--mnist', action='store_true', default=False,
help='open mnist result')
args = parser.parse_args()
def subplot(subplot, data_first, data_second, title):
plt.subplot(subplot)
if args.mnist:
x = np.arange(0,100)
else:
x = np.arange(0,500)
y_first = np.mean(data_first, axis=0)
y_second = np.mean(data_second, axis=0)
y_first_err = np.std(data_first, axis=0) / 2.
y_second_err = np.std(data_second, axis=0) / 2.
plt.fill_between(x, y_first - y_first_err, y_first + y_first_err, color='m', alpha=0.3)
plt.fill_between(x, y_second - y_second_err, y_second + y_second_err, color='c', alpha=0.3)
plt.plot(x, y_first, color='r', label='Task A')
plt.plot(x, y_second, color='g', label='Task B (transfer learning)')
plt.legend(bbox_to_anchor=(0.8, 0.3), loc=2, ncol=1, fontsize=15)
axes = plt.gca()
if args.mnist:
axes.set_xlim([0, 100])
axes.set_ylim([0, 1.2])
else:
axes.set_xlim([0, 500])
axes.set_ylim([0, 0.6])
plt.title(title, fontsize=20, y = 0.9)
plt.ylabel('Accuracy',fontsize=15)
plt.xlabel('Generations',fontsize=15)
plt.grid(True)
try:
if args.mnist:
f = open(os.path.join('./result/result_mnist.pickle'))
result = pickle.load(f)
f.close()
pathnet_first = []
pathnet_second = []
for res in result:
pathnet_first.append(res[2])
pathnet_second.append(res[3])
subplot('111', pathnet_first, pathnet_second,'MNIST')
plt.show()
else:
f = open(os.path.join('./result/result_cifar_svhn.pickle'))
result = pickle.load(f)
f.close()
cifar_first = []
cifar_second = []
svhn_first = []
svhn_second = []
for res in result:
if res[0] == 'pathnet_cifar_first':
cifar_first.append(res[2])
svhn_second.append(res[3])
else:
svhn_first.append(res[2])
cifar_second.append(res[3])
subplot('211', cifar_first, cifar_second,'CIFAR-10')
subplot('212', svhn_first, svhn_second,'cSVHN')
plt.show()
except IOError:
print("Result file does not exist")
| bsd-3-clause | 5,344,219,166,284,127,000 | 28.297619 | 95 | 0.578627 | false |
pulsar-chem/Pulsar-Core | lib/systems/l-leucine.py | 1 | 1231 | import pulsar as psr
def load_ref_system():
""" Returns l-leucine as found in the IQMol fragment library.
All credit to https://github.com/nutjunkie/IQmol
"""
return psr.make_system("""
H 0.3678 -1.3008 0.4056
C 0.5471 -0.3960 -0.2429
N 2.0124 -0.1721 -0.2619
H 2.3296 -0.0107 0.6710
H 2.2351 0.6192 -0.8294
C -0.2810 0.7693 0.3217
H 0.0653 1.7342 -0.1027
H -1.3362 0.6643 -0.0026
C -0.2335 0.8255 1.8505
H 0.8348 0.8722 2.1782
C -0.8684 -0.4159 2.4566
H -0.7192 -0.4539 3.5427
H -0.4280 -1.3286 2.0209
H -1.9499 -0.4521 2.2695
C -0.9374 2.0778 2.3462
H -0.9140 2.1367 3.4421
H -1.9919 2.0983 2.0389
H -0.4635 2.9879 1.9557
C 0.0963 -0.6776 -1.6698
O 0.2328 0.0062 -2.6676
O -0.5612 -1.8476 -1.8380
H -0.7998 -1.9596 -2.7530
""")
| bsd-3-clause | -8,902,193,450,787,371,000 | 41.448276 | 65 | 0.39805 | false |
hoosteeno/bedrock | bedrock/settings/base.py | 1 | 48232 | # -*- coding: utf-8 -*-
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import platform
import socket
import struct
import sys
from os.path import abspath
from django.utils.functional import lazy
from everett.manager import ListOf
from pathlib import Path
from bedrock.base.config_manager import config
# ROOT path of the project. A pathlib.Path object.
ROOT_PATH = Path(__file__).resolve().parents[2]
GIT_REPOS_PATH = ROOT_PATH / 'git-repos'
ROOT = str(ROOT_PATH)
def path(*args):
return abspath(str(ROOT_PATH.joinpath(*args)))
def git_repo_path(*args):
return abspath(str(GIT_REPOS_PATH.joinpath(*args)))
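# Example (illustrative): path('static') resolves to '<checkout>/static' and
# git_repo_path('legal_docs') to '<checkout>/git-repos/legal_docs'.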
# Is this a dev instance?
DEV = config('DEV', parser=bool, default='false')
PROD = config('PROD', parser=bool, default='false')
DEBUG = config('DEBUG', parser=bool, default='false')
DATABASES = {
'default': {
'ENGINE': 'django_prometheus.db.backends.sqlite3',
'NAME': path('bedrock.db'),
},
}
CACHES = {
'default': {
'BACKEND': 'bedrock.base.cache.SimpleDictCache',
'LOCATION': 'default',
'TIMEOUT': 600,
'OPTIONS': {
'MAX_ENTRIES': 5000,
'CULL_FREQUENCY': 4, # 1/4 entries deleted if max reached
},
}
}
# in case django-pylibmc is in use
PYLIBMC_MIN_COMPRESS_LEN = 150 * 1024
PYLIBMC_COMPRESS_LEVEL = 1 # zlib.Z_BEST_SPEED
# Logging
LOG_LEVEL = config('LOG_LEVEL', default='INFO')
HAS_SYSLOG = True
SYSLOG_TAG = "http_app_bedrock"
LOGGING_CONFIG = None
# CEF Logging
CEF_PRODUCT = 'Bedrock'
CEF_VENDOR = 'Mozilla'
CEF_VERSION = '0'
CEF_DEVICE_VERSION = '0'
# Internationalization.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = config('TIME_ZONE', default='America/Los_Angeles')
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
USE_TZ = True
USE_ETAGS = config('USE_ETAGS', default=str(not DEBUG), parser=bool)
# just here so Django doesn't complain
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-US'
# Tells the product_details module where to find our local JSON files.
# This ultimately controls how LANGUAGES are constructed.
PROD_DETAILS_CACHE_NAME = 'product-details'
PROD_DETAILS_CACHE_TIMEOUT = 60 * 15 # 15 min
PROD_DETAILS_STORAGE = config('PROD_DETAILS_STORAGE',
default='product_details.storage.PDDatabaseStorage')
# path into which to clone the p-d json repo
PROD_DETAILS_JSON_REPO_PATH = config('PROD_DETAILS_JSON_REPO_PATH',
default=git_repo_path('product_details_json'))
PROD_DETAILS_JSON_REPO_URI = config('PROD_DETAILS_JSON_REPO_URI',
default='https://github.com/mozilla-releng/product-details.git')
PROD_DETAILS_JSON_REPO_BRANCH = config('PROD_DETAILS_JSON_REPO_BRANCH', default='production')
# path to updated p-d data for testing before loading into DB
PROD_DETAILS_TEST_DIR = str(Path(PROD_DETAILS_JSON_REPO_PATH).joinpath('public', '1.0'))
# Accepted locales
PROD_LANGUAGES = ('ach', 'af', 'an', 'ar', 'ast', 'az', 'azz', 'be', 'bg',
'bn', 'br', 'bs', 'ca', 'cak', 'cs',
'cy', 'da', 'de', 'dsb', 'el', 'en-CA', 'en-GB', 'en-US',
'eo', 'es-AR', 'es-CL', 'es-ES', 'es-MX', 'et',
'eu', 'fa', 'ff', 'fi', 'fr', 'fy-NL', 'ga-IE', 'gd',
'gl', 'gn', 'gu-IN', 'he', 'hi-IN', 'hr', 'hsb',
'hu', 'hy-AM', 'ia', 'id', 'is', 'it', 'ja', 'ja-JP-mac',
'ka', 'kab', 'kk', 'km', 'kn', 'ko', 'lij', 'lt', 'ltg', 'lv',
'mk', 'ml', 'mr', 'ms', 'my', 'nb-NO', 'ne-NP', 'nl',
'nn-NO', 'oc', 'pa-IN', 'pl', 'pt-BR', 'pt-PT',
'rm', 'ro', 'ru', 'si', 'sk', 'sl', 'son', 'sq',
'sr', 'sv-SE', 'ta', 'te', 'th', 'tl', 'tr', 'trs', 'uk', 'ur',
'uz', 'vi', 'xh', 'zh-CN', 'zh-TW', 'zu')
LOCALES_PATH = ROOT_PATH / 'locale'
default_locales_repo = 'www.mozilla.org' if DEV else 'bedrock-l10n'
default_locales_repo = 'https://github.com/mozilla-l10n/{}'.format(default_locales_repo)
LOCALES_REPO = config('LOCALES_REPO', default=default_locales_repo)
GITHUB_REPO = 'https://github.com/mozilla/bedrock'
# templates to exclude from having an "edit this page" link in the footer
# these are typically ones for which most of the content is in the DB
EXCLUDE_EDIT_TEMPLATES = [
'firefox/releases/nightly-notes.html',
'firefox/releases/dev-browser-notes.html',
'firefox/releases/esr-notes.html',
'firefox/releases/beta-notes.html',
'firefox/releases/aurora-notes.html',
'firefox/releases/release-notes.html',
'firefox/releases/notes.html',
'firefox/releases/system_requirements.html',
'mozorg/credits.html',
'mozorg/about/forums.html',
'security/advisory.html',
'security/advisories.html',
'security/product-advisories.html',
'security/known-vulnerabilities.html',
]
def get_dev_languages():
try:
return [lang.name for lang in LOCALES_PATH.iterdir()
if lang.is_dir() and lang.name != 'templates']
except OSError:
# no locale dir
return list(PROD_LANGUAGES)
DEV_LANGUAGES = get_dev_languages()
DEV_LANGUAGES.append('en-US')
# Map short locale names to long, preferred locale names. This
# will be used in urlresolvers to determine the
# best-matching locale from the user's Accept-Language header.
CANONICAL_LOCALES = {
'en': 'en-US',
'es': 'es-ES',
'ja-jp-mac': 'ja',
'no': 'nb-NO',
'pt': 'pt-BR',
'sv': 'sv-SE',
'zh-hant': 'zh-TW', # Bug 1263193
'zh-hant-tw': 'zh-TW', # Bug 1263193
'zh-hk': 'zh-TW', # Bug 1338072
'zh-hant-hk': 'zh-TW', # Bug 1338072
}
# Unlocalized pages are usually redirected to the English (en-US) equivalent,
# but sometimes it would be better to offer another locale as fallback. This map
# specifies such cases.
FALLBACK_LOCALES = {
'es-AR': 'es-ES',
'es-CL': 'es-ES',
'es-MX': 'es-ES',
}
def lazy_lang_group():
"""Groups languages with a common prefix into a map keyed on said prefix"""
from django.conf import settings
groups = {}
langs = settings.DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
for lang in langs:
if '-' in lang:
prefix, _ = lang.split('-', 1)
groups.setdefault(prefix, []).append(lang)
# add any group prefix to the group list if it is also a supported lang
for groupid in groups:
if groupid in langs:
groups[groupid].append(groupid)
# exclude groups with a single member
return {gid: glist for gid, glist in groups.items() if len(glist) > 1}
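# Illustrative result (actual contents depend on DEV_LANGUAGES/PROD_LANGUAGES
# at runtime), e.g.:
#   {'es': ['es-AR', 'es-CL', 'es-ES', 'es-MX'], 'zh': ['zh-CN', 'zh-TW'], ...}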
def lazy_lang_url_map():
from django.conf import settings
langs = settings.DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return {i.lower(): i for i in langs}
# Override Django's built-in with our native names
def lazy_langs():
from django.conf import settings
from product_details import product_details
langs = DEV_LANGUAGES if settings.DEV else settings.PROD_LANGUAGES
return {lang.lower(): product_details.languages[lang]['native']
for lang in langs if lang in product_details.languages}
LANG_GROUPS = lazy(lazy_lang_group, dict)()
LANGUAGE_URL_MAP = lazy(lazy_lang_url_map, dict)()
LANGUAGES = lazy(lazy_langs, dict)()
FEED_CACHE = 3900
# 30 min during dev and 10 min in prod
DOTLANG_CACHE = config('DOTLANG_CACHE', default='1800' if DEBUG else '600', parser=int)
# Global L10n files.
DOTLANG_FILES = ['main']
FLUENT_DEFAULT_FILES = [
'brands',
'download_button',
'footer',
'fxa_form',
'navigation',
'newsletter_form',
'send_to_device',
'ui',
]
FLUENT_DEFAULT_PERCENT_REQUIRED = config('FLUENT_DEFAULT_PERCENT_REQUIRED', default='80', parser=int)
FLUENT_REPO = config('FLUENT_REPO', default='mozmeao/www-l10n')
FLUENT_REPO_URL = f'https://github.com/{FLUENT_REPO}'
FLUENT_REPO_PATH = GIT_REPOS_PATH / 'www-l10n'
# will be something like "<github username>:<github token>"
FLUENT_REPO_AUTH = config('FLUENT_REPO_AUTH', default='')
FLUENT_LOCAL_PATH = ROOT_PATH / 'l10n'
FLUENT_L10N_TEAM_REPO = config('FLUENT_L10N_TEAM_REPO', default='mozilla-l10n/www-l10n')
FLUENT_L10N_TEAM_REPO_URL = f'https://github.com/{FLUENT_L10N_TEAM_REPO}'
FLUENT_L10N_TEAM_REPO_PATH = GIT_REPOS_PATH / 'l10n-team'
# 10 seconds during dev and 10 min in prod
FLUENT_CACHE_TIMEOUT = config('FLUENT_CACHE_TIMEOUT', default='10' if DEBUG else '600', parser=int)
# order matters. first string found wins.
FLUENT_PATHS = [
# local FTL files
FLUENT_LOCAL_PATH,
# remote FTL files from l10n team
FLUENT_REPO_PATH,
]
FLUENT_MIGRATIONS = 'lib.fluent_migrations'
FLUENT_MIGRATIONS_PATH = ROOT_PATH / 'lib' / 'fluent_migrations'
# Paths that don't require a locale code in the URL.
# matches the first url component (e.g. mozilla.org/gameon/)
SUPPORTED_NONLOCALES = [
# from redirects.urls
'media',
'static',
'certs',
'images',
'contribute.json',
'credits',
'gameon',
'robots.txt',
'telemetry',
'webmaker',
'contributor-data',
'healthz',
'readiness',
'healthz-cron',
'2004',
'2005',
'2006',
'keymaster',
'microsummaries',
'xbl',
'csp-violation-capture',
'country-code.json',
'revision.txt',
'locales',
'prometheus',
]
# Pages that we don't want to be indexed by search engines.
# Only impacts sitemap generator. If you need to disallow indexing of
# specific URLs, add them to mozorg/templates/mozorg/robots.txt.
NOINDEX_URLS = [
r'^(404|500)/',
r'^firefox/welcome/',
r'^contribute/(embed|event)/',
r'^csp-violation-capture',
r'^firefox/retention/thank-you/',
r'^firefox/set-as-default/thanks/',
r'^firefox/sms/sent/',
r'^firefox/unsupported/',
r'^firefox/send-to-device-post',
r'^firefox/feedback',
r'^firefox/stub_attribution_code/',
r'^firefox/dedicated-profiles/',
r'^firefox/installer-help/',
r'^firefox/this-browser-comes-highly-recommended/',
r'^firefox/nightly/notes/feed/$',
r'^firefox.*/all/$',
r'^firefox/enterprise/signup/',
r'^.+/(firstrun|whatsnew)/$',
r'^m/',
r'^newsletter/(confirm|existing|hacks\.mozilla\.org|recovery|updated|fxa-error)/',
r'^newsletter/opt-out-confirmation/',
r'^newsletter/country/success/',
r'/system-requirements/$',
r'.*/(firstrun|thanks)/$',
r'^readiness/$',
r'^healthz(-cron)?/$',
r'^country-code\.json$',
# exclude redirects
    r'^foundation/annualreport/$',
    r'^firefox/notes/$',
    r'^teach/$',
r'^about/legal/impressum/$',
r'^security/announce/',
r'^exp/',
r'^prometheus/',
]
# Pages we do want indexed but don't show up in automated URL discovery
# or are only available in a non-default locale
EXTRA_INDEX_URLS = {
'/privacy/firefox-klar/': ['de'],
'/about/legal/impressum/': ['de'],
}
# Pages that have different URLs for different locales, e.g.
# 'firefox/private-browsing/': {
# 'en-US': '/firefox/features/private-browsing/',
# },
ALT_CANONICAL_PATHS = {}
ALLOWED_HOSTS = config(
'ALLOWED_HOSTS', parser=ListOf(str),
default='www.mozilla.org,www.ipv6.mozilla.org,www.allizom.org')
ALLOWED_CIDR_NETS = config('ALLOWED_CIDR_NETS', default='', parser=ListOf(str))
# The canonical, production URL without a trailing slash
CANONICAL_URL = 'https://www.mozilla.org'
# Make this unique, and don't share it with anybody.
SECRET_KEY = config('SECRET_KEY', default='ssssshhhhh')
MEDIA_URL = config('MEDIA_URL', default='/user-media/')
MEDIA_ROOT = config('MEDIA_ROOT', default=path('media'))
STATIC_URL = config('STATIC_URL', default='/media/')
STATIC_ROOT = config('STATIC_ROOT', default=path('static'))
STATICFILES_STORAGE = ('django.contrib.staticfiles.storage.StaticFilesStorage' if DEBUG else
'django.contrib.staticfiles.storage.ManifestStaticFilesStorage')
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
)
STATICFILES_DIRS = (
path('static_final'),
)
if DEBUG:
STATICFILES_DIRS += (path('media'),)
def set_whitenoise_headers(headers, path, url):
if '/fonts/' in url or '/caldata/' in url:
cache_control = 'public, max-age={}'.format(604800) # one week
headers['Cache-Control'] = cache_control
if url.startswith('/.well-known/matrix/'):
headers['Content-Type'] = 'application/json'
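# Example (illustrative): a URL containing '/fonts/' such as
# '/media/fonts/Inter.woff2' gets 'Cache-Control: public, max-age=604800',
# while '/.well-known/matrix/server' is served as 'application/json'.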
WHITENOISE_ADD_HEADERS_FUNCTION = set_whitenoise_headers
WHITENOISE_ROOT = config('WHITENOISE_ROOT', default=path('root_files'))
WHITENOISE_MAX_AGE = 6 * 60 * 60 # 6 hours
PROJECT_MODULE = 'bedrock'
ROOT_URLCONF = 'bedrock.urls'
# Tells the extract script what files to look for L10n in and what function
# handles the extraction.
PUENTE = {
'BASE_DIR': ROOT,
'PROJECT': 'Bedrock',
'MSGID_BUGS_ADDRESS': 'https://bugzilla.mozilla.org/enter_bug.cgi?'
'product=www.mozilla.org&component=L10N',
'DOMAIN_METHODS': {
'django': [
('bedrock/**.py', 'lib.l10n_utils.extract.extract_python'),
('bedrock/**/templates/**.html', 'lib.l10n_utils.extract.extract_jinja2'),
('bedrock/**/templates/**.js', 'lib.l10n_utils.extract.extract_jinja2'),
('bedrock/**/templates/**.jsonp', 'lib.l10n_utils.extract.extract_jinja2'),
],
}
}
def get_app_name(hostname):
"""
Get the app name from the host name.
The hostname in our deployments will be in the form `bedrock-{version}-{type}-{random-ID}`
where {version} is "dev", "stage", or "prod", and {type} is the process type
(e.g. "web" or "clock"). Everywhere else it won't be in this form and will return None.
"""
if hostname.startswith('bedrock-'):
app_mode = hostname.split('-')[1]
return 'bedrock-' + app_mode
return 'bedrock'
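# Examples (hostnames are hypothetical):
#   get_app_name('bedrock-prod-web-59d6f79c8-abcde') -> 'bedrock-prod'
#   get_app_name('my-laptop') -> 'bedrock'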
HOSTNAME = platform.node()
APP_NAME = get_app_name(HOSTNAME)
CLUSTER_NAME = config('CLUSTER_NAME', default='')
ENABLE_HOSTNAME_MIDDLEWARE = config('ENABLE_HOSTNAME_MIDDLEWARE',
default=str(bool(APP_NAME)), parser=bool)
ENABLE_VARY_NOCACHE_MIDDLEWARE = config('ENABLE_VARY_NOCACHE_MIDDLEWARE',
default='true', parser=bool)
# set this to enable basic auth for the entire site
# e.g. BASIC_AUTH_CREDS="thedude:thewalrus"
BASIC_AUTH_CREDS = config('BASIC_AUTH_CREDS', default='')
# reduce the number of latency buckets for prom
# see https://github.com/korfuri/django-prometheus#configuration
PROMETHEUS_LATENCY_BUCKETS = (
0.05,
0.1,
0.5,
1.0,
5.0,
10.0,
50.0,
float("inf"),
)
PROMETHEUS_METRIC_NAMESPACE = APP_NAME.replace('-', '_')
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'allow_cidr.middleware.AllowCIDRMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'corsheaders.middleware.CorsMiddleware',
'bedrock.mozorg.middleware.VaryNoCacheMiddleware',
'bedrock.base.middleware.BasicAuthMiddleware',
# must come before LocaleURLMiddleware
'bedrock.redirects.middleware.RedirectsMiddleware',
'bedrock.base.middleware.LocaleURLMiddleware',
'bedrock.mozorg.middleware.ClacksOverheadMiddleware',
'bedrock.mozorg.middleware.HostnameMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'bedrock.mozorg.middleware.CacheMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ENABLE_CSP_MIDDLEWARE = config('ENABLE_CSP_MIDDLEWARE', default='true', parser=bool)
if ENABLE_CSP_MIDDLEWARE:
MIDDLEWARE.append('csp.middleware.CSPMiddleware')
INSTALLED_APPS = (
# Django contrib apps
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.messages',
# Third-party apps, patches, fixes
'commonware.response.cookies',
# L10n
'puente', # for ./manage.py extract
'product_details',
# third-party apps
'django_jinja_markdown',
'pagedown',
'localflavor',
'django_jinja',
'raven.contrib.django.raven_compat',
'watchman',
'django_prometheus',
# Local apps
'bedrock.base',
'bedrock.firefox',
'bedrock.foundation',
'bedrock.grants',
'bedrock.legal',
'bedrock.legal_docs',
'bedrock.mozorg',
'bedrock.newsletter',
'bedrock.press',
'bedrock.privacy',
'bedrock.externalfiles',
'bedrock.security',
'bedrock.releasenotes',
'bedrock.contentcards',
'bedrock.utils',
'bedrock.wordpress',
'bedrock.sitemaps',
'bedrock.pocketfeed',
'bedrock.exp',
# last so that redirects here will be last
'bedrock.redirects',
# libs
'django_extensions',
'lib.l10n_utils',
'captcha',
)
# Must match the list at CloudFlare if the
# VaryNoCacheMiddleware is enabled. The home
# page is exempt by default.
VARY_NOCACHE_EXEMPT_URL_PREFIXES = (
'/firefox/',
'/contribute/',
'/about/',
'/contact/',
'/newsletter/',
'/privacy/',
'/foundation/',
)
# Sessions
#
# By default, be at least somewhat secure with our session cookies.
SESSION_COOKIE_HTTPONLY = not DEBUG
SESSION_COOKIE_SECURE = not DEBUG
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# legacy setting. backward compat.
DISABLE_SSL = config('DISABLE_SSL', default='true', parser=bool)
# SecurityMiddleware settings
SECURE_HSTS_SECONDS = config('SECURE_HSTS_SECONDS', default='0', parser=int)
SECURE_HSTS_INCLUDE_SUBDOMAINS = False
SECURE_BROWSER_XSS_FILTER = config('SECURE_BROWSER_XSS_FILTER', default='true', parser=bool)
SECURE_CONTENT_TYPE_NOSNIFF = config('SECURE_CONTENT_TYPE_NOSNIFF', default='true', parser=bool)
SECURE_SSL_REDIRECT = config('SECURE_SSL_REDIRECT', default=str(not DISABLE_SSL), parser=bool)
SECURE_REDIRECT_EXEMPT = [
r'^readiness/$',
r'^healthz(-cron)?/$',
]
if config('USE_SECURE_PROXY_HEADER', default=str(SECURE_SSL_REDIRECT), parser=bool):
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# watchman
WATCHMAN_DISABLE_APM = True
WATCHMAN_CHECKS = (
'watchman.checks.caches',
'watchman.checks.databases',
)
LOCALE_PATHS = (
str(LOCALES_PATH),
)
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'DIRS': LOCALE_PATHS,
'APP_DIRS': True,
'OPTIONS': {
'match_extension': None,
'undefined': 'jinja2.Undefined',
'finalize': lambda x: x if x is not None else '',
'translation_engine': 'lib.l10n_utils.template',
'newstyle_gettext': False,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.contrib.messages.context_processors.messages',
'bedrock.base.context_processors.i18n',
'bedrock.base.context_processors.globals',
'bedrock.mozorg.context_processors.canonical_path',
'bedrock.mozorg.context_processors.contrib_numbers',
'bedrock.mozorg.context_processors.current_year',
'bedrock.mozorg.context_processors.funnelcake_param',
'bedrock.mozorg.context_processors.facebook_locale',
'bedrock.firefox.context_processors.latest_firefox_versions',
],
'extensions': [
'jinja2.ext.do',
'jinja2.ext.with_',
'jinja2.ext.loopcontrols',
'jinja2.ext.autoescape',
'django_jinja.builtins.extensions.CsrfExtension',
'django_jinja.builtins.extensions.StaticFilesExtension',
'django_jinja.builtins.extensions.DjangoFiltersExtension',
'lib.l10n_utils.template.i18n',
'lib.l10n_utils.template.l10n_blocks',
'lib.l10n_utils.template.lang_blocks',
'django_jinja_markdown.extensions.MarkdownExtension',
],
}
},
]
# use the Wordpress JSON REST API to get blog data
WP_BLOGS = {
'firefox': {
'url': 'https://blog.mozilla.org/firefox/',
'name': 'The Firefox Frontier',
# default num_posts is 20
# uncomment and change this to get more
# 'num_posts': 20,
},
'hacks': {
'url': 'https://hacks.mozilla.org/',
'name': 'Hacks',
},
'cd': {
'url': 'https://connected.mozilla.org/',
'name': 'Connected Devices',
},
'futurereleases': {
'url': 'https://blog.mozilla.org/futurereleases/',
'name': 'Future Releases',
},
'internetcitizen': {
'url': 'https://blog.mozilla.org/internetcitizen/',
'name': 'Internet Citizen',
},
}
# used to connect to @MozillaHQ Pocket account
POCKET_API_URL = config('POCKET_API_URL', default='https://getpocket.com/v3/firefox/profile-recs')
POCKET_CONSUMER_KEY = config('POCKET_CONSUMER_KEY', default='')
POCKET_ACCESS_TOKEN = config('POCKET_ACCESS_TOKEN', default='')
# Contribute numbers
# TODO: automate these
CONTRIBUTE_NUMBERS = {
'num_mozillians': 10554,
'num_languages': 87,
}
BASKET_URL = config('BASKET_URL', default='https://basket.mozilla.org')
BASKET_API_KEY = config('BASKET_API_KEY', default='')
BASKET_TIMEOUT = config('BASKET_TIMEOUT', parser=int, default='10')
BOUNCER_URL = config('BOUNCER_URL', default='https://download.mozilla.org/')
# reCAPTCHA keys
RECAPTCHA_PUBLIC_KEY = config('RECAPTCHA_PUBLIC_KEY', default='')
RECAPTCHA_PRIVATE_KEY = config('RECAPTCHA_PRIVATE_KEY', default='')
RECAPTCHA_USE_SSL = config('RECAPTCHA_USE_SSL', parser=bool, default='true')
# Use a message storage mechanism that doesn't need a database.
# This can be changed to use session once we do add a database.
MESSAGE_STORAGE = 'django.contrib.messages.storage.cookie.CookieStorage'
default_email_backend = ('django.core.mail.backends.console.EmailBackend' if DEBUG else
'django.core.mail.backends.smtp.EmailBackend')
EMAIL_BACKEND = config('EMAIL_BACKEND', default=default_email_backend)
EMAIL_HOST = config('EMAIL_HOST', default='localhost')
EMAIL_PORT = config('EMAIL_PORT', default='25', parser=int)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default='false', parser=bool)
EMAIL_SUBJECT_PREFIX = config('EMAIL_SUBJECT_PREFIX', default='[bedrock] ')
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default='')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default='')
# Google Analytics
GA_ACCOUNT_CODE = ''
EXTERNAL_FILES_PATH = config('EXTERNAL_FILES_PATH', default=git_repo_path('community_data'))
EXTERNAL_FILES_BRANCH = config('EXTERNAL_FILES_BRANCH', default='master')
EXTERNAL_FILES_REPO = config('EXTERNAL_FILES_REPO', default='https://github.com/mozilla/community-data.git')
EXTERNAL_FILES = {
'credits': {
'type': 'bedrock.mozorg.credits.CreditsFile',
'name': 'credits/names.csv',
},
'forums': {
'type': 'bedrock.mozorg.forums.ForumsFile',
'name': 'forums/raw-ng-list.txt',
},
}
# Facebook Like button supported locales
# https://www.facebook.com/translations/FacebookLocales.xml
FACEBOOK_LIKE_LOCALES = ['af_ZA', 'ar_AR', 'az_AZ', 'be_BY', 'bg_BG',
'bn_IN', 'bs_BA', 'ca_ES', 'cs_CZ', 'cy_GB',
'da_DK', 'de_DE', 'el_GR', 'en_GB', 'en_PI',
'en_UD', 'en_US', 'eo_EO', 'es_ES', 'es_LA',
'et_EE', 'eu_ES', 'fa_IR', 'fb_LT', 'fi_FI',
'fo_FO', 'fr_CA', 'fr_FR', 'fy_NL', 'ga_IE',
'gl_ES', 'he_IL', 'hi_IN', 'hr_HR', 'hu_HU',
'hy_AM', 'id_ID', 'is_IS', 'it_IT', 'ja_JP',
'ka_GE', 'km_KH', 'ko_KR', 'ku_TR', 'la_VA',
'lt_LT', 'lv_LV', 'mk_MK', 'ml_IN', 'ms_MY',
'nb_NO', 'ne_NP', 'nl_NL', 'nn_NO', 'pa_IN',
'pl_PL', 'ps_AF', 'pt_BR', 'pt_PT', 'ro_RO',
'ru_RU', 'sk_SK', 'sl_SI', 'sq_AL', 'sr_RS',
'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH',
'tl_PH', 'tr_TR', 'uk_UA', 'vi_VN', 'zh_CN',
'zh_HK', 'zh_TW']
# Prefix for media. No trailing slash.
# e.g. '//mozorg.cdn.mozilla.net'
CDN_BASE_URL = config('CDN_BASE_URL', default='')
# newsletters that always show for FxA holders
FXA_NEWSLETTERS = [
'firefox-accounts-journey',
'test-pilot',
'take-action-for-the-internet',
'knowledge-is-power',
]
FXA_NEWSLETTERS_LOCALES = ['en', 'de', 'fr']
# Regional press blogs map to locales
PRESS_BLOG_ROOT = 'https://blog.mozilla.org/'
PRESS_BLOGS = {
'de': 'press-de/',
'en-GB': 'press-uk/',
'en-US': 'press/',
'es-AR': 'press-es/',
'es-CL': 'press-es/',
'es-ES': 'press-es/',
'es-MX': 'press-es/',
'fr': 'press-fr/',
'it': 'press-it/',
'pl': 'press-pl/',
'pt-BR': 'press-br/',
}
DONATE_LINK = ('https://donate.mozilla.org/{locale}/'
'?presets={presets}&amount={default}'
'&utm_source=mozilla.org&utm_medium=referral&utm_content={source}'
'¤cy={currency}')
DONATE_PARAMS = {
'en-US': {
'currency': 'usd',
'symbol': '$',
'presets': '50,30,20,10',
'default': '30'
},
'an': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'as': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'ast': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'az': {
'currency': 'azn',
'symbol': u'₼',
'presets': '34,17,8,5',
'default': '17'
},
'bn': {
'currency': 'bdt',
'symbol': u'৳',
'presets': '1700,840,420,250',
'default': '840'
},
'bn-BD': {
'currency': 'bdt',
'symbol': u'৳',
'presets': '1700,840,420,250',
'default': '840'
},
'bn-IN': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'brx': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'ca': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'cak': {
'currency': 'gtq',
'symbol': 'Q',
'presets': '145,70,35,20',
'default': '70'
},
'cs': {
'currency': 'czk',
'symbol': u'Kč',
'presets': '450,220,110,70',
'default': '220'
},
'cy': {
'currency': 'gbp',
'symbol': u'£',
'presets': '40,25,15,8',
'default': '25'
},
'da': {
'currency': 'dkk',
'symbol': 'kr',
'presets': '130,60,30,20',
'default': '60'
},
'de': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'dsb': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'el': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'en-CA': {
'currency': 'cad',
'symbol': u'$',
'presets': '65,30,15,4',
'default': '30'
},
'en-GB': {
'currency': 'gbp',
'symbol': u'£',
'presets': '40,25,15,8',
'default': '25'
},
'es-AR': {
'currency': 'ars',
'symbol': '$',
'presets': '730,370,200,110',
'default': '370'
},
'es-CL': {
'currency': 'clp',
'symbol': '$',
'presets': '13000,6500,3250,2000',
'default': '6500'
},
'es-ES': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'es-MX': {
'currency': 'mxn',
'symbol': '$',
'presets': '400,200,100,60',
'default': '200'
},
'eo': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'et': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'eu': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'fi': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'fr': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'fy-NL': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'ga-IE': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'gd': {
'currency': 'gbp',
'symbol': u'£',
'presets': '40,25,15,8',
'default': '25'
},
'gl': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'gu-IN': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'he': {
'currency': 'ils',
'symbol': u'₪',
'presets': '60,30,15,9',
'default': '30'
},
'hi-IN': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'hsb': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'hr': {
'currency': 'hrk',
'symbol': 'kn',
'presets': '128,64,32,19',
'default': '64'
},
'hu': {
'currency': 'huf',
'symbol': 'Ft',
'presets': '5600,2800,1400,850',
'default': '2800'
},
'id': {
'currency': 'idr',
'symbol': 'Rp',
'presets': '300000,150000,75000,45000',
'default': '150000'
},
'it': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'ja': {
'currency': 'jpy',
'symbol': u'¥',
'presets': '2240,1120,560,340',
'default': '1120'
},
'ka': {
'currency': 'gel',
'symbol': u'₾',
'presets': '50,25,12,7',
'default': '25'
},
'kab': {
'currency': 'dzd',
'symbol': u'د.ج.',
'presets': '2400,1200,600,350',
'default': '1200'
},
'ko': {
'currency': 'krw',
'symbol': u'₩',
'presets': '22320,11160,5580,3350',
'default': '11160'
},
'kn': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'lij': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'lt': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'lv': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'ml': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'mr': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'ms': {
'currency': 'myr',
'symbol': 'RM',
'presets': '85,42,21,13',
'default': '42'
},
'nb-NO': {
'currency': 'nok',
'symbol': 'kr',
'presets': '160,80,40,20',
'default': '80'
},
'nn-NO': {
'currency': 'nok',
'symbol': 'kr',
'presets': '160,80,40,20',
'default': '80'
},
'nl': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'or': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'pa-IN': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'pl': {
'currency': 'pln',
'symbol': u'zł',
'presets': '80,40,20,10',
'default': '40'
},
'pt-BR': {
'currency': 'brl',
'symbol': 'R$',
'presets': '80,40,20,10',
'default': '40'
},
'pt-PT': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'ro': {
'currency': 'ron',
'symbol': 'lei',
'presets': '80,40,20,12',
'default': '40'
},
'ru': {
'currency': 'rub',
'symbol': u'₽',
'presets': '1300,800,500,200',
'default': '800'
},
'sat': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'sk': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'sl': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'sq': {
'currency': 'all',
'symbol': 'L',
'presets': '2280,1140,570,350',
'default': '1140'
},
'sr': {
'currency': 'eur',
'symbol': u'€',
'presets': '50,30,20,10',
'default': '30'
},
'sv-SE': {
'currency': 'sek',
'symbol': 'kr',
'presets': '180,90,45,30',
'default': '90'
},
'ta': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'te': {
'currency': 'inr',
'symbol': u'₹',
'presets': '1000,500,250,150',
'default': '500'
},
'th': {
'currency': 'thb',
'symbol': u'฿',
'presets': '500,250,125,75',
'default': '250'
},
'tr': {
'currency': 'try',
'symbol': u'₺',
'presets': '70,35,18,10',
'default': '35'
},
'uk': {
'currency': 'uah',
'symbol': u'₴',
'presets': '530,260,130,80',
'default': '260'
},
'zh-CN': {
'currency': 'cny',
'symbol': u'¥',
'presets': '140,70,35,20',
'default': '70'
},
'zh-TW': {
'currency': 'twd',
'symbol': 'NT$',
'presets': '480,240,150,70',
'default': '240'
},
}
# Official Firefox Twitter accounts
FIREFOX_TWITTER_ACCOUNTS = {
'de': 'https://twitter.com/firefox_DE',
'en-US': 'https://twitter.com/firefox',
'es-ES': 'https://twitter.com/firefox_es',
'fr': 'https://twitter.com/firefox_FR',
'pt-BR': 'https://twitter.com/firefoxbrasil',
}
# Fx Accounts iframe-less form & JS endpoint
# ***This URL *MUST* end in a trailing slash!***
# other acceptable values below are:
# - https://accounts.stage.mozaws.net/ (stage)
# - https://stable.dev.lcip.org/ (demo/local)
FXA_ENDPOINT = config('FXA_ENDPOINT',
default='https://accounts.firefox.com/')
FXA_ENDPOINT_MOZILLAONLINE = config('FXA_ENDPOINT_MOZILLAONLINE',
default='https://accounts.firefox.com.cn/')
# Google Play and Apple App Store settings
from .appstores import (GOOGLE_PLAY_FIREFOX_LINK, GOOGLE_PLAY_FIREFOX_LINK_UTMS, # noqa
GOOGLE_PLAY_FIREFOX_LINK_MOZILLAONLINE, # noqa
APPLE_APPSTORE_FIREFOX_LINK, APPLE_APPSTORE_COUNTRY_MAP,
APPLE_APPSTORE_FOCUS_LINK, GOOGLE_PLAY_FOCUS_LINK,
APPLE_APPSTORE_KLAR_LINK, GOOGLE_PLAY_KLAR_LINK,
APPLE_APPSTORE_POCKET_LINK, GOOGLE_PLAY_POCKET_LINK,
APPLE_APPSTORE_LOCKWISE_LINK, GOOGLE_PLAY_LOCKWISE_LINK,
GOOGLE_PLAY_FIREFOX_BETA_LINK, GOOGLE_PLAY_FIREFOX_NIGHTLY_LINK,
AMAZON_FIREFOX_FIRE_TV_LINK, GOOGLE_PLAY_FIREFOX_LITE_LINK,
GOOGLE_PLAY_FIREFOX_SEND_LINK,
ADJUST_FIREFOX_URL, ADJUST_FOCUS_URL,
ADJUST_KLAR_URL, ADJUST_POCKET_URL,
ADJUST_LOCKWISE_URL)
# Locales that should display the 'Send to Device' widget
SEND_TO_DEVICE_LOCALES = ['de', 'en-GB', 'en-US',
'es-AR', 'es-CL', 'es-ES', 'es-MX',
'fr', 'id', 'pl', 'pt-BR', 'ru', 'zh-TW']
# country code for /country-code.json to return in dev mode
DEV_GEO_COUNTRY_CODE = config('DEV_GEO_COUNTRY_CODE', default='US')
SEND_TO_DEVICE_MESSAGE_SETS = {
'default': {
'sms_countries': config('STD_SMS_COUNTRIES_DEFAULT', default='US', parser=ListOf(str)),
'sms': {
'ios': 'ff-ios-download',
'android': 'SMS_Android',
},
'email': {
'android': 'download-firefox-android',
'ios': 'download-firefox-ios',
'all': 'download-firefox-mobile',
}
},
'fx-android': {
'sms_countries': config('STD_SMS_COUNTRIES_ANDROID', default='US', parser=ListOf(str)),
'sms': {
'ios': 'ff-ios-download',
'android': 'android-download-embed',
},
'email': {
'android': 'get-android-embed',
'ios': 'download-firefox-ios',
'all': 'download-firefox-mobile',
}
},
'fx-mobile-download-desktop': {
'sms_countries': config('STD_SMS_COUNTRIES_DESKTOP', default='US', parser=ListOf(str)),
'sms': {
'all': 'mobile-heartbeat',
},
'email': {
'all': 'download-firefox-mobile-reco',
}
},
'fx-whatsnew': {
'sms_countries': config('STD_SMS_COUNTRIES_WHATSNEW50', default='US', parser=ListOf(str)),
'sms': {
'all': 'whatsnewfifty',
},
'email': {
'all': 'download-firefox-mobile-whatsnew',
}
},
'fx-focus': {
'sms_countries': config('STD_SMS_COUNTRIES_WHATSNEW61', default='US', parser=ListOf(str)),
'sms': {
'all': 'focus_sms_whatsnew',
},
'email': {
'all': 'download-focus-mobile-whatsnew',
}
},
'fx-klar': {
'sms_countries': config('STD_SMS_COUNTRIES_WHATSNEW61', default='US', parser=ListOf(str)),
'sms': {
'all': 'focus_sms_whatsnew',
},
'email': {
'all': 'download-klar-mobile-whatsnew',
}
},
'download-firefox-rocket': {
'sms_countries': '',
'email': {
'all': 'download-firefox-rocket',
}
},
'firefox-mobile-welcome': {
'sms_countries': config('STD_SMS_COUNTRIES_MOBILE_WELCOME', default='US,DE,FR', parser=ListOf(str)),
'sms': {
'all': 'firefox-mobile-welcome',
},
'email': {
'all': 'firefox-mobile-welcome',
}
},
'lockwise-welcome-download': {
'sms_countries': config('STD_SMS_COUNTRIES_MOBILE_WELCOME', default='US,DE,FR', parser=ListOf(str)),
'sms': {
'all': 'lockwise-welcome-download',
},
'email': {
'all': 'lockwise-welcome-download',
}
}
}
if DEV:
content_cards_default_branch = 'dev-processed'
else:
content_cards_default_branch = 'prod-processed'
CONTENT_CARDS_PATH = config('CONTENT_CARDS_PATH', default=git_repo_path('content_cards'))
CONTENT_CARDS_REPO = config('CONTENT_CARDS_REPO', default='https://github.com/mozmeao/www-admin.git')
CONTENT_CARDS_BRANCH = config('CONTENT_CARDS_BRANCH', default=content_cards_default_branch)
CONTENT_CARDS_URL = config('CONTENT_CARDS_URL', default=STATIC_URL)
RELEASE_NOTES_PATH = config('RELEASE_NOTES_PATH', default=git_repo_path('release_notes'))
RELEASE_NOTES_REPO = config('RELEASE_NOTES_REPO', default='https://github.com/mozilla/release-notes.git')
RELEASE_NOTES_BRANCH = config('RELEASE_NOTES_BRANCH', default='master')
WWW_CONFIG_PATH = config('WWW_CONFIG_PATH', default=git_repo_path('www_config'))
WWW_CONFIG_REPO = config('WWW_CONFIG_REPO', default='https://github.com/mozmeao/www-config.git')
WWW_CONFIG_BRANCH = config('WWW_CONFIG_BRANCH', default='master')
LEGAL_DOCS_PATH = GIT_REPOS_PATH / 'legal_docs'
LEGAL_DOCS_REPO = config('LEGAL_DOCS_REPO', default='https://github.com/mozilla/legal-docs.git')
LEGAL_DOCS_BRANCH = config('LEGAL_DOCS_BRANCH', default='master' if DEV else 'prod')
LEGAL_DOCS_DMS_URL = config('LEGAL_DOCS_DMS_URL', default='')
MOFO_SECURITY_ADVISORIES_PATH = config('MOFO_SECURITY_ADVISORIES_PATH',
default=git_repo_path('mofo_security_advisories'))
MOFO_SECURITY_ADVISORIES_REPO = config('MOFO_SECURITY_ADVISORIES_REPO',
default='https://github.com/mozilla/'
'foundation-security-advisories.git')
MOFO_SECURITY_ADVISORIES_BRANCH = config('MOFO_SECURITY_ADVISORIES_BRANCH', default='master')
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/([a-zA-Z-]+/)?(newsletter)/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': LOG_LEVEL,
'handlers': ['console'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'stream': sys.stdout,
'formatter': 'verbose'
}
},
'loggers': {
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
},
}
PASSWORD_HASHERS = ['django.contrib.auth.hashers.PBKDF2PasswordHasher']
ADMINS = MANAGERS = config('ADMINS', parser=json.loads,
default='[]')
GTM_CONTAINER_ID = config('GTM_CONTAINER_ID', default='')
GMAP_API_KEY = config('GMAP_API_KEY', default='')
STUB_ATTRIBUTION_HMAC_KEY = config('STUB_ATTRIBUTION_HMAC_KEY', default='')
STUB_ATTRIBUTION_RATE = config('STUB_ATTRIBUTION_RATE', default=str(1 if DEV else 0), parser=float)
STUB_ATTRIBUTION_MAX_LEN = config('STUB_ATTRIBUTION_MAX_LEN', default='600', parser=int)
# via http://stackoverflow.com/a/6556951/107114
def get_default_gateway_linux():
"""Read the default gateway directly from /proc."""
try:
with open("/proc/net/route") as fh:
for line in fh:
fields = line.strip().split()
if fields[1] != '00000000' or not int(fields[3], 16) & 2:
continue
return socket.inet_ntoa(struct.pack("<L", int(fields[2], 16)))
return 'localhost'
except IOError:
return 'localhost'
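# Illustrative use of the helper above (not from the original settings; the
# setting name is an assumption): a host value could fall back to the
# container's default gateway, e.g.
#   STATSD_HOST = config('STATSD_HOST', default=get_default_gateway_linux())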
FIREFOX_MOBILE_SYSREQ_URL = 'https://support.mozilla.org/kb/will-firefox-work-my-mobile-device'
MOZILLA_LOCATION_SERVICES_KEY = 'a9b98c12-d9d5-4015-a2db-63536c26dc14'
DEAD_MANS_SNITCH_URL = config('DEAD_MANS_SNITCH_URL', default='')
RAVEN_CONFIG = {
'dsn': config('SENTRY_DSN', default=''),
'site': '.'.join(x for x in [APP_NAME, CLUSTER_NAME] if x),
'release': config('GIT_SHA', default=''),
}
# Django-CSP
CSP_DEFAULT_SRC = ["'self'", '*.mozilla.net', '*.mozilla.org', '*.mozilla.com']
EXTRA_CSP_DEFAULT_SRC = config('CSP_DEFAULT_SRC', parser=ListOf(str), default='')
if EXTRA_CSP_DEFAULT_SRC:
CSP_DEFAULT_SRC += EXTRA_CSP_DEFAULT_SRC
CSP_IMG_SRC = CSP_DEFAULT_SRC + [
'data:',
'mozilla.org',
'www.googletagmanager.com',
'www.google-analytics.com',
'adservice.google.com',
'adservice.google.de',
'adservice.google.dk',
'creativecommons.org',
'cdn-3.convertexperiments.com',
'logs.convertexperiments.com',
]
CSP_SCRIPT_SRC = CSP_DEFAULT_SRC + [
# TODO fix things so that we don't need this
"'unsafe-inline'",
# TODO snap.svg.js passes a string to Function() which is
# blocked without unsafe-eval. Find a way to remove that.
"'unsafe-eval'",
'www.googletagmanager.com',
'www.google-analytics.com',
'tagmanager.google.com',
'www.youtube.com',
's.ytimg.com',
'cdn-3.convertexperiments.com',
'app.convert.com',
'data.track.convertexperiments.com',
'1003350.track.convertexperiments.com',
'1003343.track.convertexperiments.com',
]
CSP_STYLE_SRC = CSP_DEFAULT_SRC + [
# TODO fix things so that we don't need this
"'unsafe-inline'",
'app.convert.com',
]
CSP_CHILD_SRC = CSP_DEFAULT_SRC + [
'www.googletagmanager.com',
'www.google-analytics.com',
'www.youtube-nocookie.com',
'trackertest.org', # mozilla service for tracker detection
'www.surveygizmo.com',
'accounts.firefox.com',
'accounts.firefox.com.cn',
'www.youtube.com',
]
CSP_CONNECT_SRC = CSP_DEFAULT_SRC + [
'www.googletagmanager.com',
'www.google-analytics.com',
'logs.convertexperiments.com',
'1003350.metrics.convertexperiments.com',
'1003343.metrics.convertexperiments.com',
FXA_ENDPOINT,
FXA_ENDPOINT_MOZILLAONLINE,
]
CSP_REPORT_ONLY = config('CSP_REPORT_ONLY', default='false', parser=bool)
CSP_REPORT_ENABLE = config('CSP_REPORT_ENABLE', default='false', parser=bool)
if CSP_REPORT_ENABLE:
CSP_REPORT_URI = config('CSP_REPORT_URI', default='/csp-violation-capture')
CSP_EXTRA_FRAME_SRC = config('CSP_EXTRA_FRAME_SRC', default='', parser=ListOf(str))
if CSP_EXTRA_FRAME_SRC:
CSP_CHILD_SRC += tuple(CSP_EXTRA_FRAME_SRC)
# support older browsers (mainly Safari)
CSP_FRAME_SRC = CSP_CHILD_SRC
# Bug 1331069 - Double Click tracking pixel for download page.
AVAILABLE_TRACKING_PIXELS = {
'doubleclick': ('https://ad.doubleclick.net/ddm/activity/src=6417015;type=deskt0;cat=mozil0;dc_lat=;dc_rdid=;'
'tag_for_child_directed_treatment=;tfua=;npa=;ord=1'),
}
ENABLED_PIXELS = config('ENABLED_PIXELS', default='doubleclick', parser=ListOf(str))
TRACKING_PIXELS = [AVAILABLE_TRACKING_PIXELS[x] for x in ENABLED_PIXELS if x in AVAILABLE_TRACKING_PIXELS]
if config('SWITCH_TRACKING_PIXEL', default=str(DEV), parser=bool):
if 'doubleclick' in ENABLED_PIXELS:
CSP_IMG_SRC += ('ad.doubleclick.net',)
# Bug 1345467: Funnelcakes are now explicitly configured in the environment.
# Set experiment specific variables like the following:
#
# FUNNELCAKE_103_PLATFORMS=win,win64
# FUNNELCAKE_103_LOCALES=de,fr,en-US
#
# where "103" in the variable name is the funnelcake ID.
# Issue 7508 - Convert.com experiment sandbox
CONVERT_PROJECT_ID = ('10039-1003350' if DEV else '10039-1003343')
| mpl-2.0 | -5,188,924,430,018,638,000 | 30.310995 | 114 | 0.57253 | false |
enochd/RMG-Py | rmgpy/rmg/main.py | 1 | 62676 | #!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains the main execution functionality for Reaction Mechanism
Generator (RMG).
"""
import os.path
import sys
import logging
import time
import shutil
import numpy
import csv
try:
import xlwt
except ImportError:
logging.warning('Optional package dependency "xlwt" not loaded; Some output features will not work.')
from rmgpy.molecule import Molecule
from rmgpy.solver.base import TerminationTime, TerminationConversion
from rmgpy.solver.simple import SimpleReactor
from rmgpy.data.rmg import RMGDatabase
from rmgpy.data.base import ForbiddenStructureException, DatabaseError
from rmgpy.data.kinetics.library import KineticsLibrary, LibraryReaction
from rmgpy.data.kinetics.family import KineticsFamily, TemplateReaction
from rmgpy.kinetics.diffusionLimited import diffusionLimiter
from model import Species, CoreEdgeReactionModel
from pdep import PDepNetwork
################################################################################
solvent = None
class RMG:
"""
A representation of a Reaction Mechanism Generator (RMG) job. The
attributes are:
=============================== ================================================
Attribute Description
=============================== ================================================
`inputFile` The path to the input file
`logFile` The path to the log file
------------------------------- ------------------------------------------------
`databaseDirectory` The directory containing the RMG database
`thermoLibraries` The thermodynamics libraries to load
`reactionLibraries` The kinetics libraries to load
`statmechLibraries` The statistical mechanics libraries to load
`seedMechanisms` The seed mechanisms included in the model
`kineticsFamilies` The kinetics families to use for reaction generation
`kineticsDepositories` The kinetics depositories to use for looking up kinetics in each family
`kineticsEstimator` The method to use to estimate kinetics: 'group additivity' or 'rate rules'
`solvent` If solvation estimates are required, the name of the solvent.
------------------------------- ------------------------------------------------
`reactionModel` The core-edge reaction model generated by this job
`reactionSystems` A list of the reaction systems used in this job
`database` The RMG database used in this job
------------------------------- ------------------------------------------------
`absoluteTolerance` The absolute tolerance used in the ODE/DAE solver
`relativeTolerance` The relative tolerance used in the ODE/DAE solver
`sensitivityAbsoluteTolerance` The absolute tolerance used in the ODE/DAE solver for the sensitivities
`sensitivityRelativeTolerance` The relative tolerance used in the ODE/DAE solver for the sensitivities
`fluxToleranceKeepInEdge` The relative species flux below which species are discarded from the edge
`fluxToleranceMoveToCore` The relative species flux above which species are moved from the edge to the core
`fluxToleranceInterrupt` The relative species flux above which the simulation will halt
`maximumEdgeSpecies` The maximum number of edge species allowed at any time
`termination` A list of termination targets (i.e :class:`TerminationTime` and :class:`TerminationConversion` objects)
`speciesConstraints` Dictates the maximum number of atoms, carbons, electrons, etc. generated by RMG
------------------------------- ------------------------------------------------
`outputDirectory` The directory used to save output files
`scratchDirectory` The directory used to save temporary files
`verbosity` The level of logging verbosity for console output
`loadRestart` ``True`` if restarting a previous job, ``False`` otherwise
`saveRestartPeriod` The time period to periodically save a restart file (:class:`Quantity`), or ``None`` for never.
`units` The unit system to use to save output files (currently must be 'si')
`drawMolecules` ``True`` to draw pictures of the species in the core, ``False`` otherwise
`generatePlots` ``True`` to generate plots of the job execution statistics after each iteration, ``False`` otherwise
`verboseComments` ``True`` to keep the verbose comments for database estimates, ``False`` otherwise
`saveEdgeSpecies` ``True`` to save chemkin and HTML files of the edge species, ``False`` otherwise
`pressureDependence` Whether to process unimolecular (pressure-dependent) reaction networks
`quantumMechanics` Whether to apply quantum mechanical calculations instead of group additivity to certain molecular types.
`wallTime` The maximum amount of CPU time in seconds to expend on this job; used to stop gracefully so we can still get profiling information
------------------------------- ------------------------------------------------
`initializationTime` The time at which the job was initiated, in seconds since the epoch (i.e. from time.time())
`done` Whether the job has completed (there is nothing new to add)
=============================== ================================================
"""
def __init__(self, inputFile=None, logFile=None, outputDirectory=None, scratchDirectory=None):
self.inputFile = inputFile
self.logFile = logFile
self.outputDirectory = outputDirectory
self.scratchDirectory = scratchDirectory
self.clear()
def clear(self):
"""
Clear all loaded information about the job (except the file paths).
"""
self.databaseDirectory = None
self.thermoLibraries = None
self.transportLibraries = None
self.reactionLibraries = None
self.statmechLibraries = None
self.seedMechanisms = None
self.kineticsFamilies = None
self.kineticsDepositories = None
self.kineticsEstimator = 'group additivity'
self.solvent = None
self.diffusionLimiter = None
self.reactionModel = None
self.reactionSystems = None
self.database = None
self.fluxToleranceKeepInEdge = 0.0
self.fluxToleranceMoveToCore = 1.0
self.fluxToleranceInterrupt = 1.0
self.absoluteTolerance = 1.0e-8
self.relativeTolerance = 1.0e-4
self.sensitivityAbsoluteTolerance = 1.0e-6
self.sensitivityRelativeTolerance = 1.0e-4
self.maximumEdgeSpecies = 1000000
self.termination = []
self.done = False
self.verbosity = logging.INFO
self.loadRestart = None
self.saveRestartPeriod = None
self.units = 'si'
self.drawMolecules = None
self.generatePlots = None
self.saveSimulationProfiles = None
self.verboseComments = None
self.saveEdgeSpecies = None
self.pressureDependence = None
self.quantumMechanics = None
self.speciesConstraints = {}
self.wallTime = 0
self.initializationTime = 0
def loadInput(self, path=None):
"""
        Load an RMG job from the input file located at `path`, or
        from the `inputFile` attribute if `path` is not given as a parameter.
"""
from input import readInputFile
if path is None: path = self.inputFile
readInputFile(path, self)
self.speciesConstraints['explicitlyAllowedMolecules'] = []
self.reactionModel.kineticsEstimator = self.kineticsEstimator
# If the output directory is not yet set, then set it to the same
# directory as the input file by default
if not self.outputDirectory:
self.outputDirectory = os.path.dirname(path)
if self.pressureDependence:
self.pressureDependence.outputFile = self.outputDirectory
self.reactionModel.pressureDependence = self.pressureDependence
self.reactionModel.speciesConstraints = self.speciesConstraints
self.reactionModel.verboseComments = self.verboseComments
if self.quantumMechanics:
self.quantumMechanics.setDefaultOutputDirectory(self.outputDirectory)
self.reactionModel.quantumMechanics = self.quantumMechanics
def loadThermoInput(self, path=None):
"""
        Load a Thermo Estimation job from the thermo input file located at `path`,
        or from the `inputFile` attribute if `path` is not given as a parameter.
"""
from input import readThermoInputFile
if path is None: path = self.inputFile
if not self.outputDirectory:
self.outputDirectory = os.path.dirname(path)
readThermoInputFile(path, self)
if self.quantumMechanics:
self.quantumMechanics.setDefaultOutputDirectory(self.outputDirectory)
self.reactionModel.quantumMechanics = self.quantumMechanics
def checkInput(self):
"""
Check for a few common mistakes in the input file.
"""
if self.pressureDependence:
for index, reactionSystem in enumerate(self.reactionSystems):
assert (reactionSystem.T.value_si < self.pressureDependence.Tmax.value_si), "Reaction system T is above pressureDependence range."
assert (reactionSystem.T.value_si > self.pressureDependence.Tmin.value_si), "Reaction system T is below pressureDependence range."
assert (reactionSystem.P.value_si < self.pressureDependence.Pmax.value_si), "Reaction system P is above pressureDependence range."
assert (reactionSystem.P.value_si > self.pressureDependence.Pmin.value_si), "Reaction system P is below pressureDependence range."
assert any([not s.reactive for s in reactionSystem.initialMoleFractions.keys()]), \
"Pressure Dependence calculations require at least one inert (nonreacting) species for the bath gas."
def checkLibraries(self):
"""
Check unwanted use of libraries:
Liquid phase libraries in Gas phase simulation.
        Loading a Liquid phase library obtained in a solvent other than the one defined in the input file.
Other checks can be added here.
"""
#Liquid phase simulation checks
if self.solvent:
            #check thermo libraries
for libIter in self.database.thermo.libraries.iterkeys():
if self.database.thermo.libraries[libIter].solvent:
if not self.solvent == self.database.thermo.libraries[libIter].solvent:
raise DatabaseError('''Thermo library "{2}" was obtained in "{1}" and cannot be used with this liquid phase simulation in "{0}"
'''.format(self.solvent, self.database.thermo.libraries[libIter].solvent, self.database.thermo.libraries[libIter].name))
            #Check kinetic libraries
for libIter in self.database.kinetics.libraries.iterkeys():
if self.database.kinetics.libraries[libIter].solvent:
if not self.solvent == self.database.kinetics.libraries[libIter].solvent:
raise DatabaseError('''Kinetics library "{2}" was obtained in "{1}" and cannot be used with this liquid phase simulation in "{0}"
'''.format(self.solvent, self.database.kinetics.libraries[libIter].solvent, self.database.kinetics.libraries[libIter].name))
#Gas phase simulation checks
else:
            #check thermo libraries
for libIter in self.database.thermo.libraries.iterkeys():
if self.database.thermo.libraries[libIter].solvent:
raise DatabaseError('''Thermo library "{1}" was obtained in "{0}" solvent and cannot be used in gas phase simulation
'''.format(self.database.thermo.libraries[libIter].solvent, self.database.thermo.libraries[libIter].name))
            #Check kinetic libraries
for libIter in self.database.kinetics.libraries.iterkeys():
if self.database.kinetics.libraries[libIter].solvent:
raise DatabaseError('''Kinetics library "{1}" was obtained in "{0}" solvent and cannot be used in gas phase simulation
'''.format(self.database.kinetics.libraries[libIter].solvent, self.database.kinetics.libraries[libIter].name))
def saveInput(self, path=None):
"""
        Save an RMG job to the input file located at `path`, or
        to the `outputFile` attribute if `path` is not given as a parameter.
"""
from input import saveInputFile
if path is None: path = self.outputFile
saveInputFile(path, self)
def loadDatabase(self):
self.database = RMGDatabase()
self.database.load(
path = self.databaseDirectory,
thermoLibraries = self.thermoLibraries,
transportLibraries = self.transportLibraries,
reactionLibraries = [library for library, option in self.reactionLibraries],
seedMechanisms = self.seedMechanisms,
kineticsFamilies = self.kineticsFamilies,
kineticsDepositories = self.kineticsDepositories,
#frequenciesLibraries = self.statmechLibraries,
depository = False, # Don't bother loading the depository information, as we don't use it
)
#check libraries
self.checkLibraries()
#set global variable solvent
if self.solvent:
global solvent
solvent=self.solvent
if self.kineticsEstimator == 'rate rules':
if '!training' not in self.kineticsDepositories:
logging.info('Adding rate rules from training set in kinetics families...')
for family in self.database.kinetics.families.values():
family.addKineticsRulesFromTrainingSet(thermoDatabase=self.database.thermo)
else:
logging.info('Training set explicitly not added to rate rules in kinetics families...')
logging.info('Filling in rate rules in kinetics families by averaging...')
for family in self.database.kinetics.families.values():
family.fillKineticsRulesByAveragingUp()
def initialize(self, args):
"""
Initialize an RMG job using the command-line arguments `args` as returned
by the :mod:`argparse` package.
"""
# Save initialization time
self.initializationTime = time.time()
# Log start timestamp
logging.info('RMG execution initiated at ' + time.asctime() + '\n')
# Print out RMG header
self.logHeader()
# Set directories
self.outputDirectory = args.output_directory
self.scratchDirectory = args.scratch_directory
if args.restart:
if not os.path.exists(os.path.join(self.outputDirectory,'restart.pkl')):
logging.error("Could not find restart file (restart.pkl). Please run without --restart option.")
raise Exception("No restart file")
# Read input file
self.loadInput(args.file[0])
# Check input file
self.checkInput()
# See if memory profiling package is available
try:
import psutil
except ImportError:
logging.info('Optional package dependency "psutil" not found; memory profiling information will not be saved.')
# Make output subdirectories
self.makeOutputSubdirectory('plot')
self.makeOutputSubdirectory('species')
self.makeOutputSubdirectory('pdep')
self.makeOutputSubdirectory('chemkin')
self.makeOutputSubdirectory('solver')
if self.saveEdgeSpecies:
self.makeOutputSubdirectory('species_edge')
# Do any necessary quantum mechanics startup
if self.quantumMechanics:
self.quantumMechanics.initialize()
# Load databases
self.loadDatabase()
# Do all liquid-phase startup things:
if self.solvent:
Species.solventData = self.database.solvation.getSolventData(self.solvent)
Species.solventName = self.solvent
diffusionLimiter.enable(Species.solventData, self.database.solvation)
logging.info("Setting solvent data for {0}".format(self.solvent))
# Set wall time
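        # The walltime string is read right-to-left as SS, MM:SS, HH:MM:SS or
        # DD:HH:MM:SS; for example '02:00:00:00' requests a graceful stop
        # after roughly two days.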
if args.walltime == '0':
self.wallTime = 0
else:
data = args.walltime[0].split(':')
if len(data) == 1:
self.wallTime = int(data[-1])
elif len(data) == 2:
self.wallTime = int(data[-1]) + 60 * int(data[-2])
elif len(data) == 3:
self.wallTime = int(data[-1]) + 60 * int(data[-2]) + 3600 * int(data[-3])
elif len(data) == 4:
self.wallTime = int(data[-1]) + 60 * int(data[-2]) + 3600 * int(data[-3]) + 86400 * int(data[-4])
else:
                raise ValueError('Invalid format for wall time; should be at most DD:HH:MM:SS.')
        # Write an initial HTML output file, overwriting any previous one
from rmgpy.rmg.output import saveOutputHTML
saveOutputHTML(os.path.join(self.outputDirectory, 'output.html'), self.reactionModel, 'core')
# Initialize reaction model
if args.restart:
self.loadRestartFile(os.path.join(self.outputDirectory,'restart.pkl'))
else:
# Seed mechanisms: add species and reactions from seed mechanism
# DON'T generate any more reactions for the seed species at this time
for seedMechanism in self.seedMechanisms:
self.reactionModel.addSeedMechanismToCore(seedMechanism, react=False)
# Reaction libraries: add species and reactions from reaction library to the edge so
# that RMG can find them if their rates are large enough
for library, option in self.reactionLibraries:
self.reactionModel.addReactionLibraryToEdge(library)
# Also always add in a few bath gases (since RMG-Java does)
for label, smiles in [('Ar','[Ar]'), ('He','[He]'), ('Ne','[Ne]'), ('N2','N#N')]:
molecule = Molecule().fromSMILES(smiles)
spec, isNew = self.reactionModel.makeNewSpecies(molecule, label=label, reactive=False)
if isNew:
self.initialSpecies.append(spec)
# Perform species constraints and forbidden species checks on input species
for spec in self.initialSpecies:
if self.database.forbiddenStructures.isMoleculeForbidden(spec.molecule[0]):
if 'allowed' in self.speciesConstraints and 'input species' in self.speciesConstraints['allowed']:
logging.warning('Input species {0} is globally forbidden. It will behave as an inert unless found in a seed mechanism or reaction library.'.format(spec.label))
else:
raise ForbiddenStructureException("Input species {0} is globally forbidden. You may explicitly allow it, but it will remain inert unless found in a seed mechanism or reaction library.".format(spec.label))
if self.reactionModel.failsSpeciesConstraints(spec):
if 'allowed' in self.speciesConstraints and 'input species' in self.speciesConstraints['allowed']:
self.speciesConstraints['explicitlyAllowedMolecules'].append(spec.molecule[0])
pass
else:
raise ForbiddenStructureException("Species constraints forbids input species {0}. Please reformulate constraints, remove the species, or explicitly allow it.".format(spec.label))
for spec in self.initialSpecies:
spec.generateThermoData(self.database, quantumMechanics=self.quantumMechanics)
spec.generateTransportData(self.database)
# Add nonreactive species (e.g. bath gases) to core first
# This is necessary so that the PDep algorithm can identify the bath gas
for spec in self.initialSpecies:
if not spec.reactive:
self.reactionModel.enlarge(spec)
for spec in self.initialSpecies:
if spec.reactive:
self.reactionModel.enlarge(spec)
# Save a restart file if desired
if self.saveRestartPeriod:
self.saveRestartFile(os.path.join(self.outputDirectory,'restart.pkl'), self.reactionModel)
def execute(self, args):
"""
Execute an RMG job using the command-line arguments `args` as returned
by the :mod:`argparse` package.
"""
self.initialize(args)
# RMG execution statistics
coreSpeciesCount = []
coreReactionCount = []
edgeSpeciesCount = []
edgeReactionCount = []
execTime = []
restartSize = []
memoryUse = []
self.done = False
self.saveEverything()
# Main RMG loop
while not self.done:
self.done = True
objectsToEnlarge = []
allTerminated = True
for index, reactionSystem in enumerate(self.reactionSystems):
if self.saveSimulationProfiles:
csvfile = file(os.path.join(self.outputDirectory, 'solver', 'simulation_{0}_{1:d}.csv'.format(index+1, len(self.reactionModel.core.species))),'w')
worksheet = csv.writer(csvfile)
else:
worksheet = None
# Conduct simulation
logging.info('Conducting simulation of reaction system %s...' % (index+1))
terminated, obj = reactionSystem.simulate(
coreSpecies = self.reactionModel.core.species,
coreReactions = self.reactionModel.core.reactions,
edgeSpecies = self.reactionModel.edge.species,
edgeReactions = self.reactionModel.edge.reactions,
toleranceKeepInEdge = self.fluxToleranceKeepInEdge,
toleranceMoveToCore = self.fluxToleranceMoveToCore,
toleranceInterruptSimulation = self.fluxToleranceInterrupt,
pdepNetworks = self.reactionModel.networkList,
worksheet = worksheet,
absoluteTolerance = self.absoluteTolerance,
relativeTolerance = self.relativeTolerance,
)
allTerminated = allTerminated and terminated
logging.info('')
# If simulation is invalid, note which species should be added to
# the core
if obj:
if isinstance(obj, PDepNetwork):
# Determine which species in that network has the highest leak rate
# We do this here because we need a temperature and pressure
# Store the maximum leak species along with the associated network
obj = (obj, obj.getMaximumLeakSpecies(reactionSystem.T.value_si, reactionSystem.P.value_si))
objectsToEnlarge.append(obj)
self.done = False
if not self.done: # There is something that needs exploring/enlarging
# If we reached our termination conditions, then try to prune
# species from the edge
if allTerminated:
self.reactionModel.prune(self.reactionSystems, self.fluxToleranceKeepInEdge, self.maximumEdgeSpecies)
# Enlarge objects identified by the simulation for enlarging
# These should be Species or Network objects
logging.info('')
objectsToEnlarge = list(set(objectsToEnlarge))
for objectToEnlarge in objectsToEnlarge:
self.reactionModel.enlarge(objectToEnlarge)
self.saveEverything()
# Update RMG execution statistics
logging.info('Updating RMG execution statistics...')
coreSpec, coreReac, edgeSpec, edgeReac = self.reactionModel.getModelSize()
coreSpeciesCount.append(coreSpec)
coreReactionCount.append(coreReac)
edgeSpeciesCount.append(edgeSpec)
edgeReactionCount.append(edgeReac)
execTime.append(time.time() - self.initializationTime)
elapsed = execTime[-1]
seconds = elapsed % 60
minutes = (elapsed - seconds) % 3600 / 60
hours = (elapsed - seconds - minutes * 60) % (3600 * 24) / 3600
days = (elapsed - seconds - minutes * 60 - hours * 3600) / (3600 * 24)
logging.info(' Execution time (DD:HH:MM:SS): '
'{0:02}:{1:02}:{2:02}:{3:02}'.format(int(days), int(hours), int(minutes), int(seconds)))
try:
import psutil
process = psutil.Process(os.getpid())
rss, vms = process.memory_info()
memoryUse.append(rss / 1.0e6)
logging.info(' Memory used: %.2f MB' % (memoryUse[-1]))
except ImportError:
memoryUse.append(0.0)
if os.path.exists(os.path.join(self.outputDirectory,'restart.pkl.gz')):
restartSize.append(os.path.getsize(os.path.join(self.outputDirectory,'restart.pkl.gz')) / 1.0e6)
logging.info(' Restart file size: %.2f MB' % (restartSize[-1]))
else:
restartSize.append(0.0)
self.saveExecutionStatistics(execTime, coreSpeciesCount, coreReactionCount, edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize)
if self.generatePlots:
self.generateExecutionPlots(execTime, coreSpeciesCount, coreReactionCount, edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize)
logging.info('')
# Consider stopping gracefully if the next iteration might take us
# past the wall time
if self.wallTime > 0 and len(execTime) > 1:
t = execTime[-1]
dt = execTime[-1] - execTime[-2]
if t + 3 * dt > self.wallTime:
logging.info('MODEL GENERATION TERMINATED')
logging.info('')
logging.info('There is not enough time to complete the next iteration before the wall time is reached.')
logging.info('The output model may be incomplete.')
logging.info('')
coreSpec, coreReac, edgeSpec, edgeReac = self.reactionModel.getModelSize()
logging.info('The current model core has %s species and %s reactions' % (coreSpec, coreReac))
logging.info('The current model edge has %s species and %s reactions' % (edgeSpec, edgeReac))
return
# Run sensitivity analysis post-model generation if sensitivity analysis is on
for index, reactionSystem in enumerate(self.reactionSystems):
if reactionSystem.sensitiveSpecies:
logging.info('Conducting sensitivity analysis of reaction system %s...' % (index+1))
sensWorksheet = []
for spec in reactionSystem.sensitiveSpecies:
csvfile = file(os.path.join(self.outputDirectory, 'solver', 'sensitivity_{0}_SPC_{1}.csv'.format(index+1, spec.index)),'w')
sensWorksheet.append(csv.writer(csvfile))
terminated, obj = reactionSystem.simulate(
coreSpecies = self.reactionModel.core.species,
coreReactions = self.reactionModel.core.reactions,
edgeSpecies = self.reactionModel.edge.species,
edgeReactions = self.reactionModel.edge.reactions,
toleranceKeepInEdge = self.fluxToleranceKeepInEdge,
toleranceMoveToCore = self.fluxToleranceMoveToCore,
toleranceInterruptSimulation = self.fluxToleranceInterrupt,
pdepNetworks = self.reactionModel.networkList,
worksheet = None,
absoluteTolerance = self.absoluteTolerance,
relativeTolerance = self.relativeTolerance,
sensitivity = True,
sensitivityAbsoluteTolerance = self.sensitivityAbsoluteTolerance,
sensitivityRelativeTolerance = self.sensitivityRelativeTolerance,
sensWorksheet = sensWorksheet,
)
# Update RMG execution statistics for each time a reactionSystem has sensitivity analysis performed.
# But just provide time and memory used.
logging.info('Updating RMG execution statistics...')
execTime.append(time.time() - self.initializationTime)
elapsed = execTime[-1]
seconds = elapsed % 60
minutes = (elapsed - seconds) % 3600 / 60
hours = (elapsed - seconds - minutes * 60) % (3600 * 24) / 3600
days = (elapsed - seconds - minutes * 60 - hours * 3600) / (3600 * 24)
logging.info(' Execution time (DD:HH:MM:SS): '
'{0:02}:{1:02}:{2:02}:{3:02}'.format(int(days), int(hours), int(minutes), int(seconds)))
try:
import psutil
process = psutil.Process(os.getpid())
rss, vms = process.memory_info()
memoryUse.append(rss / 1.0e6)
logging.info(' Memory used: %.2f MB' % (memoryUse[-1]))
except ImportError:
memoryUse.append(0.0)
# Write output file
logging.info('')
logging.info('MODEL GENERATION COMPLETED')
logging.info('')
coreSpec, coreReac, edgeSpec, edgeReac = self.reactionModel.getModelSize()
logging.info('The final model core has %s species and %s reactions' % (coreSpec, coreReac))
logging.info('The final model edge has %s species and %s reactions' % (edgeSpec, edgeReac))
self.finish()
def saveEverything(self):
"""
Saves the output HTML, the Chemkin file, and the Restart file (if appropriate).
The restart file is only saved if self.saveRestartPeriod or self.done.
"""
# If the user specifies it, add unused reaction library reactions to
        # an additional output species and reaction list which is written to the output HTML
# file as well as the chemkin file
self.reactionModel.outputSpeciesList = []
self.reactionModel.outputReactionList = []
for library, option in self.reactionLibraries:
if option:
self.reactionModel.addReactionLibraryToOutput(library)
# Save the current state of the model to HTML files
self.saveOutputHTML()
        # Save a Chemkin file containing the current model
self.saveChemkinFiles()
# Save the restart file if desired
if self.saveRestartPeriod or self.done:
self.saveRestartFile( os.path.join(self.outputDirectory,'restart.pkl'),
self.reactionModel,
delay=0 if self.done else self.saveRestartPeriod.value_si
)
# Save the QM thermo to a library if QM was turned on
if self.quantumMechanics:
logging.info('Saving the QM generated thermo to qmThermoLibrary.py ...')
self.quantumMechanics.database.save(os.path.join(self.outputDirectory,'qmThermoLibrary.py'))
def finish(self):
"""
Complete the model generation.
"""
# Log end timestamp
logging.info('')
logging.info('RMG execution terminated at ' + time.asctime())
def getGitCommit(self):
import subprocess
from rmgpy import getPath
try:
return subprocess.check_output(['git', 'log',
'--format=%H%n%cd', '-1'],
cwd=getPath()).splitlines()
except:
return '', ''
def logHeader(self, level=logging.INFO):
"""
Output a header containing identifying information about RMG to the log.
"""
logging.log(level, '###################################################')
logging.log(level, '# RMG-Py - Reaction Mechanism Generator in Python #')
logging.log(level, '# Version: Early 2013 #')
logging.log(level, '# Authors: RMG Developers (rmg_dev@mit.edu) #')
logging.log(level, '# P.I.s: William H. Green (whgreen@mit.edu) #')
logging.log(level, '# Richard H. West (r.west@neu.edu) #')
logging.log(level, '# Website: http://greengroup.github.com/RMG-Py/ #')
logging.log(level, '###################################################\n')
head, date = self.getGitCommit()
if head != '' and date != '':
logging.log(level, 'The current git HEAD is:')
logging.log(level, '\t%s' % head)
logging.log(level, '\t%s' % date)
logging.log(level, '')
def makeOutputSubdirectory(self, folder):
"""
Create a subdirectory `folder` in the output directory. If the folder
already exists (e.g. from a previous job) its contents are deleted.
"""
dir = os.path.join(self.outputDirectory, folder)
if os.path.exists(dir):
# The directory already exists, so delete it (and all its content!)
shutil.rmtree(dir)
os.mkdir(dir)
def loadRestartFile(self, path):
"""
Load a restart file at `path` on disk.
"""
import cPickle
# Unpickle the reaction model from the specified restart file
logging.info('Loading previous restart file...')
f = open(path, 'rb')
self.reactionModel = cPickle.load(f)
f.close()
# A few things still point to the species in the input file, so update
# those to point to the equivalent species loaded from the restart file
# The termination conversions still point to the old species
from rmgpy.solver.base import TerminationConversion
for reactionSystem in self.reactionSystems:
for term in reactionSystem.termination:
if isinstance(term, TerminationConversion):
term.species, isNew = self.reactionModel.makeNewSpecies(term.species.molecule[0], term.species.label, term.species.reactive)
# The initial mole fractions in the reaction systems still point to the old species
for reactionSystem in self.reactionSystems:
initialMoleFractions = {}
for spec0, moleFrac in reactionSystem.initialMoleFractions.iteritems():
spec, isNew = self.reactionModel.makeNewSpecies(spec0.molecule[0], spec0.label, spec0.reactive)
initialMoleFractions[spec] = moleFrac
reactionSystem.initialMoleFractions = initialMoleFractions
# The reactions and reactionDict still point to the old reaction families
reactionDict = {}
oldFamilies = self.reactionModel.reactionDict.keys()
for family0 in self.reactionModel.reactionDict:
# Find the equivalent library or family in the newly-loaded kinetics database
family = None
if isinstance(family0, KineticsLibrary):
for label, database in self.database.kinetics.libraries.iteritems():
if database.label == family0.label:
family = database
break
elif isinstance(family0, KineticsFamily):
for label, database in self.database.kinetics.families.iteritems():
if database.label == family0.label:
family = database
break
else:
import pdb; pdb.set_trace()
if family is None:
raise Exception("Unable to find matching reaction family for %s" % family0.label)
# Update each affected reaction to point to that new family
# Also use that new family in a duplicate reactionDict
reactionDict[family] = {}
for reactant1 in self.reactionModel.reactionDict[family0]:
reactionDict[family][reactant1] = {}
for reactant2 in self.reactionModel.reactionDict[family0][reactant1]:
reactionDict[family][reactant1][reactant2] = []
if isinstance(family0, KineticsLibrary):
for rxn in self.reactionModel.reactionDict[family0][reactant1][reactant2]:
assert isinstance(rxn, LibraryReaction)
rxn.library = family
reactionDict[family][reactant1][reactant2].append(rxn)
elif isinstance(family0, KineticsFamily):
for rxn in self.reactionModel.reactionDict[family0][reactant1][reactant2]:
assert isinstance(rxn, TemplateReaction)
rxn.family = family
reactionDict[family][reactant1][reactant2].append(rxn)
self.reactionModel.reactionDict = reactionDict
def saveOutputHTML(self):
"""
Save the current reaction model to a pretty HTML file.
"""
logging.info('Saving current model core to HTML file...')
from rmgpy.rmg.output import saveOutputHTML
saveOutputHTML(os.path.join(self.outputDirectory, 'output.html'), self.reactionModel, 'core')
if self.saveEdgeSpecies ==True:
logging.info('Saving current model edge to HTML file...')
from rmgpy.rmg.output import saveOutputHTML
saveOutputHTML(os.path.join(self.outputDirectory, 'output_edge.html'), self.reactionModel, 'edge')
def saveChemkinFiles(self):
"""
Save the current reaction model to a set of Chemkin files.
"""
logging.info('Saving current model core to Chemkin file...')
this_chemkin_path = os.path.join(self.outputDirectory, 'chemkin', 'chem{0:04d}.inp'.format(len(self.reactionModel.core.species)))
latest_chemkin_path = os.path.join(self.outputDirectory, 'chemkin','chem.inp')
latest_chemkin_verbose_path = os.path.join(self.outputDirectory, 'chemkin', 'chem_annotated.inp')
latest_dictionary_path = os.path.join(self.outputDirectory, 'chemkin','species_dictionary.txt')
latest_transport_path = os.path.join(self.outputDirectory, 'chemkin', 'tran.dat')
self.reactionModel.saveChemkinFile(this_chemkin_path, latest_chemkin_verbose_path, latest_dictionary_path, latest_transport_path, False)
if os.path.exists(latest_chemkin_path):
os.unlink(latest_chemkin_path)
shutil.copy2(this_chemkin_path,latest_chemkin_path)
if self.saveEdgeSpecies ==True:
logging.info('Saving current model core and edge to Chemkin file...')
this_chemkin_path = os.path.join(self.outputDirectory, 'chemkin', 'chem_edge%04i.inp' % len(self.reactionModel.core.species)) # len() needs to be core to have unambiguous index
latest_chemkin_path = os.path.join(self.outputDirectory, 'chemkin','chem_edge.inp')
latest_chemkin_verbose_path = os.path.join(self.outputDirectory, 'chemkin', 'chem_edge_annotated.inp')
latest_dictionary_path = os.path.join(self.outputDirectory, 'chemkin','species_edge_dictionary.txt')
latest_transport_path = None
self.reactionModel.saveChemkinFile(this_chemkin_path, latest_chemkin_verbose_path, latest_dictionary_path, latest_transport_path, self.saveEdgeSpecies)
if os.path.exists(latest_chemkin_path):
os.unlink(latest_chemkin_path)
shutil.copy2(this_chemkin_path,latest_chemkin_path)
def saveRestartFile(self, path, reactionModel, delay=0):
"""
Save a restart file to `path` on disk containing the contents of the
provided `reactionModel`. The `delay` parameter is a time in seconds; if
the restart file is not at least that old, the save is aborted. (Use the
default value of 0 to force the restart file to be saved.)
"""
import cPickle
# Saving of a restart file is very slow (likely due to all the Quantity objects)
        # Therefore, to save it less frequently, skip the save if the existing restart file is younger than the requested delay
if os.path.exists(path) and time.time() - os.path.getmtime(path) < delay:
logging.info('Not saving restart file in this iteration.')
return
# Pickle the reaction model to the specified file
# We also compress the restart file to save space (and lower the disk read/write time)
logging.info('Saving restart file...')
f = open(path, 'wb')
cPickle.dump(reactionModel, f, cPickle.HIGHEST_PROTOCOL)
f.close()
def saveExecutionStatistics(self, execTime, coreSpeciesCount, coreReactionCount,
edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize):
"""
Save the statistics of the RMG job to an Excel spreadsheet for easy viewing
after the run is complete. The statistics are saved to the file
`statistics.xls` in the output directory. The ``xlwt`` package is used to
create the spreadsheet file; if this package is not installed, no file is
saved.
"""
# Attempt to import the xlwt package; return if not installed
try:
xlwt
except NameError:
logging.warning('Package xlwt not loaded. Unable to save execution statistics.')
return
        # Create workbook and sheet for statistics to be placed
workbook = xlwt.Workbook()
sheet = workbook.add_sheet('Statistics')
# First column is execution time
sheet.write(0,0,'Execution time (s)')
for i, etime in enumerate(execTime):
sheet.write(i+1,0,etime)
# Second column is number of core species
sheet.write(0,1,'Core species')
for i, count in enumerate(coreSpeciesCount):
sheet.write(i+1,1,count)
# Third column is number of core reactions
sheet.write(0,2,'Core reactions')
for i, count in enumerate(coreReactionCount):
sheet.write(i+1,2,count)
# Fourth column is number of edge species
sheet.write(0,3,'Edge species')
for i, count in enumerate(edgeSpeciesCount):
sheet.write(i+1,3,count)
# Fifth column is number of edge reactions
sheet.write(0,4,'Edge reactions')
for i, count in enumerate(edgeReactionCount):
sheet.write(i+1,4,count)
# Sixth column is memory used
sheet.write(0,5,'Memory used (MB)')
for i, memory in enumerate(memoryUse):
sheet.write(i+1,5,memory)
# Seventh column is restart file size
sheet.write(0,6,'Restart file size (MB)')
for i, memory in enumerate(restartSize):
sheet.write(i+1,6,memory)
# Save workbook to file
fstr = os.path.join(self.outputDirectory, 'statistics.xls')
workbook.save(fstr)
def generateExecutionPlots(self, execTime, coreSpeciesCount, coreReactionCount,
edgeSpeciesCount, edgeReactionCount, memoryUse, restartSize):
"""
Generate a number of plots describing the statistics of the RMG job,
including the reaction model core and edge size and memory use versus
execution time. These will be placed in the output directory in the plot/
folder.
"""
logging.info('Generating plots of execution statistics...')
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(execTime, coreSpeciesCount, 'o-b')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Number of core species')
ax2 = ax1.twinx()
ax2.semilogx(execTime, coreReactionCount, 'o-r')
ax2.set_ylabel('Number of core reactions')
plt.savefig(os.path.join(self.outputDirectory, 'plot/coreSize.svg'))
plt.clf()
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.loglog(execTime, edgeSpeciesCount, 'o-b')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Number of edge species')
ax2 = ax1.twinx()
ax2.loglog(execTime, edgeReactionCount, 'o-r')
ax2.set_ylabel('Number of edge reactions')
plt.savefig(os.path.join(self.outputDirectory, 'plot/edgeSize.svg'))
plt.clf()
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.semilogx(execTime, memoryUse, 'o-k')
ax1.semilogx(execTime, restartSize, 'o-g')
ax1.set_xlabel('Execution time (s)')
ax1.set_ylabel('Memory (MB)')
ax1.legend(['RAM', 'Restart file'], loc=2)
plt.savefig(os.path.join(self.outputDirectory, 'plot/memoryUse.svg'))
plt.clf()
def loadRMGJavaInput(self, path):
"""
        Load an RMG-Java job from the RMG-Java condition file located at `path`.
"""
# NOTE: This function is currently incomplete!
# It only loads a subset of the available information.
self.reactionModel = CoreEdgeReactionModel()
self.initialSpecies = []
self.reactionSystems = []
Tlist = []; Plist = []; concentrationList = []; speciesDict = {}
termination = []; atol=1e-16; rtol=1e-8
with open(path, 'r') as f:
line = self.readMeaningfulLineJava(f)
while line != '':
if line.startswith('TemperatureModel:'):
tokens = line.split()
units = tokens[2][1:-1]
assert units in ['C', 'F', 'K']
if units == 'C':
Tlist = [float(T)+273.15 for T in tokens[3:]]
elif units == 'F':
Tlist = [(float(T)+459.67)*5./9. for T in tokens[3:]]
else:
Tlist = [float(T) for T in tokens[3:]]
elif line.startswith('PressureModel:'):
tokens = line.split()
units = tokens[2][1:-1]
assert units in ['atm', 'bar', 'Pa', 'torr']
if units == 'atm':
Plist = [float(P)*101325. for P in tokens[3:]]
elif units == 'bar':
Plist = [float(P)*100000. for P in tokens[3:]]
elif units == 'torr':
Plist = [float(P)/760.*101325. for P in tokens[3:]]
else:
Plist = [float(P) for P in tokens[3:]]
elif line.startswith('InitialStatus:'):
label = ''; concentrations = []; adjlist = ''
line = self.readMeaningfulLineJava(f)
while line != 'END':
if line == '' and label != '':
species = Species(label=label, molecule=[Molecule().fromAdjacencyList(adjlist)])
self.initialSpecies.append(species)
speciesDict[label] = species
concentrationList.append(concentrations)
label = ''; concentrations = []; adjlist = ''
elif line != '' and label == '':
tokens = line.split()
label = tokens[0]
units = tokens[1][1:-1]
if tokens[-1] in ['Unreactive', 'ConstantConcentration']:
tokens.pop(-1)
assert units in ['mol/cm3', 'mol/m3', 'mol/l']
if units == 'mol/cm3':
concentrations = [float(C)*1.0e6 for C in tokens[2:]]
elif units == 'mol/l':
concentrations = [float(C)*1.0e3 for C in tokens[2:]]
else:
concentrations = [float(C) for C in tokens[2:]]
elif line != '':
adjlist += line + '\n'
line = f.readline().strip()
if '//' in line: line = line[0:line.index('//')]
elif line.startswith('InertGas:'):
line = self.readMeaningfulLineJava(f)
while line != 'END':
tokens = line.split()
label = tokens[0]
assert label in ['N2', 'Ar', 'He', 'Ne']
if label == 'Ne':
smiles = '[Ne]'
elif label == 'Ar':
smiles = '[Ar]'
elif label == 'He':
smiles = '[He]'
else:
smiles = 'N#N'
units = tokens[1][1:-1]
assert units in ['mol/cm3', 'mol/m3', 'mol/l']
if units == 'mol/cm3':
concentrations = [float(C)*1.0e6 for C in tokens[2:]]
elif units == 'mol/l':
concentrations = [float(C)*1.0e3 for C in tokens[2:]]
else:
concentrations = [float(C) for C in tokens[2:]]
species = Species(label=label, reactive=False, molecule=[Molecule().fromSMILES(smiles)])
self.initialSpecies.append(species)
speciesDict[label] = species
concentrationList.append(concentrations)
line = self.readMeaningfulLineJava(f)
elif line.startswith('FinishController:'):
# First meaningful line is a termination time or conversion
line = self.readMeaningfulLineJava(f)
tokens = line.split()
if tokens[2].lower() == 'conversion:':
label = tokens[3]
conversion = float(tokens[4])
termination.append(TerminationConversion(spec=speciesDict[label], conv=conversion))
elif tokens[2].lower() == 'reactiontime:':
time = float(tokens[3])
units = tokens[4][1:-1]
assert units in ['sec', 'min', 'hr', 'day']
if units == 'min':
time *= 60.
elif units == 'hr':
time *= 60. * 60.
elif units == 'day':
time *= 60. * 60. * 24.
termination.append(TerminationTime(time=time))
# Second meaningful line is the error tolerance
# We're not doing anything with this information yet!
line = self.readMeaningfulLineJava(f)
elif line.startswith('Atol:'):
tokens = line.split()
atol = float(tokens[1])
elif line.startswith('Rtol:'):
tokens = line.split()
rtol = float(tokens[1])
line = self.readMeaningfulLineJava(f)
assert len(Tlist) > 0
assert len(Plist) > 0
concentrationList = numpy.array(concentrationList)
assert concentrationList.shape[1] > 0 # An arbitrary number of concentrations is acceptable, and should be run for each reactor system
# Make a reaction system for each (T,P) combination
for T in Tlist:
for P in Plist:
for i in range(concentrationList.shape[1]):
concentrations = concentrationList[:,i]
totalConc = numpy.sum(concentrations)
initialMoleFractions = dict([(self.initialSpecies[i], concentrations[i] / totalConc) for i in range(len(self.initialSpecies))])
reactionSystem = SimpleReactor(T, P, initialMoleFractions=initialMoleFractions, termination=termination)
self.reactionSystems.append(reactionSystem)
def readMeaningfulLineJava(self, f):
"""
Read a meaningful line from an RMG-Java condition file object `f`,
returning the line with any comments removed.
"""
line = f.readline()
if line != '':
line = line.strip()
if '//' in line: line = line[0:line.index('//')]
while line == '':
line = f.readline()
if line == '': break
line = line.strip()
if '//' in line: line = line[0:line.index('//')]
return line
################################################################################
def initializeLog(verbose, log_file_name):
"""
Set up a logger for RMG to use to print output to stdout. The
`verbose` parameter is an integer specifying the amount of log text seen
at the console; the levels correspond to those of the :data:`logging` module.
"""
# Create logger
logger = logging.getLogger()
logger.setLevel(verbose)
# Create console handler and set level to debug; send everything to stdout
# rather than stderr
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(verbose)
logging.addLevelName(logging.CRITICAL, 'Critical: ')
logging.addLevelName(logging.ERROR, 'Error: ')
logging.addLevelName(logging.WARNING, 'Warning: ')
logging.addLevelName(logging.INFO, '')
logging.addLevelName(logging.DEBUG, '')
logging.addLevelName(1, '')
# Create formatter and add to console handler
#formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s', '%Y-%m-%d %H:%M:%S')
#formatter = Formatter('%(message)s', '%Y-%m-%d %H:%M:%S')
formatter = logging.Formatter('%(levelname)s%(message)s')
ch.setFormatter(formatter)
# create file handler
if os.path.exists(log_file_name):
backup = os.path.join(log_file_name[:-7], 'RMG_backup.log')
if os.path.exists(backup):
print "Removing old "+backup
os.remove(backup)
print 'Moving {0} to {1}\n'.format(log_file_name, backup)
shutil.move(log_file_name, backup)
fh = logging.FileHandler(filename=log_file_name) #, backupCount=3)
fh.setLevel(min(logging.DEBUG,verbose)) # always at least VERBOSE in the file
fh.setFormatter(formatter)
# notice that STDERR does not get saved to the log file
# so errors from underlying libraries (eg. openbabel) etc. that report
# on stderr will not be logged to disk.
# remove old handlers!
while logger.handlers:
logger.removeHandler(logger.handlers[0])
# Add console and file handlers to logger
logger.addHandler(ch)
logger.addHandler(fh)
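# Illustrative call (the exact arguments used by the launcher are an
# assumption):
#   initializeLog(logging.INFO, os.path.join(output_directory, 'RMG.log'))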
################################################################################
class Tee:
"""A simple tee to create a stream which prints to many streams.
This is used to report the profiling statistics to both the log file
and the standard output.
"""
def __init__(self, *fileobjects):
self.fileobjects=fileobjects
def write(self, string):
for fileobject in self.fileobjects:
fileobject.write(string)
def processProfileStats(stats_file, log_file):
import pstats
out_stream = Tee(sys.stdout,open(log_file,'a')) # print to screen AND append to RMG.log
print >>out_stream, "="*80
print >>out_stream, "Profiling Data".center(80)
print >>out_stream, "="*80
stats = pstats.Stats(stats_file,stream=out_stream)
stats.strip_dirs()
print >>out_stream, "Sorted by internal time"
stats.sort_stats('time')
stats.print_stats(25)
stats.print_callers(25)
print >>out_stream, "Sorted by cumulative time"
stats.sort_stats('cumulative')
stats.print_stats(25)
stats.print_callers(25)
stats.print_callees(25)
def makeProfileGraph(stats_file):
"""
Uses gprof2dot to create a graphviz dot file of the profiling information.
This requires the gprof2dot package available via `pip install gprof2dot`.
Render the result using the program 'dot' via a command like
`dot -Tpdf input.dot -o output.pdf`.
"""
try:
from gprof2dot import PstatsParser, DotWriter, SAMPLES, themes
except ImportError:
logging.warning('Trouble importing from package gprof2dot. Unable to create a graph of the profile statistics.')
logging.warning('Try getting the latest version with something like `pip install --upgrade gprof2dot`.')
return
import subprocess
#create an Options class to mimic optparser output as much as possible:
class Options:
pass
options = Options()
options.node_thres = 0.8
options.edge_thres = 0.1
options.strip = False
options.show_samples = False
options.root = ""
options.leaf = ""
options.wrap = True
theme = themes['color'] # bw color gray pink
theme.fontname = "ArialMT" # default "Arial" leads to PostScript warnings in dot (on Mac OS)
parser = PstatsParser(stats_file)
profile = parser.parse()
dot_file = stats_file + '.dot'
output = open(dot_file,'wt')
dot = DotWriter(output)
dot.strip = options.strip
dot.wrap = options.wrap
if options.show_samples:
dot.show_function_events.append(SAMPLES)
profile = profile
profile.prune(options.node_thres/100.0, options.edge_thres/100.0)
if options.root:
rootId = profile.getFunctionId(options.root)
if not rootId:
sys.stderr.write('root node ' + options.root + ' not found (might already be pruned : try -e0 -n0 flags)\n')
sys.exit(1)
profile.prune_root(rootId)
if options.leaf:
leafId = profile.getFunctionId(options.leaf)
if not leafId:
sys.stderr.write('leaf node ' + options.leaf + ' not found (maybe already pruned : try -e0 -n0 flags)\n')
sys.exit(1)
profile.prune_leaf(leafId)
dot.graph(profile, theme)
output.close()
try:
subprocess.check_call(['dot', '-Tpdf', dot_file, '-o', '{0}.pdf'.format(dot_file)])
except subprocess.CalledProcessError:
logging.error("Error returned by 'dot' when generating graph of the profile statistics.")
logging.info("To try it yourself:\n dot -Tpdf {0} -o {0}.pdf".format(dot_file))
except OSError:
logging.error("Couldn't run 'dot' to create graph of profile statistics. Check graphviz is installed properly and on your path.")
logging.info("Once you've got it, try:\n dot -Tpdf {0} -o {0}.pdf".format(dot_file))
else:
logging.info("Graph of profile statistics saved to: \n {0}.pdf".format(dot_file))
| mit | -5,214,712,252,094,422,000 | 48.003909 | 228 | 0.582121 | false |
ghackebeil/PyORAM | src/pyoram/storage/block_storage.py | 1 | 3293 | __all__ = ('BlockStorageTypeFactory',)
import logging
log = logging.getLogger("pyoram")
def BlockStorageTypeFactory(storage_type_name):
if storage_type_name in BlockStorageTypeFactory._registered_devices:
return BlockStorageTypeFactory.\
_registered_devices[storage_type_name]
else:
raise ValueError(
"BlockStorageTypeFactory: Unsupported storage "
"type: %s" % (storage_type_name))
BlockStorageTypeFactory._registered_devices = {}
def _register_device(name, type_):
if name in BlockStorageTypeFactory._registered_devices:
raise ValueError("Can not register block storage device type "
"with name '%s'. A device type is already "
"registered with that name." % (name))
if not issubclass(type_, BlockStorageInterface):
raise TypeError("Can not register block storage device type "
"'%s'. The device must be a subclass of "
"BlockStorageInterface" % (type_))
BlockStorageTypeFactory._registered_devices[name] = type_
BlockStorageTypeFactory.register_device = _register_device
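# Illustrative registration and lookup of a device type (FileBlockStorage is
# a hypothetical subclass of BlockStorageInterface, not defined here):
#
#   BlockStorageTypeFactory.register_device('file', FileBlockStorage)
#   storage_cls = BlockStorageTypeFactory('file')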
class BlockStorageInterface(object):
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
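    # Illustrative context-manager usage for a concrete device (the device
    # name and constructor arguments are assumptions):
    #
    #   with SomeBlockStorageDevice('heap.bin', block_size=4096) as device:
    #       data = device.read_block(0)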
#
# Abstract Interface
#
def clone_device(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def compute_storage_size(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@classmethod
def setup(cls, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_count(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def block_size(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def storage_name(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def update_header_data(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def close(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def yield_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def read_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_blocks(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
def write_block(self, *args, **kwds):
raise NotImplementedError # pragma: no cover
@property
def bytes_sent(self):
raise NotImplementedError # pragma: no cover
@property
def bytes_received(self):
raise NotImplementedError # pragma: no cover
| mit | -4,740,640,862,702,392,000 | 38.674699 | 73 | 0.580626 | false |
YunoHost/moulinette | moulinette/interfaces/cli.py | 1 | 17327 | # -*- coding: utf-8 -*-
import os
import sys
import getpass
import locale
import logging
from argparse import SUPPRESS
from collections import OrderedDict
from datetime import date, datetime
import argcomplete
from moulinette import msignals, m18n
from moulinette.core import MoulinetteError
from moulinette.interfaces import (
BaseActionsMapParser,
BaseInterface,
ExtendedArgumentParser,
)
from moulinette.utils import log
logger = log.getLogger("moulinette.cli")
# CLI helpers ----------------------------------------------------------
CLI_COLOR_TEMPLATE = "\033[{:d}m\033[1m"
END_CLI_COLOR = "\033[m"
colors_codes = {
"red": CLI_COLOR_TEMPLATE.format(31),
"green": CLI_COLOR_TEMPLATE.format(32),
"yellow": CLI_COLOR_TEMPLATE.format(33),
"blue": CLI_COLOR_TEMPLATE.format(34),
"purple": CLI_COLOR_TEMPLATE.format(35),
"cyan": CLI_COLOR_TEMPLATE.format(36),
"white": CLI_COLOR_TEMPLATE.format(37),
}
def colorize(astr, color):
"""Colorize a string
Return a colorized string for printing in shell with style ;)
Keyword arguments:
- astr -- String to colorize
- color -- Name of the color
"""
if os.isatty(1):
return "{:s}{:s}{:s}".format(colors_codes[color], astr, END_CLI_COLOR)
else:
return astr
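# Example (illustrative): colorize("OK", "green") wraps the string in ANSI
# green escape codes when stdout is a tty, and returns it unchanged otherwise.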
def plain_print_dict(d, depth=0):
"""Print in a plain way a dictionary recursively
Print a dictionary recursively for scripting usage to the standard output.
Output formatting:
>>> d = {'key': 'value', 'list': [1,2], 'dict': {'key2': 'value2'}}
>>> plain_print_dict(d)
#key
value
#list
1
2
#dict
##key2
value2
Keyword arguments:
- d -- The dictionary to print
- depth -- The recursive depth of the dictionary
"""
# skip first key printing
if depth == 0 and (isinstance(d, dict) and len(d) == 1):
_, d = d.popitem()
if isinstance(d, (tuple, set)):
d = list(d)
if isinstance(d, list):
for v in d:
plain_print_dict(v, depth + 1)
elif isinstance(d, dict):
for k, v in d.items():
print("{}{}".format("#" * (depth + 1), k))
plain_print_dict(v, depth + 1)
else:
if isinstance(d, unicode):
d = d.encode("utf-8")
print(d)
def pretty_date(_date):
"""Display a date in the current time zone without ms and tzinfo
Argument:
- date -- The date or datetime to display
"""
import pytz # Lazy loading, this takes like 3+ sec on a RPi2 ?!
# Deduce system timezone
nowutc = datetime.now(tz=pytz.utc)
nowtz = datetime.now()
nowtz = nowtz.replace(tzinfo=pytz.utc)
offsetHour = nowutc - nowtz
offsetHour = int(round(offsetHour.total_seconds() / 3600))
localtz = "Etc/GMT%+d" % offsetHour
# Transform naive date into UTC date
if _date.tzinfo is None:
_date = _date.replace(tzinfo=pytz.utc)
# Convert UTC date into system locale date
_date = _date.astimezone(pytz.timezone(localtz))
if isinstance(_date, datetime):
return _date.strftime("%Y-%m-%d %H:%M:%S")
else:
return _date.strftime("%Y-%m-%d")
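# Illustrative example (assuming a system timezone of UTC+2):
#   pretty_date(datetime(2020, 1, 1, 10, 0))  -> "2020-01-01 12:00:00"
# A naive datetime is treated as UTC before being converted to the local timezone,
# so passing the same value with tzinfo=pytz.utc gives the same result.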
def pretty_print_dict(d, depth=0):
"""Print in a pretty way a dictionary recursively
Print a dictionary recursively with colors to the standard output.
Keyword arguments:
- d -- The dictionary to print
- depth -- The recursive depth of the dictionary
"""
keys = d.keys()
if not isinstance(d, OrderedDict):
keys = sorted(keys)
for k in keys:
v = d[k]
k = colorize(str(k), "purple")
if isinstance(v, (tuple, set)):
v = list(v)
if isinstance(v, list) and len(v) == 1:
v = v[0]
if isinstance(v, dict):
print("{:s}{}: ".format(" " * depth, k))
pretty_print_dict(v, depth + 1)
elif isinstance(v, list):
print("{:s}{}: ".format(" " * depth, k))
for key, value in enumerate(v):
if isinstance(value, tuple):
pretty_print_dict({value[0]: value[1]}, depth + 1)
elif isinstance(value, dict):
pretty_print_dict({key: value}, depth + 1)
else:
if isinstance(value, unicode):
value = value.encode("utf-8")
elif isinstance(v, date):
v = pretty_date(v)
print("{:s}- {}".format(" " * (depth + 1), value))
else:
if isinstance(v, unicode):
v = v.encode("utf-8")
elif isinstance(v, date):
v = pretty_date(v)
print("{:s}{}: {}".format(" " * depth, k, v))
def get_locale():
"""Return current user eocale"""
try:
lang = locale.getdefaultlocale()[0]
except Exception:
# In some edge case the locale lib fails ...
# c.f. https://forum.yunohost.org/t/error-when-trying-to-enter-user-information-in-admin-panel/11390/11
lang = os.getenv("LANG")
if not lang:
return ""
return lang[:2]
# CLI Classes Implementation -------------------------------------------
class TTYHandler(logging.StreamHandler):
"""TTY log handler
A handler class which prints logging records for a tty. The record is
    formatted differently depending on whether it is connected to a tty(-like)
device.
    If it's the case, the level name - optionally colorized - is prepended
to the message and the result is stored in the record as `message_key`
attribute. That way, a custom formatter can be defined. The default is
to output just the formatted message.
Anyway, if the stream is not a tty, just the message is output.
Note that records with a level higher or equal to WARNING are sent to
stderr. Otherwise, they are sent to stdout.
"""
LEVELS_COLOR = {
log.NOTSET: "white",
log.DEBUG: "white",
log.INFO: "cyan",
log.SUCCESS: "green",
log.WARNING: "yellow",
log.ERROR: "red",
log.CRITICAL: "red",
}
def __init__(self, message_key="fmessage"):
logging.StreamHandler.__init__(self)
self.message_key = message_key
def format(self, record):
"""Enhance message with level and colors if supported."""
msg = record.getMessage()
if self.supports_color():
level = ""
if self.level <= log.DEBUG:
# add level name before message
level = "%s " % record.levelname
elif record.levelname in ["SUCCESS", "WARNING", "ERROR", "INFO"]:
# add translated level name before message
level = "%s " % m18n.g(record.levelname.lower())
color = self.LEVELS_COLOR.get(record.levelno, "white")
msg = "{0}{1}{2}{3}".format(colors_codes[color], level, END_CLI_COLOR, msg)
if self.formatter:
# use user-defined formatter
record.__dict__[self.message_key] = msg
return self.formatter.format(record)
return msg
def emit(self, record):
# set proper stream first
if record.levelno >= log.WARNING:
self.stream = sys.stderr
else:
self.stream = sys.stdout
logging.StreamHandler.emit(self, record)
def supports_color(self):
"""Check whether current stream supports color."""
if hasattr(self.stream, "isatty") and self.stream.isatty():
return True
return False
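# Minimal usage sketch for TTYHandler (illustrative, not wired up by this module itself):
#   handler = TTYHandler()
#   handler.setFormatter(logging.Formatter("%(fmessage)s"))
#   logging.getLogger("moulinette.cli").addHandler(handler)
# Records at WARNING and above then go to stderr, everything below to stdout,
# with colors only when the stream is a tty.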
class ActionsMapParser(BaseActionsMapParser):
"""Actions map's Parser for the CLI
    Provide actions map parsing methods for CLI usage. The parser for
    the arguments is represented by an ExtendedArgumentParser object.
Keyword arguments:
- parser -- The ExtendedArgumentParser object to use
- subparser_kwargs -- Arguments to pass to the sub-parser group
- top_parser -- An ArgumentParser object whose arguments should
                        be taken into account but not parsed
"""
def __init__(
self, parent=None, parser=None, subparser_kwargs=None, top_parser=None, **kwargs
):
super(ActionsMapParser, self).__init__(parent)
if subparser_kwargs is None:
subparser_kwargs = {"title": "categories", "required": False}
self._parser = parser or ExtendedArgumentParser()
self._subparsers = self._parser.add_subparsers(**subparser_kwargs)
self.global_parser = parent.global_parser if parent else None
if top_parser:
self.global_parser = self._parser.add_argument_group("global arguments")
# Append each top parser action to the global group
for action in top_parser._actions:
action.dest = SUPPRESS
self.global_parser._add_action(action)
# Implement virtual properties
interface = "cli"
# Implement virtual methods
@staticmethod
def format_arg_names(name, full):
if name.startswith("-") and full:
return [name, full]
return [name]
def has_global_parser(self):
return True
def add_category_parser(self, name, category_help=None, **kwargs):
"""Add a parser for a category
Keyword arguments:
- category_help -- A brief description for the category
Returns:
A new ActionsMapParser object for the category
"""
parser = self._subparsers.add_parser(
name, description=category_help, help=category_help, **kwargs
)
return self.__class__(self, parser, {"title": "subcommands", "required": True})
def add_subcategory_parser(self, name, subcategory_help=None, **kwargs):
"""Add a parser for a subcategory
Keyword arguments:
- subcategory_help -- A brief description for the category
Returns:
A new ActionsMapParser object for the category
"""
parser = self._subparsers.add_parser(
name,
type_="subcategory",
description=subcategory_help,
help=subcategory_help,
**kwargs
)
return self.__class__(self, parser, {"title": "actions", "required": True})
def add_action_parser(
self,
name,
tid,
action_help=None,
deprecated=False,
deprecated_alias=[],
**kwargs
):
"""Add a parser for an action
Keyword arguments:
- action_help -- A brief description for the action
            - deprecated -- Whether the action is deprecated
- deprecated_alias -- A list of deprecated action alias names
Returns:
A new ExtendedArgumentParser object for the action
"""
return self._subparsers.add_parser(
name,
type_="action",
help=action_help,
description=action_help,
deprecated=deprecated,
deprecated_alias=deprecated_alias,
)
def add_global_arguments(self, arguments):
for argument_name, argument_options in arguments.items():
# will adapt arguments name for cli or api context
names = self.format_arg_names(
str(argument_name), argument_options.pop("full", None)
)
self.global_parser.add_argument(*names, **argument_options)
def auth_required(self, args, **kwargs):
# FIXME? idk .. this try/except is duplicated from parse_args below
# Just to be able to obtain the tid
try:
ret = self._parser.parse_args(args)
except SystemExit:
raise
except:
logger.exception("unable to parse arguments '%s'", " ".join(args))
raise MoulinetteError("error_see_log")
tid = getattr(ret, "_tid", None)
if self.get_conf(tid, "authenticate"):
authenticator = self.get_conf(tid, "authenticator")
# If several authenticator, use the default one
if isinstance(authenticator, dict):
if "default" in authenticator:
authenticator = "default"
else:
# TODO which one should we use?
pass
return authenticator
else:
return False
def parse_args(self, args, **kwargs):
try:
ret = self._parser.parse_args(args)
except SystemExit:
raise
except:
logger.exception("unable to parse arguments '%s'", " ".join(args))
raise MoulinetteError("error_see_log")
else:
self.prepare_action_namespace(getattr(ret, "_tid", None), ret)
self._parser.dequeue_callbacks(ret)
return ret
class Interface(BaseInterface):
"""Command-line Interface for the moulinette
Initialize an interface connected to the standard input/output
stream and to a given actions map.
Keyword arguments:
- actionsmap -- The ActionsMap instance to connect to
"""
def __init__(self, actionsmap):
# Set user locale
m18n.set_locale(get_locale())
# Connect signals to handlers
msignals.set_handler("display", self._do_display)
if os.isatty(1):
msignals.set_handler("authenticate", self._do_authenticate)
msignals.set_handler("prompt", self._do_prompt)
self.actionsmap = actionsmap
def run(self, args, output_as=None, password=None, timeout=None):
"""Run the moulinette
Process the action corresponding to the given arguments 'args'
and print the result.
Keyword arguments:
- args -- A list of argument strings
- output_as -- Output result in another format. Possible values:
- json: return a JSON encoded string
- plain: return a script-readable output
- none: do not output the result
- password -- The password to use in case of authentication
            - timeout -- Number of seconds to wait for the lock before giving up (another command may currently hold it); by default there is no timeout and the command waits until the lock can be acquired
"""
if output_as and output_as not in ["json", "plain", "none"]:
raise MoulinetteError("invalid_usage")
# auto-complete
argcomplete.autocomplete(self.actionsmap.parser._parser)
# Set handler for authentication
if password:
msignals.set_handler("authenticate", lambda a: a(password=password))
else:
if os.isatty(1):
msignals.set_handler("authenticate", self._do_authenticate)
try:
ret = self.actionsmap.process(args, timeout=timeout)
except (KeyboardInterrupt, EOFError):
raise MoulinetteError("operation_interrupted")
if ret is None or output_as == "none":
return
# Format and print result
if output_as:
if output_as == "json":
import json
from moulinette.utils.serialize import JSONExtendedEncoder
print(json.dumps(ret, cls=JSONExtendedEncoder))
else:
plain_print_dict(ret)
elif isinstance(ret, dict):
pretty_print_dict(ret)
else:
print(ret)
# Signals handlers
def _do_authenticate(self, authenticator):
"""Process the authentication
Handle the core.MoulinetteSignals.authenticate signal.
"""
# TODO: Allow token authentication?
help = authenticator.extra.get("help")
msg = m18n.n(help) if help else m18n.g("password")
return authenticator(password=self._do_prompt(msg, True, False, color="yellow"))
def _do_prompt(self, message, is_password, confirm, color="blue"):
"""Prompt for a value
Handle the core.MoulinetteSignals.prompt signal.
Keyword arguments:
- color -- The color to use for prompting message
"""
if is_password:
prompt = lambda m: getpass.getpass(colorize(m18n.g("colon", m), color))
else:
prompt = lambda m: raw_input(colorize(m18n.g("colon", m), color))
value = prompt(message)
if confirm:
m = message[0].lower() + message[1:]
if prompt(m18n.g("confirm", prompt=m)) != value:
raise MoulinetteError("values_mismatch")
return value
def _do_display(self, message, style):
"""Display a message
Handle the core.MoulinetteSignals.display signal.
"""
if isinstance(message, unicode):
message = message.encode("utf-8")
if style == "success":
print("{} {}".format(colorize(m18n.g("success"), "green"), message))
elif style == "warning":
print("{} {}".format(colorize(m18n.g("warning"), "yellow"), message))
elif style == "error":
print("{} {}".format(colorize(m18n.g("error"), "red"), message))
else:
print(message)
| agpl-3.0 | -4,121,375,827,570,136,000 | 31.087037 | 245 | 0.581347 | false |
alej0varas/django-o2o_tagging | o2o_tagging/models.py | 1 | 2048 | from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from model_utils.managers import PassThroughManager
from .managers import O2OTagQuerySet
class O2OTag(models.Model):
# The object that is tagging
tagger_content_type = models.ForeignKey(ContentType,
related_name="taggers")
tagger_object_id = models.PositiveIntegerField()
tagger_content_object = generic.GenericForeignKey("tagger_content_type",
"tagger_object_id")
# The object that is tagged
tagged_content_type = models.ForeignKey(ContentType,
related_name="taggeds")
tagged_object_id = models.PositiveIntegerField()
tagged_content_object = generic.GenericForeignKey("tagged_content_type",
"tagged_object_id")
# The object where the tagged objects is tagged
tagged_in_content_type = models.ForeignKey(
ContentType,
related_name="tags")
tagged_in_object_id = models.PositiveIntegerField()
tagged_in_content_object = generic.GenericForeignKey(
"tagged_in_content_type",
"tagged_in_object_id")
created_at = models.DateTimeField(auto_now_add=True)
objects = PassThroughManager.for_queryset_class(O2OTagQuerySet)()
class Meta:
unique_together = ('tagger_content_type', 'tagger_object_id',
'tagged_content_type', 'tagged_object_id',
'tagged_in_content_type', 'tagged_in_object_id')
def __unicode__(self):
return u'%s -> %s | %s' % (self.tagger, self.tagged, self.tagged_in)
# Convenient shortcuts
@property
def tagged(self):
return self.tagged_content_object
@property
def tagger(self):
return self.tagger_content_object
@property
def tagged_in(self):
return self.tagged_in_content_object
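# Illustrative usage (the user/friend/photo objects are assumptions, not part of this app):
#   O2OTag.objects.create(
#       tagger_content_object=request.user,   # who is tagging
#       tagged_content_object=friend,         # who gets tagged
#       tagged_in_content_object=photo,       # where the tag was made
#   )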
| gpl-3.0 | 4,474,573,030,350,421,500 | 34.929825 | 76 | 0.625 | false |
NarlikarLab/DIVERSITY | weblogoMod/weblogolib/__init__.py | 1 | 41331 | #!/usr/bin/env python
# -------------------------------- WebLogo --------------------------------
# Copyright (c) 2003-2004 The Regents of the University of California.
# Copyright (c) 2005 Gavin E. Crooks
# Copyright (c) 2006-2011, The Regents of the University of California, through
# Lawrence Berkeley National Laboratory (subject to receipt of any required
# approvals from the U.S. Dept. of Energy). All rights reserved.
# This software is distributed under the new BSD Open Source License.
# <http://www.opensource.org/licenses/bsd-license.html>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# (1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and or other materials provided with the distribution.
#
# (3) Neither the name of the University of California, Lawrence Berkeley
# National Laboratory, U.S. Dept. of Energy nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Replicates README.txt
"""
WebLogo (http://code.google.com/p/weblogo/) is a tool for creating sequence
logos from biological sequence alignments. It can be run on the command line,
as a standalone webserver, as a CGI webapp, or as a python library.
The main WebLogo webserver is located at http://weblogo.threeplusone.com
Please consult the manual for installation instructions and more information:
(Also located in the weblogolib/htdocs subdirectory.)
http://weblogo.threeplusone.com/manual.html
For help on the command line interface run
./weblogo --help
To build a simple logo run
./weblogo < cap.fa > logo0.eps
To run as a standalone webserver at localhost:8080
./weblogo --serve
To create a logo in python code:
>>> from weblogolib import *
>>> fin = open('cap.fa')
>>> seqs = read_seq_data(fin)
>>> data = LogoData.from_seqs(seqs)
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
>>> fout = open('cap.eps', 'w')
>>> eps_formatter( data, format, fout)
-- Distribution and Modification --
This package is distributed under the new BSD Open Source License.
Please see the LICENSE.txt file for details on copyright and licensing.
The WebLogo source code can be downloaded from
http://code.google.com/p/weblogo/
WebLogo requires Python 2.5, 2.6 or 2.7, and the python
array package 'numpy' (http://www.scipy.org/Download)
Generating logos in PDF or bitmap graphics formats require that the ghostscript
program 'gs' be installed. Scalable Vector Graphics (SVG) format also requires
the program 'pdf2svg'.
"""
import sys
import copy
import os
from datetime import datetime
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from math import sqrt
from weblogoMod.corebio.data import rna_letters, dna_letters, amino_acid_letters
from string import Template
from subprocess import *
from weblogoMod.corebio.utils import resource_string, resource_filename
from math import log, sqrt, exp
# Avoid 'from numpy import *' since numpy has lots of names defined
from numpy import array, asarray, float64, ones, zeros, int32,all,any, shape
import numpy as na
from color import *
from colorscheme import *
from weblogoMod.corebio.seq import Alphabet, Seq, SeqList
from weblogoMod.corebio import seq_io
from weblogoMod.corebio.utils import isfloat, find_command, ArgumentError, stdrepr
from weblogoMod.corebio.moremath import *
from weblogoMod.corebio.data import amino_acid_composition
from weblogoMod.corebio.seq import unambiguous_rna_alphabet, unambiguous_dna_alphabet, unambiguous_protein_alphabet
import weblogoMod.corebio
from logomath import Dirichlet
# ------ META DATA ------
__all__ = [ 'LogoOptions',
'description',
'__version__',
'LogoFormat',
'LogoData',
'GhostscriptAPI',
'std_color_schemes',
'default_color_schemes',
'classic',
'std_units',
'std_sizes',
'std_alphabets',
'std_percentCG',
'pdf_formatter',
'jpeg_formatter',
'png_formatter',
'png_print_formatter',
'txt_formatter',
'eps_formatter',
'formatters',
'default_formatter',
'base_distribution',
'equiprobable_distribution',
'read_seq_data',
'color',
'colorscheme',
'logomath',
]
description = "Create sequence logos from biological sequence alignments."
__version__ = weblogoMod.corebio.__version__
# These keywords are substituted by subversion.
# The date and revision will only tell the truth after a branch or tag,
# since different files in trunk will have been changed at different times
release_date ="$Date: 2012-07-02 19:28:12 -0700 (Mon, 02 Jul 2012) $".split()[1]
release_build = "$Revision: 145 $".split()[1]
release_description = "WebLogo %s (%s)" % (__version__, release_date)
def cgi(htdocs_directory) :
import weblogolib._cgi
weblogolib._cgi.main(htdocs_directory)
class GhostscriptAPI(object) :
"""Interface to the command line program Ghostscript ('gs')"""
formats = ('png', 'pdf', 'jpeg')
def __init__(self, path=None) :
try:
command = find_command('gs', path=path)
except EnvironmentError:
try:
command = find_command('gswin32c.exe', path=path)
except EnvironmentError:
raise EnvironmentError("Could not find Ghostscript on path."
" There should be either a gs executable or a gswin32c.exe on your system's path")
self.command = command
def version(self) :
args = [self.command, '--version']
try :
p = Popen(args, stdout=PIPE)
(out,err) = p.communicate()
except OSError :
raise RuntimeError("Cannot communicate with ghostscript.")
return out.strip()
def convert(self, format, fin, fout, width, height, resolution=300) :
device_map = { 'png':'png16m', 'pdf':'pdfwrite', 'jpeg':'jpeg'}
try :
device = device_map[format]
except KeyError:
raise ValueError("Unsupported format.")
args = [self.command,
"-sDEVICE=%s" % device,
"-dPDFSETTINGS=/screen", #Modification printer changed to screen
#"-q", # Quite: Do not dump messages to stdout.
"-sstdout=%stderr", # Redirect messages and errors to stderr
"-sOutputFile=-", # Stdout
"-dUseCIEColor", #Modification
"-dDEVICEWIDTHPOINTS=%s" % str(width),
"-dDEVICEHEIGHTPOINTS=%s" % str(height),
"-dSAFER", # For added security
"-dNOPAUSE",]
if device != 'pdf' :
args.append("-r%s" % str(resolution) )
            if resolution < 300 : # Antialias if resolution is less than 300 DPI
args.append("-dGraphicsAlphaBits=4")
args.append("-dTextAlphaBits=4")
args.append("-dAlignToPixels=0")
args.append("-") # Read from stdin. Must be last argument.
error_msg = "Unrecoverable error : Ghostscript conversion failed " \
"(Invalid postscript?). %s" % " ".join(args)
source = fin.read()
try :
p = Popen(args, stdin=PIPE, stdout = PIPE, stderr= PIPE)
(out,err) = p.communicate(source)
except OSError :
raise RuntimeError(error_msg)
if p.returncode != 0 :
print("COMMAND " + str(self.command))
print("ARGS" + str(args))
error_msg += '\nReturn code: %i\n' % p.returncode
if err is not None : error_msg += err
raise RuntimeError(error_msg)
print >>fout, out
# end class Ghostscript
aa_composition = [ amino_acid_composition[_k] for _k in
unambiguous_protein_alphabet]
# ------ DATA ------
classic = ColorScheme([
ColorGroup("G", "orange" ),
ColorGroup("TU", "red"),
ColorGroup("C", "blue"),
ColorGroup("A", "green")
] )
std_color_schemes = {"auto": None, # Depends on sequence type
"monochrome": monochrome,
"base pairing": base_pairing,
"classic": classic,
"hydrophobicity" : hydrophobicity,
"chemistry" : chemistry,
"charge" : charge,
}#
default_color_schemes = {
unambiguous_protein_alphabet: hydrophobicity,
unambiguous_rna_alphabet: base_pairing,
unambiguous_dna_alphabet: base_pairing
}
std_units = {
"bits" : 1./log(2),
"nats" : 1.,
"digits" : 1./log(10),
"kT" : 1.,
"kJ/mol" : 8.314472 *298.15 /1000.,
"kcal/mol": 1.987 *298.15 /1000.,
"probability" : None,
}
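# Illustrative conversion with std_units: entropies are computed in nats, so an
# entropy of log(2) nats corresponds to log(2) * std_units["bits"] == 1.0 bit.
# "probability" maps to None and is handled as a special case rather than a factor.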
# The base stack width is set equal to 9pt Courier.
# (Courier has a width equal to 3/5 of the point size.)
# Check that can get 80 characters in journal page @small
# 40 characters in a journal column
std_sizes = {
"small" : 5.4 ,
"medium" : 5.4*2,
"large" : 5.4*3
}
std_alphabets = {
'protein': unambiguous_protein_alphabet,
'rna': unambiguous_rna_alphabet,
'dna': unambiguous_dna_alphabet}
std_percentCG = {
'H. sapiens' : 40.,
'E. coli' : 50.5,
'S. cerevisiae' : 38.,
'C. elegans' : 36.,
'D. melanogaster': 43.,
'M. musculus' : 42.,
'T. thermophilus' : 69.4,
}
# Thermus thermophilus: Henne A, Bruggemann H, Raasch C, Wiezer A, Hartsch T,
# Liesegang H, Johann A, Lienard T, Gohl O, Martinez-Arias R, Jacobi C,
# Starkuviene V, Schlenczeck S, Dencker S, Huber R, Klenk HP, Kramer W,
# Merkl R, Gottschalk G, Fritz HJ: The genome sequence of the extreme
# thermophile Thermus thermophilus.
# Nat Biotechnol 2004, 22:547-53
class LogoOptions(object) :
""" A container for all logo formatting options. Not all of these
are directly accessible through the CLI or web interfaces.
To display LogoOption defaults:
>>> from weblogolib import *
>>> LogoOptions()
All physical lengths are measured in points. (72 points per inch, 28.3 points per cm)
String attributes:
o creator_text -- Embedded as comment in figures.
o logo_title
o logo_label
o unit_name -- See std_units for options. (Default 'bits')
o yaxis_label -- Defaults to unit_name
o xaxis_label
o fineprint -- Defaults to WebLogo name and version
Boolean attributes:
o show_yaxis
o show_xaxis
o show_ends
o show_fineprint
        o show_errorbars -- Draw errorbars (default: True)
        o show_boxes -- Draw boxes around stack characters (default: False)
o debug -- Draw extra graphics debugging information.
o rotate_numbers -- Draw xaxis numbers with vertical orientation?
        o scale_width -- boolean, scale width of characters proportional to the fraction of non-gap symbols in the column
o pad_right -- Make a single line logo the same width as multiline logos (default: False)
Other attributes:
o stacks_per_line
o yaxis_tic_interval
o yaxis_minor_tic_ratio
o yaxis_scale
o xaxis_tic_interval
o number_interval
o shrink_fraction -- Proportional shrinkage of characters if show_boxes is true.
o errorbar_fraction
o errorbar_width_fraction
o errorbar_gray
o resolution -- Dots per inch (default: 96). Used for bitmapped output formats
o default_color
o color_scheme
o stack_width --
o stack_aspect_ratio -- Ratio of stack height to width (default: 5)
o logo_margin -- Default: 2 pts
o stroke_width -- Default: 0.5 pts
o tic_length -- Default: 5 pts
o stack_margin -- Default: 0.5 pts
o small_fontsize -- Small text font size in points
o fontsize -- Regular text font size in points
o title_fontsize -- Title text font size in points
o number_fontsize -- Font size for axis-numbers, in points.
o text_font
o logo_font
o title_font
o first_index
o logo_start
o logo_end
"""
def __init__(self, **kwargs) :
""" Create a new LogoOptions instance.
>>> L = LogoOptions(logo_title = "Some Title String")
>>> L.show_yaxis = False
>>> repr(L)
"""
self.alphabet = None
self.creator_text = release_description
self.logo_title = ""
self.logo_label = ""
self.stacks_per_line = 40
self.unit_name = "bits"
self.show_yaxis = True
        # yaxis_label default depends on other settings. See LogoFormat
self.yaxis_label = None
self.yaxis_tic_interval = 1.
self.yaxis_minor_tic_ratio = 5
self.yaxis_scale = None
self.show_xaxis = True
self.xaxis_label = ""
self.xaxis_tic_interval =1
self.rotate_numbers = False
self.number_interval = 5
self.show_ends = False
self.annotate = None
self.show_fineprint = True
self.fineprint = "Based on WebLogo "+__version__
self.show_boxes = False
self.shrink_fraction = 0.5
self.show_errorbars = True
self.errorbar_fraction = 0.90
self.errorbar_width_fraction = 0.25
self.errorbar_gray = 0.75
self.resolution = 96. # Dots per inch
self.default_color = Color.by_name("black")
self.color_scheme = None
#self.show_color_key = False # NOT yet implemented
self.debug = False
self.logo_margin = 2
self.stroke_width = 0.5
self.tic_length = 5
self.stack_width = std_sizes["large"]
self.stack_aspect_ratio = 5
self.stack_margin = 0.5
self.pad_right = False
self.small_fontsize = 6
self.fontsize = 10
self.title_fontsize = 12
self.number_fontsize = 8
self.text_font = "ArialMT"
self.logo_font = "Arial-BoldMT"
self.title_font = "ArialMT"
self.first_index = 1
self.logo_start = None
self.logo_end=None
self.scale_width = True
self.reverse_stacks = True # If true, draw stacks with largest letters on top.
from weblogoMod.corebio.utils import update
update(self, **kwargs)
    def __repr__(self) :
        attributes = vars(self).keys()
        attributes.sort()
        return stdrepr(self, attributes )
# End class LogoOptions
class LogoFormat(LogoOptions) :
""" Specifies the format of the logo. Requires LogoData and LogoOptions
objects.
>>> data = LogoData.from_seqs(seqs )
>>> options = LogoOptions()
>>> options.title = "A Logo Title"
>>> format = LogoFormat(data, options)
Raises an ArgumentError if arguments are invalid.
"""
def __init__(self, data, options= None) :
""" Create a new LogoFormat instance.
"""
LogoOptions.__init__(self)
if options is not None :
self.__dict__.update(options.__dict__)
self.alphabet = data.alphabet
self.seqlen = data.length
# Derived parameters.
self.show_title = False
self.show_xaxis_label = False
self.yaxis_minor_tic_interval = None
self.lines_per_logo = None
self.char_width = None # Maximum character width. Stack width minus margins.
self.line_margin_left = None
self.line_margin_right = None
self.line_margin_bottom = None
self.line_margin_top = None
self.title_height = None
self.xaxis_label_height = None
self.line_height = None
self.line_width = None
self.logo_height = None
self.logo_width = None
self.creation_date = None
self.end_type = None
self.stack_height = self.stack_width * self.stack_aspect_ratio
# Attribute to test, test, error message
arg_conditions = (
("stacks_per_line", lambda x: x>0 , "Stacks per line must be positive."),
("stack_width", lambda x: x>0.0, "Stack width must be greater than zero."),
("stack_aspect_ratio" , lambda x: x>0, "Stack aspect ratio must be greater than zero."),
("fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("small_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("title_fontsize" , lambda x: x>0 , "Font sizes must be positive."),
("errorbar_fraction" , lambda x: x>=0.0 and x<=1.0,
"The visible fraction of the error bar must be between zero and one."),
("yaxis_tic_interval" , lambda x: x>=0.0 , "The yaxis tic interval cannot be negative."),
("yaxis_minor_tic_interval" , lambda x: not (x and x<0.0) , "Distances cannot be negative."),
("xaxis_tic_interval" , lambda x: x>0.0 , "Tic interval must be greater than zero."),
("number_interval" , lambda x: x>0.0 , "Invalid interval between numbers."),
("shrink_fraction" , lambda x: x>=0.0 and x<=1.0 , "Invalid shrink fraction."),
("stack_margin" , lambda x: x>0.0 , "Invalid stack margin."),
("logo_margin" , lambda x: x>0.0 , "Invalid logo margin."),
("stroke_width", lambda x: x>0.0 , "Invalid stroke width."),
("tic_length" , lambda x: x>0.0 , "Invalid tic length."),
)
# Run arguments tests. The second, attribute argument to the ArgumentError is
# used by the UI to provide user feedback.
# FIXME: More validation
for test in arg_conditions :
if not test[1]( getattr(self,test[0]) ) : raise ArgumentError(test[2], test[0])
# Inclusive upper and lower bounds
# FIXME: Validate here. Move from eps_formatter
if self.logo_start is None: self.logo_start = self.first_index
if self.logo_end is None :
self.logo_end = self.seqlen + self.first_index -1
self.total_stacks = self.logo_end - self.logo_start +1
if self.logo_start - self.first_index <0 :
raise ArgumentError(
"Logo range extends before start of available sequence.",
'logo_range')
if self.logo_end - self.first_index >= self.seqlen :
raise ArgumentError(
"Logo range extends beyond end of available sequence.",
'logo_range')
if self.logo_title : self.show_title = True
if not self.fineprint : self.show_fineprint = False
if self.xaxis_label : self.show_xaxis_label = True
if self.yaxis_label is None :
self.yaxis_label = self.unit_name
if self.yaxis_label :
self.show_yaxis_label = True
else :
self.show_yaxis_label = False
self.show_ends = False
if not self.yaxis_scale :
conversion_factor = std_units[self.unit_name]
if conversion_factor :
self.yaxis_scale=log(len(self.alphabet))*conversion_factor
else :
self.yaxis_scale=1.0 # probability units
if self.yaxis_scale<=0.0 :
raise ArgumentError("Invalid yaxis scale", 'yaxis_scale',)
if self.yaxis_tic_interval >= self.yaxis_scale:
self.yaxis_tic_interval /= 2.
self.yaxis_minor_tic_interval \
= float(self.yaxis_tic_interval)/self.yaxis_minor_tic_ratio
if self.color_scheme is None :
if self.alphabet in default_color_schemes :
self.color_scheme = default_color_schemes[self.alphabet]
else :
self.color_scheme = monochrome
self.lines_per_logo = 1+ ( (self.total_stacks-1) / self.stacks_per_line)
if self.lines_per_logo==1 and not self.pad_right:
self.stacks_per_line = min(self.stacks_per_line, self.total_stacks)
self.char_width = self.stack_width - 2* self.stack_margin
if self.show_yaxis :
self.line_margin_left = self.fontsize * 3.0
else :
self.line_margin_left = 0
if self.show_ends :
self.line_margin_right = self.fontsize *1.5
else :
self.line_margin_right = self.fontsize
if self.show_xaxis :
if self.rotate_numbers :
self.line_margin_bottom = self.number_fontsize *2.5
else:
self.line_margin_bottom = self.number_fontsize *1.5
else :
self.line_margin_bottom = 4
self.line_margin_top = 4
if self.show_title :
self.title_height = self.title_fontsize
else :
self.title_height = 0
self.xaxis_label_height =0.
if self.show_xaxis_label :
self.xaxis_label_height += self.fontsize
if self.show_fineprint :
self.xaxis_label_height += self.small_fontsize
self.line_height = (self.stack_height + self.line_margin_top +
self.line_margin_bottom )
self.line_width = (self.stack_width*self.stacks_per_line +
self.line_margin_left + self.line_margin_right )
self.logo_height = int(2*self.logo_margin + self.title_height \
+ self.xaxis_label_height + self.line_height*self.lines_per_logo)
self.logo_width = int(2*self.logo_margin + self.line_width )
self.creation_date = datetime.now().isoformat(' ')
end_type = '-'
end_types = {
unambiguous_protein_alphabet: 'p',
unambiguous_rna_alphabet: '-',
unambiguous_dna_alphabet: 'd'
}
if self.show_ends and self.alphabet in end_types:
end_type = end_types[self.alphabet]
self.end_type = end_type
if self.annotate is None :
self.annotate = []
for i in range(self.seqlen):
index = i + self.first_index
if index % self.number_interval == 0 :
self.annotate.append( "%d"%index)
else :
self.annotate.append("")
if len(self.annotate)!=self.seqlen :
raise ArgumentError(
"Annotations must be same length as sequences.",
'annotate')
# End __init__
# End class LogoFormat
# ------ Logo Formaters ------
# Each formatter is a function f(LogoData, LogoFormat, output file).
# that draws a representation of the logo into the given file.
# The main graphical formatter is eps_formatter. A mapping 'formatters'
# containing all available formatters is located after the formatter
# definitions.
def pdf_formatter(data, format, fout) :
""" Generate a logo in PDF format."""
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert('pdf', feps, fout, format.logo_width, format.logo_height)
def _bitmap_formatter(data, format, fout, device) :
feps = StringIO()
eps_formatter(data, format, feps)
feps.seek(0)
gs = GhostscriptAPI()
gs.convert(device, feps, fout,
format.logo_width, format.logo_height, format.resolution)
def jpeg_formatter(data, format, fout) :
""" Generate a logo in JPEG format."""
_bitmap_formatter(data, format, fout, device="jpeg")
def svg_formatter(data, format, fout) :
""" Generate a logo in Scalable Vector Graphics (SVG) format.
Requires the program 'pdf2svg' be installed.
"""
fpdf = StringIO()
pdf_formatter(data, format, fpdf)
fpdf.seek(0)
try:
command = find_command('pdf2svg')
except EnvironmentError:
raise EnvironmentError("Scalable Vector Graphics (SVG) format requires the program 'pdf2svg'. "
"Cannot find 'pdf2svg' on search path.")
import tempfile, os
fpdfi, fname_pdf = tempfile.mkstemp(suffix=".pdf")
fsvgi, fname_svg = tempfile.mkstemp(suffix=".svg")
try:
fpdf2 = open(fname_pdf, 'w')
fpdf2.write(fpdf.getvalue() )
fpdf2.seek(0)
args = [command, fname_pdf, fname_svg]
p = Popen(args)
(out,err) = p.communicate()
fsvg = open(fname_svg)
fout.write(fsvg.read())
finally:
os.remove(fname_svg)
os.remove(fname_pdf)
def png_formatter(data, format, fout) :
""" Generate a logo in PNG format."""
_bitmap_formatter(data, format, fout, device="png")
def png_print_formatter(data, format, fout) :
""" Generate a logo in PNG format with print quality (600 DPI) resolution."""
format.resolution = 600
_bitmap_formatter(data, format, fout, device="png")
def txt_formatter( logodata, format, fout) :
""" Create a text representation of the logo data.
"""
print >>fout, str(logodata)
def eps_formatter( logodata, format, fout) :
""" Generate a logo in Encapsulated Postscript (EPS)"""
substitutions = {}
from_format =[
"creation_date", "logo_width", "logo_height",
"lines_per_logo", "line_width", "line_height",
"line_margin_right","line_margin_left", "line_margin_bottom",
"line_margin_top", "title_height", "xaxis_label_height",
"creator_text", "logo_title", "logo_margin",
"stroke_width", "tic_length",
"stacks_per_line", "stack_margin",
"yaxis_label", "yaxis_tic_interval", "yaxis_minor_tic_interval",
"xaxis_label", "xaxis_tic_interval", "number_interval",
"fineprint", "shrink_fraction", "errorbar_fraction",
"errorbar_width_fraction",
"errorbar_gray", "small_fontsize", "fontsize",
"title_fontsize", "number_fontsize", "text_font",
"logo_font", "title_font",
"logo_label", "yaxis_scale", "end_type",
"debug", "show_title", "show_xaxis",
"show_xaxis_label", "show_yaxis", "show_yaxis_label",
"show_boxes", "show_errorbars", "show_fineprint",
"rotate_numbers", "show_ends", "stack_height",
"stack_width"
]
for s in from_format :
substitutions[s] = getattr(format,s)
substitutions["shrink"] = str(format.show_boxes).lower()
# --------- COLORS --------------
def format_color(color):
return " ".join( ("[",str(color.red) , str(color.green),
str(color.blue), "]"))
substitutions["default_color"] = format_color(format.default_color)
colors = []
for group in format.color_scheme.groups :
cf = format_color(group.color)
for s in group.symbols :
colors.append( " ("+s+") " + cf )
substitutions["color_dict"] = "\n".join(colors)
data = []
# Unit conversion. 'None' for probability units
conv_factor = std_units[format.unit_name]
data.append("StartLine")
seq_from = format.logo_start- format.first_index
seq_to = format.logo_end - format.first_index +1
# seq_index : zero based index into sequence data
# logo_index : User visible coordinate, first_index based
# stack_index : zero based index of visible stacks
for seq_index in range(seq_from, seq_to) :
logo_index = seq_index + format.first_index
stack_index = seq_index - seq_from
if stack_index!=0 and (stack_index % format.stacks_per_line) ==0 :
data.append("")
data.append("EndLine")
data.append("StartLine")
data.append("")
data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# if format.annotate[seq_index][-1] == "*":
# data.append("0 0 1 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
# else:
# data.append("0 0 0 setrgbcolor\n(%s) StartStack" % format.annotate[seq_index] )
if conv_factor:
stack_height = logodata.entropy[seq_index] * std_units[format.unit_name]
else :
stack_height = 1.0 # Probability
s = zip(logodata.counts[seq_index], logodata.alphabet)
def mycmp( c1, c2 ) :
# Sort by frequency. If equal frequency then reverse alphabetic
if c1[0] == c2[0] : return cmp(c2[1], c1[1])
return cmp(c1[0], c2[0])
s.sort(mycmp)
if not format.reverse_stacks: s.reverse()
C = float(sum(logodata.counts[seq_index]))
if C > 0.0 :
fraction_width = 1.0
if format.scale_width :
fraction_width = logodata.weight[seq_index]
# print >>sys.stderr, fraction_width
for c in s:
data.append(" %f %f (%s) ShowSymbol" % (fraction_width, c[0]*stack_height/C, c[1]) )
# Draw error bar on top of logo. Replaced by DrawErrorbarFirst above.
if logodata.entropy_interval is not None and conv_factor and C>0.0:
low, high = logodata.entropy_interval[seq_index]
center = logodata.entropy[seq_index]
low *= conv_factor
high *= conv_factor
center *=conv_factor
if high> format.yaxis_scale : high = format.yaxis_scale
down = (center - low)
up = (high - center)
data.append(" %f %f DrawErrorbar" % (down, up) )
data.append("EndStack")
data.append("")
data.append("EndLine")
substitutions["logo_data"] = "\n".join(data)
# Create and output logo
template = resource_string( __name__, 'template.eps', __file__)
logo = Template(template).substitute(substitutions)
print >>fout, logo
# map between output format names and logo
formatters = {
'eps': eps_formatter,
'pdf': pdf_formatter,
'png': png_formatter,
'png_print' : png_print_formatter,
'jpeg' : jpeg_formatter,
'svg' : svg_formatter,
'logodata' : txt_formatter,
}
default_formatter = eps_formatter
def parse_prior(composition, alphabet, weight=None) :
""" Parse a description of the expected monomer distribution of a sequence.
Valid compositions:
    - None or 'none' : No composition specified
- 'auto' or 'automatic': Use the typical average distribution
for proteins and an equiprobable distribution for
everything else.
- 'equiprobable' : All monomers have the same probability.
- a percentage, e.g. '45%' or a fraction '0.45':
The fraction of CG bases for nucleotide alphabets
- a species name, e.g. 'E. coli', 'H. sapiens' :
            Use the average CG percentage for the species'
genome.
- An explicit distribution, e.g. {'A':10, 'C':40, 'G':40, 'T':10}
"""
if composition is None: return None
comp = composition.strip()
if comp.lower() == 'none': return None
if weight is None and alphabet is not None:
weight = sqrt(float(len(alphabet)))
if weight<0 : raise ValueError("Weight cannot be negative.")
if comp.lower() == 'equiprobable' :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp.lower() == 'auto' or comp.lower() == 'automatic':
if alphabet == unambiguous_protein_alphabet :
prior = weight * asarray(aa_composition, float64)
else :
prior = weight * equiprobable_distribution(len(alphabet))
elif comp in std_percentCG :
prior = weight * base_distribution(std_percentCG[comp])
elif comp[-1] == '%' :
prior = weight * base_distribution( float(comp[:-1]))
elif isfloat(comp) :
prior = weight * base_distribution( float(comp)*100. )
elif composition[0] == '{' and composition[-1] == '}' :
explicit = composition[1: -1]
explicit = explicit.replace(',',' ').replace("'", ' ').replace('"',' ').replace(':', ' ').split()
if len(explicit) != len(alphabet)*2 :
#print explicit
raise ValueError("Explicit prior does not match length of alphabet")
prior = - ones(len(alphabet), float64)
try :
for r in range(len(explicit)/2) :
letter = explicit[r*2]
index = alphabet.ord(letter)
value = float(explicit[r*2 +1])
prior[index] = value
except ValueError :
raise ValueError("Cannot parse explicit composition")
if any(prior==-1.) :
raise ValueError("Explicit prior does not match alphabet")
prior/= sum(prior)
prior *= weight
else :
raise ValueError("Unknown or malformed composition: %s"%composition)
if len(prior) != len(alphabet) :
raise ValueError(
"The sequence alphabet and composition are incompatible.")
return prior
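# Illustrative calls (assuming the unambiguous DNA alphabet):
#   parse_prior("equiprobable", unambiguous_dna_alphabet)    # flat prior
#   parse_prior("45%", unambiguous_dna_alphabet)             # 45% C+G content
#   parse_prior("{'A':10, 'C':40, 'G':40, 'T':10}", unambiguous_dna_alphabet)
# Each returns an array of pseudocounts summing to the weight (sqrt(4) = 2 by default).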
def base_distribution(percentCG) :
A = (1. - (percentCG/100.))/2.
C = (percentCG/100.)/2.
G = (percentCG/100.)/2.
T = (1. - (percentCG/100))/2.
return asarray((A,C,G,T), float64)
def equiprobable_distribution( length) :
return ones( (length), float64) /length
def read_seq_data(lines,
input_parser=seq_io.read,
alphabet=None,
ignore_lower_case=False,
max_file_size=0):
""" Read sequence data from the input stream and return a seqs object.
    The environment variable WEBLOGO_MAX_FILE_SIZE overrides the max_file_size argument.
Used to limit the load on the WebLogo webserver.
"""
seqs = input_parser(lines)
if seqs is None or len(seqs) ==0 :
raise ValueError("Please provide a multiple sequence alignment")
if ignore_lower_case :
# Case is significant. Do not count lower case letters.
for i,s in enumerate(seqs) :
seqs[i] = s.mask()
# Add alphabet to seqs.
if alphabet :
seqs.alphabet = alphabet
else :
seqs.alphabet = Alphabet.which(seqs)
return seqs
class LogoData(object) :
"""The data needed to generate a sequence logo.
- alphabet
- length
- counts -- An array of character counts
- entropy -- The relative entropy of each column
- entropy_interval -- entropy confidence interval
"""
def __init__(self, length=None, alphabet = None, counts =None,
entropy =None, entropy_interval = None, weight=None) :
"""Creates a new LogoData object"""
self.length = length
self.alphabet = alphabet
self.counts = counts
self.entropy = entropy
self.entropy_interval = entropy_interval
self.weight = weight
@classmethod
def from_counts(cls, alphabet, counts, prior= None):
"""Build a LogoData object from counts."""
# Counts is a Motif object?
#counts = counts.array
seq_length, A = counts.shape
if prior is not None: prior = array(prior, float64)
if prior is None or sum(prior)==0.0:
R = log(A)
ent = zeros( seq_length, float64)
entropy_interval = None
for i in range (0, seq_length) :
C = sum(counts[i])
#FIXME: fixup corebio.moremath.entropy()?
if C == 0 :
ent[i] = 0.0
else :
ent[i] = R - entropy(counts[i])
else :
ent = zeros( seq_length, float64)
entropy_interval = zeros( (seq_length,2) , float64)
R = log(A)
for i in range (0, seq_length) :
alpha = array(counts[i] , float64)
alpha += prior
posterior = Dirichlet(alpha)
ent[i] = posterior.mean_relative_entropy(prior/sum(prior))
entropy_interval[i][0], entropy_interval[i][1] = \
posterior.interval_relative_entropy(prior/sum(prior), 0.95)
weight = array( na.sum(counts,axis=1) , float)
weight /= max(weight)
return cls(seq_length, alphabet, counts, ent, entropy_interval, weight)
@classmethod
def from_seqs(cls, seqs, prior= None):
"""Build a LogoData object from a SeqList, a list of sequences."""
# --- VALIDATE DATA ---
# check that at least one sequence of length at least 1 long
if len(seqs)==0 or len(seqs[0]) ==0:
raise ValueError("No sequence data found.")
# Check sequence lengths
seq_length = len(seqs[0])
for i,s in enumerate(seqs) :
#print i,s, len(s)
#TODO: Redundant? Should be checked in SeqList?
if seq_length != len(s) :
raise ArgumentError(
"Sequence number %d differs in length from the previous sequences" % (i+1) ,
'sequences')
# FIXME: Check seqs.alphabet?
counts = seqs.profile()
return cls.from_counts(seqs.alphabet, counts, prior)
def __str__(self) :
out = StringIO()
print >>out, '## LogoData'
print >>out, '# First column is position number, counting from zero'
print >>out, '# Subsequent columns are raw symbol counts'
print >>out, '# Entropy is mean entropy measured in nats.'
print >>out, '# Low and High are the 95% confidence limits.'
print >>out, '# Weight is the fraction of non-gap symbols in the column.'
print >>out, '#\t'
print >>out, '#\t',
for a in self.alphabet :
print >>out, a, '\t',
print >>out, 'Entropy\tLow\tHigh\tWeight'
for i in range(self.length) :
print >>out, i+1, '\t',
for c in self.counts[i] : print >>out, c, '\t',
print >>out, "%6.4f" % self.entropy[i], '\t',
if self.entropy_interval is not None:
print >>out, "%6.4f" % self.entropy_interval[i][0], '\t',
print >>out, "%6.4f" % self.entropy_interval[i][1], '\t',
else :
print >>out, '\t','\t',
if self.weight is not None :
print >>out, "%6.4f" % self.weight[i],
print >>out, ''
print >>out, '# End LogoData'
return out.getvalue()
| gpl-3.0 | 6,209,968,925,322,337,000 | 34.265358 | 137 | 0.570637 | false |
kakunbsc/enigma2.2 | lib/python/Plugins/SystemPlugins/Videomode/VideoHardware.py | 2 | 10472 | from enigma import eTimer
from Components.config import config, ConfigSelection, ConfigSubDict, ConfigYesNo
from Tools.CList import CList
from Tools.HardwareInfo import HardwareInfo
# The "VideoHardware" is the interface to /proc/stb/video.
# It generates hotplug events, and gives you the list of
# available and preferred modes, as well as handling the currently
# selected mode. No other strict checking is done.
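# Typical flow (illustrative): instantiate VideoHardware, inspect getPortList() and
# getModeList(port), then call setMode(port, mode, rate). setMode() writes the chosen
# mode names to /proc/stb/video/videomode_50hz and /proc/stb/video/videomode_60hz,
# falling back to /proc/stb/video/videomode if the 50/60 Hz nodes are unavailable.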
class VideoHardware:
rates = { } # high-level, use selectable modes.
modes = { } # a list of (high-level) modes for a certain port.
rates["PAL"] = { "50Hz": { 50: "pal" },
"60Hz": { 60: "pal60" },
"multi": { 50: "pal", 60: "pal60" } }
rates["NTSC"] = { "60Hz": { 60: "ntsc" } }
rates["Multi"] = { "multi": { 50: "pal", 60: "ntsc" } }
rates["480i"] = { "60Hz": { 60: "480i" } }
rates["576i"] = { "50Hz": { 50: "576i" } }
rates["480p"] = { "60Hz": { 60: "480p" } }
rates["576p"] = { "50Hz": { 50: "576p" } }
rates["720p"] = { "50Hz": { 50: "720p50" },
"60Hz": { 60: "720p" },
"multi": { 50: "720p50", 60: "720p" } }
rates["1080i"] = { "50Hz": { 50: "1080i50" },
"60Hz": { 60: "1080i" },
"multi": { 50: "1080i50", 60: "1080i" } }
rates["PC"] = {
"1024x768": { 60: "1024x768" }, # not possible on DM7025
"800x600" : { 60: "800x600" }, # also not possible
"720x480" : { 60: "720x480" },
"720x576" : { 60: "720x576" },
"1280x720": { 60: "1280x720" },
"1280x720 multi": { 50: "1280x720_50", 60: "1280x720" },
"1920x1080": { 60: "1920x1080"},
"1920x1080 multi": { 50: "1920x1080", 60: "1920x1080_50" },
"1280x1024" : { 60: "1280x1024"},
"1366x768" : { 60: "1366x768"},
"1366x768 multi" : { 50: "1366x768", 60: "1366x768_50" },
"1280x768": { 60: "1280x768" },
"640x480" : { 60: "640x480" }
}
modes["Scart"] = ["PAL", "NTSC", "Multi"]
modes["YPbPr"] = ["720p", "1080i", "576p", "480p", "576i", "480i"]
modes["DVI"] = ["720p", "1080i", "576p", "480p", "576i", "480i"]
modes["DVI-PC"] = ["PC"]
widescreen_modes = set(["720p", "1080i"])
def getOutputAspect(self):
ret = (16,9)
port = config.av.videoport.value
if port not in config.av.videomode:
print "current port not available in getOutputAspect!!! force 16:9"
else:
mode = config.av.videomode[port].value
force_widescreen = self.isWidescreenMode(port, mode)
is_widescreen = force_widescreen or config.av.aspect.value in ("16_9", "16_10")
is_auto = config.av.aspect.value == "auto"
if is_widescreen:
if force_widescreen:
pass
else:
aspect = {"16_9": "16:9", "16_10": "16:10"}[config.av.aspect.value]
if aspect == "16:10":
ret = (16,10)
elif is_auto:
try:
aspect_str = open("/proc/stb/vmpeg/0/aspect", "r").read()
if aspect_str == "1": # 4:3
ret = (4,3)
except IOError:
pass
else: # 4:3
ret = (4,3)
return ret
def __init__(self):
self.last_modes_preferred = [ ]
self.on_hotplug = CList()
self.current_mode = None
self.current_port = None
self.readAvailableModes()
if self.modes.has_key("DVI-PC") and not self.getModeList("DVI-PC"):
print "remove DVI-PC because of not existing modes"
del self.modes["DVI-PC"]
self.createConfig()
# self.on_hotplug.append(self.createConfig)
self.readPreferredModes()
# take over old AVSwitch component :)
from Components.AVSwitch import AVSwitch
# config.av.colorformat.notifiers = [ ]
config.av.aspectratio.notifiers = [ ]
config.av.tvsystem.notifiers = [ ]
config.av.wss.notifiers = [ ]
AVSwitch.getOutputAspect = self.getOutputAspect
config.av.aspect.addNotifier(self.updateAspect)
config.av.wss.addNotifier(self.updateAspect)
config.av.policy_169.addNotifier(self.updateAspect)
config.av.policy_43.addNotifier(self.updateAspect)
# until we have the hotplug poll socket
# self.timer = eTimer()
# self.timer.callback.append(self.readPreferredModes)
# self.timer.start(1000)
def readAvailableModes(self):
try:
modes = open("/proc/stb/video/videomode_choices").read()[:-1]
except IOError:
print "couldn't read available videomodes."
self.modes_available = [ ]
return
self.modes_available = modes.split(' ')
def readPreferredModes(self):
try:
modes = open("/proc/stb/video/videomode_preferred").read()[:-1]
self.modes_preferred = modes.split(' ')
except IOError:
print "reading preferred modes failed, using all modes"
self.modes_preferred = self.modes_available
if self.modes_preferred != self.last_modes_preferred:
self.last_modes_preferred = self.modes_preferred
print "hotplug on dvi"
self.on_hotplug("DVI") # must be DVI
# check if a high-level mode with a given rate is available.
def isModeAvailable(self, port, mode, rate):
rate = self.rates[mode][rate]
for mode in rate.values():
# DVI modes must be in "modes_preferred"
# if port == "DVI":
# if mode not in self.modes_preferred and not config.av.edid_override.value:
# print "no, not preferred"
# return False
if mode not in self.modes_available:
return False
return True
def isWidescreenMode(self, port, mode):
return mode in self.widescreen_modes
def setMode(self, port, mode, rate, force = None):
print "setMode - port:", port, "mode:", mode, "rate:", rate
# we can ignore "port"
self.current_mode = mode
self.current_port = port
modes = self.rates[mode][rate]
mode_50 = modes.get(50)
mode_60 = modes.get(60)
if mode_50 is None or force == 60:
mode_50 = mode_60
if mode_60 is None or force == 50:
mode_60 = mode_50
try:
open("/proc/stb/video/videomode_50hz", "w").write(mode_50)
open("/proc/stb/video/videomode_60hz", "w").write(mode_60)
except IOError:
try:
# fallback if no possibility to setup 50/60 hz mode
open("/proc/stb/video/videomode", "w").write(mode_50)
except IOError:
print "setting videomode failed."
try:
open("/etc/videomode", "w").write(mode_50) # use 50Hz mode (if available) for booting
except IOError:
print "writing initial videomode to /etc/videomode failed."
self.updateAspect(None)
def saveMode(self, port, mode, rate):
print "saveMode", port, mode, rate
config.av.videoport.value = port
config.av.videoport.save()
config.av.videomode[port].value = mode
config.av.videomode[port].save()
config.av.videorate[mode].value = rate
config.av.videorate[mode].save()
def isPortAvailable(self, port):
# fixme
return True
def isPortUsed(self, port):
if port == "DVI":
self.readPreferredModes()
return len(self.modes_preferred) != 0
else:
return True
def getPortList(self):
return [port for port in self.modes if self.isPortAvailable(port)]
# get a list with all modes, with all rates, for a given port.
def getModeList(self, port):
print "getModeList for port", port
res = [ ]
for mode in self.modes[port]:
# list all rates which are completely valid
rates = [rate for rate in self.rates[mode] if self.isModeAvailable(port, mode, rate)]
# if at least one rate is ok, add this mode
if len(rates):
res.append( (mode, rates) )
return res
def createConfig(self, *args):
hw_type = HardwareInfo().get_device_name()
lst = []
config.av.videomode = ConfigSubDict()
config.av.videorate = ConfigSubDict()
# create list of output ports
portlist = self.getPortList()
for port in portlist:
descr = port
if descr == 'DVI' and hw_type == 'dm500hd':
descr = 'HDMI'
lst.append((port, descr))
# create list of available modes
modes = self.getModeList(port)
if len(modes):
config.av.videomode[port] = ConfigSelection(choices = [mode for (mode, rates) in modes])
for (mode, rates) in modes:
config.av.videorate[mode] = ConfigSelection(choices = rates)
config.av.videoport = ConfigSelection(choices = lst)
def setConfiguredMode(self):
port = config.av.videoport.value
if port not in config.av.videomode:
print "current port not available, not setting videomode"
return
mode = config.av.videomode[port].value
if mode not in config.av.videorate:
print "current mode not available, not setting videomode"
return
rate = config.av.videorate[mode].value
self.setMode(port, mode, rate)
def updateAspect(self, cfgelement):
# determine aspect = {any,4:3,16:9,16:10}
# determine policy = {bestfit,letterbox,panscan,nonlinear}
# based on;
# config.av.videoport.value: current video output device
# Scart:
# config.av.aspect:
# 4_3: use policy_169
# 16_9,16_10: use policy_43
# auto always "bestfit"
# config.av.policy_169
# letterbox use letterbox
# panscan use panscan
# scale use bestfit
# config.av.policy_43
# pillarbox use panscan
# panscan use letterbox ("panscan" is just a bad term, it's inverse-panscan)
# nonlinear use nonlinear
# scale use bestfit
port = config.av.videoport.value
if port not in config.av.videomode:
print "current port not available, not setting videomode"
return
mode = config.av.videomode[port].value
force_widescreen = self.isWidescreenMode(port, mode)
is_widescreen = force_widescreen or config.av.aspect.value in ("16_9", "16_10")
is_auto = config.av.aspect.value == "auto"
policy2 = "policy" # use main policy
if is_widescreen:
if force_widescreen:
aspect = "16:9"
else:
aspect = {"16_9": "16:9", "16_10": "16:10"}[config.av.aspect.value]
policy = {"pillarbox": "panscan", "panscan": "letterbox", "nonlinear": "nonlinear", "scale": "bestfit"}[config.av.policy_43.value]
policy2 = {"letterbox": "letterbox", "panscan": "panscan", "scale": "bestfit"}[config.av.policy_169.value]
elif is_auto:
aspect = "any"
policy = "bestfit"
else:
aspect = "4:3"
policy = {"letterbox": "letterbox", "panscan": "panscan", "scale": "bestfit"}[config.av.policy_169.value]
if not config.av.wss.value:
wss = "auto(4:3_off)"
else:
wss = "auto"
print "-> setting aspect, policy, policy2, wss", aspect, policy, policy2, wss
open("/proc/stb/video/aspect", "w").write(aspect)
open("/proc/stb/video/policy", "w").write(policy)
open("/proc/stb/denc/0/wss", "w").write(wss)
try:
open("/proc/stb/video/policy2", "w").write(policy2)
except IOError:
pass
config.av.edid_override = ConfigYesNo(default = False)
video_hw = VideoHardware()
video_hw.setConfiguredMode()
| gpl-2.0 | 6,534,390,111,145,124,000 | 30.542169 | 133 | 0.646772 | false |
lambda2/Fennec | fennec/logger.py | 1 | 1715 | import logging
import os
import datetime
import string
import random
class Logger():
"""
Creates a beautifully crafted logger object to use with fennec.
"""
def __init__(self, root_path):
self.logger = logging.getLogger('fennec')
self.logger.setLevel(logging.DEBUG)
trace_id = ''.join(random.choice(string.ascii_letters + string.digits) for i in range(12))
# Log file that logs everything
filename = datetime.datetime.now().strftime('log_%Y_%m_%d_%H_%M_%S_' + trace_id + '.log')
log_file = os.path.join(root_path, 'log', filename)
self.log_handler = logging.FileHandler(log_file, mode='w')
self.log_handler.setLevel(logging.DEBUG)
# Trace file that logs depending on what is asked - Warning by default
filename = datetime.datetime.now().strftime('trace_%Y_%m_%d_%H_%M_%S_' + trace_id + '.log')
trace_file = os.path.join(root_path, 'trace', filename)
self.trace_handler = logging.FileHandler(trace_file, mode='w')
self.trace_handler.setLevel(logging.WARNING)
# Console logger - Prints warnings and above
self.console_handler = logging.StreamHandler()
self.console_handler.setLevel(logging.WARNING)
# Formatter of messages
formatter = logging.Formatter('[%(name)s] [%(asctime)s] [%(levelname)-8s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
self.log_handler.setFormatter(formatter)
self.trace_handler.setFormatter(formatter)
self.console_handler.setFormatter(formatter)
# Add the handlers to the logging
        self.logger.addHandler(self.log_handler)
        self.logger.addHandler(self.trace_handler)
        self.logger.addHandler(self.console_handler)
# Start logs by entering message
self.logger.debug("Starting logger...Done")
def get_logger(self):
return self.logger
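
# Hedged usage sketch added for illustration; the demo directory below is an
# assumption, not part of the original module. Logger expects 'log' and
# 'trace' sub-directories to exist under the root path it is given.
if __name__ == "__main__":
    demo_root = os.path.join(os.getcwd(), "fennec_demo")
    for sub in ("log", "trace"):
        try:
            os.makedirs(os.path.join(demo_root, sub))
        except OSError:
            pass  # directory already exists
    demo_logger = Logger(demo_root).get_logger()
    demo_logger.warning("recorded in the log file, the trace file and on the console")
    demo_logger.debug("recorded in the log file only")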
| gpl-3.0 | 5,221,612,199,813,064,000 | 39.833333 | 118 | 0.721866 | false |
alexey-grom/django-userflow | userflow/views/verify/request.py | 1 | 1109 | # encoding: utf-8
from django.http.response import HttpResponseRedirect, Http404
from django.views.generic.detail import DetailView
from userflow.models import UserEmail
class RequestConfirmEmailView(DetailView):
model = UserEmail
def get_queryset(self):
return super(RequestConfirmEmailView, self).get_queryset().inactive()
def get_object(self, queryset=None):
object = super(RequestConfirmEmailView, self).get_object(queryset)
if object.user != self.request.user:
if object.user.is_active:
raise Http404
confirmation = object.confirmations.\
unfinished().\
first()
if not confirmation:
from userflow.models import EmailConfirmation
confirmation = EmailConfirmation.objects.create(email=object)
return confirmation
def render_to_response(self, context, **response_kwargs):
self.object.send('verify',
self.object.get_owner(),
self.request)
return HttpResponseRedirect(self.object.get_wait_url())
| mit | 1,387,404,666,578,132,700 | 33.65625 | 77 | 0.655546 | false |
FundedByMe/django-uc | uc/uc.py | 1 | 2620 | from __future__ import print_function
from django.conf import settings
from suds.client import Client
from suds.plugin import MessagePlugin
import sys
def get_client(product_code):
url = "https://www.uc.se/UCSoapWeb/services/ucOrders2"
client = Client(url + "?wsdl", plugins=[VersionPlugin(product_code)])
client.sd[0].service.setlocation(url)
return client
class VersionPlugin(MessagePlugin):
def __init__(self, product_code):
self.product_code = product_code
def marshalled(self, context):
body = context.envelope.getChild('Body')
company_report = body[0]
company_report.set('ns1:product', self.product_code)
company_report.set('ns1:version', '2.1')
print(str(context.envelope.getChild('Body')))
def get_customer(client):
customer = client.factory.create("ns0:customer")
customer.userId = settings.UC_USER_ID
customer.password = settings.UC_PASSWORD
return customer
def get_report_query(client, organization_number):
reportQuery = client.factory.create("ns0:reportQuery")
reportQuery.object = organization_number
reportQuery._xmlReply = "true"
reportQuery._htmlReply = "false"
reportQuery._reviewReply = "false"
reportQuery._lang = "eng"
return reportQuery
def get_company_report(client, organization_number):
customer = get_customer(client)
report_query = get_report_query(client, organization_number)
return client.service.companyReport(
customer=customer, companyReportQuery=report_query)
def get_company_full_report(organization_number):
return get_company_report(get_client("410"), organization_number)
def get_company_risk_report(organization_number):
return get_company_report(get_client("4"), organization_number)
def get_credit_rating_group_term_indices(report):
""" Returns a tuple (group, term) where `group` is the index of the
Credit Rating info provided in the report and `term` is the index of
    the term containing the Risk Rating value.
"""
try:
# Group W110 = Credit Rating
# Term W11001 = Risk Rating
for index_, group in enumerate(report.ucReport[0].xmlReply.reports[0].report[0].group):
if group._id == "W110":
for index__, term in enumerate(report.ucReport[0].xmlReply.reports[0].report[0].group[index_].term):
if term._id == "W11001":
return (index_, index__)
except AttributeError:
raise Exception(
"Provided UC report doesn't include sufficient data to get Group/Term index."), None, sys.exc_info()[2]
| mit | 8,922,698,252,316,484,000 | 33.933333 | 116 | 0.683588 | false |
whereskenneth/Dwarfsquad | dwarfsquad/lib/build/from_export/build_compound_methods.py | 1 | 6738 | from dwarfsquad.lib.build.from_export.helpers import build_reference_map
from dwarfsquad.lib.utils import to_stderr
from dwarfsquad.model.Calibration import Calibration
from dwarfsquad.model.ChromatogramMethod import ChromatogramMethod
from dwarfsquad.model.CompoundMethod import CompoundMethod
from dwarfsquad.model.PeakIntegration import PeakIntegration
from dwarfsquad.model.ReductionMethod import ReductionMethod
from dwarfsquad.model.RetentionTime import RetentionTime
from dwarfsquad.model.Smoothing import Smoothing
from dwarfsquad.model.Threshold import Threshold
def build_compound_methods(compounds_csv):
compound_methods = []
unique_compound_choromatograms = set()
for row in compounds_csv:
try:
compound_method = get_compound_method(compound_methods, row)
chromatogram_method = get_chromatogram_method(row)
compound_method.chromatogram_methods.append(chromatogram_method)
compound_methods.insert(compound_method.view_order, compound_method)
unique_compound_chromatogram_name = compound_method.name + " - " + chromatogram_method.name
if unique_compound_chromatogram_name in unique_compound_choromatograms:
raise Exception("Assay already contains a compound/chromatogram combo of: " +
unique_compound_chromatogram_name)
else:
unique_compound_choromatograms.add(unique_compound_chromatogram_name)
except Exception as e:
for k, v in row.items():
to_stderr(k + ": " + v)
raise e
reference_map = build_reference_map(compound_methods)
return resolve_references(compound_methods, reference_map)
def resolve_references(compound_methods, reference_map):
resolved_cms = []
for cm in compound_methods:
cm.calibration.normalizers = [reference_map[n] for n in cm.calibration.normalizers if n]
cm.calibration.responses = [reference_map[r] for r in cm.calibration.responses if r]
resolved_ch_ms = []
for ch_m in cm.chromatogram_methods:
try:
reference = ch_m.peak_integration.retention_time.reference
ch_m.peak_integration.retention_time.reference = reference_map[reference]
except KeyError:
pass
resolved_ch_ms.append(ch_m)
cm.chromatogram_methods = resolved_ch_ms
resolved_cms.append(cm)
return resolved_cms
def get_chromatogram_method(row):
chromatogram_method = ChromatogramMethod({})
chromatogram_method.set_peak_integration(get_peak_integration(row))
chromatogram_method.set_reduction_method(get_reduction_method(row))
chromatogram_method.set_name(row.get('chromatogram_name'))
return chromatogram_method
def get_reduction_method(row):
reduction_method = ReductionMethod({})
reduction_method.set_activation_energy(row.get('activation_energy'))
reduction_method.set_combine_ions(row.get('combine_ions'))
reduction_method.set_lower_precursor_mass(row.get('lower_precursor_mass'))
reduction_method.set_upper_precursor_mass(row.get('upper_precursor_mass'))
reduction_method.set_lower_product_mass(row.get('lower_product_mass'))
reduction_method.set_upper_product_mass(row.get('upper_product_mass'))
reduction_method.set_polarity(row.get('polarity'))
return reduction_method
def get_peak_integration(row):
peak_integration = PeakIntegration({})
peak_integration.set_retention_time(get_retention_time(row))
peak_integration.set_threshold(get_threshold(row))
peak_integration.set_smoothing(get_smoothing(row))
peak_integration.set_prioritized_peak_models(get_prioritized_peak_models(row))
return peak_integration
def get_prioritized_peak_models(row):
return str(row.get('prioritized_peak_models')).split(';')
def get_smoothing(row):
smoothing = Smoothing({})
smoothing.set_fixed(row.get('fixed'))
smoothing.set_max(row.get('max'))
smoothing.set_min(row.get('min'))
smoothing.set_optimal_enabled(row.get('optimal_enabled'))
smoothing.set_start(row.get('start'))
return smoothing
def get_threshold(row):
threshold = Threshold({})
threshold.set_peak_probability(row.get('peak_probability'))
threshold.set_absolute_area(row.get('absolute_area'))
threshold.set_absolute_height(row.get('absolute_height'))
threshold.set_first_derivative(row.get('first_derivative'))
threshold.set_second_derivative(row.get('second_derivative'))
threshold.set_min_merge_difference(row.get('min_merge_difference'))
threshold.set_relative_area(row.get('relative_area'))
threshold.set_relative_height(row.get('relative_height'))
threshold.set_saturation(row.get('saturation'))
threshold.set_signal_to_noise(row.get('signal_to_noise'))
threshold.set_relative_low_std_area(row.get('relative_low_std_area'))
threshold.set_relative_low_std_height(row.get('relative_low_std_height'))
return threshold
def get_retention_time(row):
retention_time = RetentionTime({})
retention_time.set_bias(row.get('bias'))
retention_time.set_expected(row.get('expected'))
retention_time.set_lower_tolerance(row.get('lower_tolerance'))
retention_time.set_upper_tolerance(row.get('upper_tolerance'))
retention_time.set_reference(row.get('reference'))
retention_time.set_reference_type_source(row.get('reference_type_source'))
retention_time.set_upper_trace_width(row.get('upper_trace_width'))
retention_time.set_lower_trace_width(row.get('lower_trace_width'))
retention_time.set_window_width(row.get('window_width'))
retention_time.set_estimation_width(row.get('estimation_width'))
retention_time.set_window_multiplier(row.get('window_multiplier'))
return retention_time
def get_calibration(row):
calibration = Calibration({})
calibration.set_degree(row.get('degree'))
calibration.set_enabled(row.get('enabled'))
calibration.set_origin(row.get('origin'))
calibration.set_weighting(row.get('weighting'))
try:
calibration.set_normalizers(str(row.get('normalizers')).split(';'))
except ValueError:
calibration.set_normalizers([])
try:
calibration.set_responses(str(row.get('responses')).split(';'))
except ValueError:
calibration.set_responses([])
return calibration
def get_compound_method(cms, row):
for index, cm in enumerate(cms):
if row.get('compound_name') == cm.name:
return cms.pop(index)
cm = CompoundMethod({})
cm.set_name(row.get('compound_name'))
cm.set_view_order(row.get('view_order'))
cm.set_calibration(get_calibration(row))
return cm
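
# Minimal usage sketch (added); the file name is an assumption and
# build_compound_methods only needs an iterable of dict-like rows:
#
#   import csv
#   with open('compounds_export.csv') as fh:
#       compound_methods = build_compound_methods(csv.DictReader(fh))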
| mit | -6,402,530,371,406,678,000 | 37.502857 | 103 | 0.706886 | false |
wtsi-hgi/irobot | irobot/tests/unit/authentication/test_http.py | 1 | 5773 | """
Copyright (c) 2017 Genome Research Ltd.
Author: Christopher Harrison <ch12@sanger.ac.uk>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from datetime import datetime, timedelta
from unittest.mock import patch
from aiohttp import ClientResponseError
from aioresponses import aioresponses
import irobot.authentication._http as http
from irobot.authentication._base import AuthenticatedUser
from irobot.authentication.parser import HTTPAuthMethod
from irobot.config import Configuration
from irobot.config._tree_builder import ConfigValue
from irobot.tests.unit.async import async_test
class _MockHTTPAuthConfig(Configuration):
def __init__(self, cache):
super().__init__()
self.add_value("cache", ConfigValue(cache, lambda x: x))
_CONFIG_CACHE = _MockHTTPAuthConfig(timedelta(minutes=10))
_CONFIG_NOCACHE = _MockHTTPAuthConfig(None)
class _MockHTTPAuthenticator(http.BaseHTTPAuthHandler):
@property
def www_authenticate(self):
return "Mock"
def match_auth_method(self, challenge_response):
return (challenge_response.auth_method == "foo")
def set_handler_parameters(self, challenge_response):
return http.HTTPValidatorParameters("foo", "bar")
async def get_authenticated_user(self, challenge_response, auth_response):
return AuthenticatedUser("Testy McTestface")
class TestHTTPAuthenticationHandler(unittest.TestCase):
def test_constructor(self):
auth_cache = _MockHTTPAuthenticator(_CONFIG_CACHE)
self.assertTrue(hasattr(auth_cache, "_cache"))
auth_nocache = _MockHTTPAuthenticator(_CONFIG_NOCACHE)
self.assertFalse(hasattr(auth_nocache, "_cache"))
@patch("irobot.authentication._http.Timer", spec=True)
def test_cached_shutdown(self, *args):
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth._cleanup_timer.is_alive.return_value = True
auth.__del__()
auth._cleanup_timer.cancel.assert_called_once()
@patch("irobot.authentication._base.datetime", spec=True)
@patch("irobot.authentication._http.Timer", spec=True)
def test_cache_cleanup(self, _mock_timer, mock_datetime):
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth_method = HTTPAuthMethod("foo")
validation_time = mock_datetime.utcnow.return_value = datetime.utcnow()
auth._cache[auth_method] = AuthenticatedUser("Testy McTestface")
auth._cleanup()
self.assertIn(auth_method, auth._cache)
self.assertEqual(auth._cache[auth_method].user, "Testy McTestface")
self.assertEqual(auth._cache[auth_method].authenticated, validation_time)
mock_datetime.utcnow.return_value = validation_time + timedelta(minutes=11)
auth._cleanup()
self.assertEqual(auth._cache, {})
@async_test
@aioresponses()
async def test_request_validator(self, mock_response):
auth = _MockHTTPAuthenticator(_CONFIG_NOCACHE)
mock_url = "foo"
params = http.HTTPValidatorParameters(mock_url, "bar")
mock_response.get(mock_url, status=200)
validation_response = await auth._validate_request(params)
self.assertIsNotNone(validation_response)
mock_response.get(mock_url, status=401)
validation_response = await auth._validate_request(params)
self.assertIsNone(validation_response)
mock_response.get(mock_url, status=500)
try:
validation_response = await auth._validate_request(params)
except Exception as e:
self.assertIsInstance(e, ClientResponseError)
@async_test
@aioresponses()
async def test_authenticate(self, mock_response):
with patch("irobot.authentication._base.datetime", spec=True) as mock_datetime:
# patch and aioresponses don't play nicely together as
# decorators, so we use patch's context manager instead
validation_time = mock_datetime.utcnow.return_value = datetime.utcnow()
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth_response = await auth.authenticate("this is a bad header")
self.assertIsNone(auth_response)
auth_response = await auth.authenticate("bar")
self.assertIsNone(auth_response)
mock_response.get("foo", status=401)
auth_response = await auth.authenticate("foo")
self.assertIsNone(auth_response)
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
# Run again to test it's coming from the cache
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
# Invalidate cache and go again
mock_datetime.utcnow.return_value = validation_time + timedelta(minutes=11)
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 | 799,001,320,884,289,400 | 36.487013 | 87 | 0.691841 | false |
hexpl0it/plugin.video.genesi-ita | resources/lib/libraries/js2py/prototypes/jsarray.py | 1 | 14657 | def to_arr(this):
"""Returns Python array from Js array"""
return [this.get(str(e)) for e in xrange(len(this))]
ARR_STACK = set()
class ArrayPrototype:
def toString():
        # this function is wrong but I will leave it here for debugging purposes.
func = this.get('join')
if not func.is_callable():
@this.Js
def func():
return '[object %s]'%this.Class
return func.call(this, ())
def toLocaleString():
array = this.to_object()
arr_len = array.get('length').to_uint32()
# separator is simply a comma ','
if not arr_len:
return ''
res = []
for i in xrange(arr_len):
element = array[str(i)]
if element.is_undefined() or element.is_null():
res.append('')
else:
cand = element.to_object()
str_func = element.get('toLocaleString')
if not str_func.is_callable():
raise this.MakeError('TypeError', 'toLocaleString method of item at index %d is not callable'%i)
res.append(element.callprop('toLocaleString').value)
return ','.join(res)
def concat():
array = this.to_object()
A = this.Js([])
items = [array]
items.extend(to_arr(arguments))
n = 0
for E in items:
if E.Class=='Array':
k = 0
e_len = len(E)
while k<e_len:
if E.has_property(str(k)):
A.put(str(n), E.get(str(k)))
n+=1
k+=1
else:
A.put(str(n), E)
n+=1
return A
def join(separator):
ARR_STACK.add(this)
array = this.to_object()
arr_len = array.get('length').to_uint32()
separator = ',' if separator.is_undefined() else separator.to_string().value
elems = []
for e in xrange(arr_len):
elem = array.get(str(e))
if elem in ARR_STACK:
s = ''
else:
s = elem.to_string().value
elems.append(s if not (elem.is_undefined() or elem.is_null()) else '')
res = separator.join(elems)
ARR_STACK.remove(this)
return res
def pop(): #todo check
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not arr_len:
array.put('length', this.Js(arr_len))
return None
ind = str(arr_len-1)
element = array.get(ind)
array.delete(ind)
array.put('length', this.Js(arr_len-1))
return element
def push(item): # todo check
array = this.to_object()
arr_len = array.get('length').to_uint32()
to_put = arguments.to_list()
i = arr_len
for i, e in enumerate(to_put, arr_len):
array.put(str(i), e)
if to_put:
i+=1
array.put('length', this.Js(i))
return i
def reverse():
array = this.to_object() # my own algorithm
vals = to_arr(array)
has_props = [array.has_property(str(e)) for e in xrange(len(array))]
vals.reverse()
has_props.reverse()
for i, val in enumerate(vals):
if has_props[i]:
array.put(str(i), val)
else:
array.delete(str(i))
return array
def shift(): #todo check
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not arr_len:
array.put('length', this.Js(0))
return None
first = array.get('0')
for k in xrange(1, arr_len):
from_s, to_s = str(k), str(k-1)
if array.has_property(from_s):
array.put(to_s, array.get(from_s))
else:
                array.delete(to_s)
array.delete(str(arr_len-1))
array.put('length', this.Js(str(arr_len-1)))
return first
def slice(start, end): # todo check
array = this.to_object()
arr_len = array.get('length').to_uint32()
relative_start = start.to_int()
k = max((arr_len + relative_start), 0) if relative_start<0 else min(relative_start, arr_len)
relative_end = arr_len if end.is_undefined() else end.to_int()
final = max((arr_len + relative_end), 0) if relative_end<0 else min(relative_end, arr_len)
res = []
n = 0
while k<final:
pk = str(k)
if array.has_property(pk):
res.append(array.get(pk))
k += 1
n += 1
return res
def sort(cmpfn):
if not this.Class in {'Array', 'Arguments'}:
return this.to_object() # do nothing
arr = [this.get(str(i)) for i in xrange(len(this))]
if not arr:
return this
if not cmpfn.is_callable():
cmpfn = None
cmp = lambda a,b: sort_compare(a, b, cmpfn)
arr.sort(cmp=cmp)
for i in xrange(len(arr)):
this.put(unicode(i), arr[i])
return this
def splice(start, deleteCount):
# 1-8
array = this.to_object()
arr_len = array.get('length').to_uint32()
relative_start = start.to_int()
actual_start = max((arr_len + relative_start),0) if relative_start<0 else min(relative_start, arr_len)
actual_delete_count = min(max(deleteCount.to_int(),0 ), arr_len - actual_start)
k = 0
A = this.Js([])
# 9
while k<actual_delete_count:
if array.has_property(str(actual_start+k)):
A.put(str(k), array.get(str(actual_start+k)))
k += 1
# 10-11
items = to_arr(arguments)[2:]
items_len = len(items)
# 12
if items_len<actual_delete_count:
k = actual_start
while k < (arr_len-actual_delete_count):
fr = str(k+actual_delete_count)
to = str(k+items_len)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k += 1
k = arr_len
while k > (arr_len - actual_delete_count + items_len):
array.delete(str(k-1))
k -= 1
# 13
elif items_len>actual_delete_count:
k = arr_len - actual_delete_count
while k>actual_start:
fr = str(k + actual_delete_count - 1)
to = str(k + items_len - 1)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k -= 1
# 14-17
k = actual_start
while items:
E = items.pop(0)
array.put(str(k), E)
k += 1
array.put('length', this.Js(arr_len - actual_delete_count + items_len))
return A
def unshift():
array = this.to_object()
arr_len = array.get('length').to_uint32()
argCount = len(arguments)
k = arr_len
while k > 0:
fr = str(k - 1)
to = str(k + argCount - 1)
if array.has_property(fr):
array.put(to, array.get(fr))
else:
array.delete(to)
k -= 1
j = 0
items = to_arr(arguments)
while items:
E = items.pop(0)
array.put(str(j), E)
j += 1
array.put('length', this.Js(arr_len + argCount))
return arr_len + argCount
def indexOf(searchElement):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if arr_len == 0:
return -1
if len(arguments)>1:
n = arguments[1].to_int()
else:
n = 0
if n >= arr_len:
return -1
if n >= 0:
k = n
else:
k = arr_len - abs(n)
if k < 0:
k = 0
while k < arr_len:
if array.has_property(str(k)):
elementK = array.get(str(k))
if searchElement.strict_equality_comparison(elementK):
return k
k += 1
return -1
def lastIndexOf(searchElement):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if arr_len == 0:
return -1
if len(arguments)>1:
n = arguments[1].to_int()
else:
n = arr_len - 1
if n >= 0:
k = min(n, arr_len-1)
else:
k = arr_len - abs(n)
while k >= 0:
if array.has_property(str(k)):
elementK = array.get(str(k))
if searchElement.strict_equality_comparison(elementK):
return k
k -= 1
return -1
def every(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
T = arguments[1]
k = 0
while k<arr_len:
if array.has_property(str(k)):
kValue = array.get(str(k))
if not callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value:
return False
k += 1
return True
def some(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
T = arguments[1]
k = 0
while k<arr_len:
if array.has_property(str(k)):
kValue = array.get(str(k))
if callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value:
return True
k += 1
return False
def forEach(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
T = arguments[1]
k = 0
while k<arr_len:
if array.has_property(str(k)):
kValue = array.get(str(k))
callbackfn.call(T, (kValue, this.Js(k), array))
k+=1
def map(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
T = arguments[1]
A = this.Js([])
k = 0
while k<arr_len:
Pk = str(k)
if array.has_property(Pk):
kValue = array.get(Pk)
mappedValue = callbackfn.call(T, (kValue, this.Js(k), array))
A.define_own_property(Pk, {'value': mappedValue, 'writable': True,
'enumerable': True, 'configurable': True})
k += 1
return A
def filter(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
T = arguments[1]
res = []
k = 0
while k<arr_len:
if array.has_property(str(k)):
kValue = array.get(str(k))
if callbackfn.call(T, (kValue, this.Js(k), array)).to_boolean().value:
res.append(kValue)
k += 1
return res # converted to js array automatically
def reduce(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
if not arr_len and len(arguments)<2:
raise this.MakeError('TypeError', 'Reduce of empty array with no initial value')
k = 0
if len(arguments)>1: # initial value present
accumulator = arguments[1]
else:
kPresent = False
while not kPresent and k<arr_len:
kPresent = array.has_property(str(k))
if kPresent:
accumulator = array.get(str(k))
k += 1
if not kPresent:
raise this.MakeError('TypeError', 'Reduce of empty array with no initial value')
while k<arr_len:
if array.has_property(str(k)):
kValue = array.get(str(k))
accumulator = callbackfn.call(this.undefined, (accumulator, kValue, this.Js(k), array))
k += 1
return accumulator
def reduceRight(callbackfn):
array = this.to_object()
arr_len = array.get('length').to_uint32()
if not callbackfn.is_callable():
raise this.MakeError('TypeError', 'callbackfn must be a function')
if not arr_len and len(arguments)<2:
raise this.MakeError('TypeError', 'Reduce of empty array with no initial value')
k = arr_len - 1
if len(arguments)>1: # initial value present
accumulator = arguments[1]
else:
kPresent = False
while not kPresent and k>=0:
kPresent = array.has_property(str(k))
if kPresent:
accumulator = array.get(str(k))
k -= 1
if not kPresent:
raise this.MakeError('TypeError', 'Reduce of empty array with no initial value')
while k>=0:
if array.has_property(str(k)):
kValue = array.get(str(k))
accumulator = callbackfn.call(this.undefined, (accumulator, kValue, this.Js(k), array))
k -= 1
return accumulator
def sort_compare(a, b, comp):
if a is None:
if b is None:
return 0
return 1
if b is None:
if a is None:
return 0
return -1
if a.is_undefined():
if b.is_undefined():
return 0
return 1
if b.is_undefined():
if a.is_undefined():
return 0
return -1
if comp is not None:
res = comp.call(a.undefined, (a, b))
return res.to_int()
x, y = a.to_string(), b.to_string()
if x<y:
return -1
elif x>y:
return 1
return 0
| gpl-3.0 | 2,274,216,971,756,242,200 | 32.011261 | 116 | 0.492324 | false |
rhyolight/nupic.son | tests/app/summerofcode/logic/test_organization.py | 1 | 2546 | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Summer Of Code-specific organization logic."""
import unittest
from google.appengine.ext import ndb
from melange.logic import organization as org_logic
from soc.models import survey as survey_model
from soc.modules.seeder.logic.seeder import logic as seeder_logic
from summerofcode import types
from summerofcode.models import organization as org_model
from tests import program_utils
TEST_ORG_ID = 'test_org_id'
TEST_ORG_NAME = 'Test Org Name'
TEST_DESCRIPTION = u'Test Organization Description'
TEST_IDEAS_PAGE = 'http://www.test.ideas.com'
class CreateOrganizationTest(unittest.TestCase):
"""Unit tests for Summer Of Code specific behavior of
createOrganization function.
"""
def setUp(self):
# seed a program
self.program = program_utils.seedGSoCProgram()
# seed an organization application
self.survey = seeder_logic.seed(survey_model.Survey)
def testPropertiesAreSet(self):
"""Tests that Summer Of Code-specific properties are set correctly."""
org_properties = {
'description': TEST_DESCRIPTION,
'ideas_page': TEST_IDEAS_PAGE,
'name': TEST_ORG_NAME,
}
result = org_logic.createOrganization(
TEST_ORG_ID, self.program.key(), org_properties,
models=types.SOC_MODELS)
self.assertTrue(result)
# check that organization is created and persisted
org = ndb.Key(
org_model.SOCOrganization._get_kind(),
'%s/%s' % (self.program.key().name(), TEST_ORG_ID)).get()
self.assertIsNotNone(org)
self.assertEqual(org.ideas_page, TEST_IDEAS_PAGE)
def testForInvalidIdeasPage(self):
"""Tests that org is not created when a link property has invalid values."""
org_properties = {
'ideas_page': 'http://invalid',
'name': TEST_ORG_NAME
}
result = org_logic.createOrganization(
TEST_ORG_ID, self.program.key(), org_properties,
models=types.SOC_MODELS)
self.assertFalse(result)
| apache-2.0 | 2,475,230,859,541,138,000 | 32.064935 | 80 | 0.714847 | false |
tboyce021/home-assistant | homeassistant/components/apple_tv/__init__.py | 2 | 12223 | """The Apple TV integration."""
import asyncio
import logging
from random import randrange
from pyatv import connect, exceptions, scan
from pyatv.const import Protocol
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.remote import DOMAIN as REMOTE_DOMAIN
from homeassistant.const import (
CONF_ADDRESS,
CONF_NAME,
CONF_PROTOCOL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from .const import CONF_CREDENTIALS, CONF_IDENTIFIER, CONF_START_OFF, DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Apple TV"
BACKOFF_TIME_UPPER_LIMIT = 300 # Five minutes
NOTIFICATION_TITLE = "Apple TV Notification"
NOTIFICATION_ID = "apple_tv_notification"
SOURCE_REAUTH = "reauth"
SIGNAL_CONNECTED = "apple_tv_connected"
SIGNAL_DISCONNECTED = "apple_tv_disconnected"
PLATFORMS = [MP_DOMAIN, REMOTE_DOMAIN]
async def async_setup(hass, config):
"""Set up the Apple TV integration."""
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry for Apple TV."""
manager = AppleTVManager(hass, entry)
hass.data.setdefault(DOMAIN, {})[entry.unique_id] = manager
async def on_hass_stop(event):
"""Stop push updates when hass stops."""
await manager.disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)
async def setup_platforms():
"""Set up platforms and initiate connection."""
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_setup(entry, component)
for component in PLATFORMS
]
)
await manager.init()
hass.async_create_task(setup_platforms())
return True
async def async_unload_entry(hass, entry):
"""Unload an Apple TV config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
manager = hass.data[DOMAIN].pop(entry.unique_id)
await manager.disconnect()
return unload_ok
class AppleTVEntity(Entity):
"""Device that sends commands to an Apple TV."""
def __init__(self, name, identifier, manager):
"""Initialize device."""
self.atv = None
self.manager = manager
self._name = name
self._identifier = identifier
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
@callback
def _async_connected(atv):
"""Handle that a connection was made to a device."""
self.atv = atv
self.async_device_connected(atv)
self.async_write_ha_state()
@callback
def _async_disconnected():
"""Handle that a connection to a device was lost."""
self.async_device_disconnected()
self.atv = None
self.async_write_ha_state()
self.async_on_remove(
async_dispatcher_connect(
self.hass, f"{SIGNAL_CONNECTED}_{self._identifier}", _async_connected
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_DISCONNECTED}_{self._identifier}",
_async_disconnected,
)
)
def async_device_connected(self, atv):
"""Handle when connection is made to device."""
def async_device_disconnected(self):
"""Handle when connection was lost to device."""
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._identifier
@property
def should_poll(self):
"""No polling needed for Apple TV."""
return False
class AppleTVManager:
"""Connection and power manager for an Apple TV.
An instance is used per device to share the same power state between
several platforms. It also manages scanning and connection establishment
in case of problems.
"""
def __init__(self, hass, config_entry):
"""Initialize power manager."""
self.config_entry = config_entry
self.hass = hass
self.atv = None
self._is_on = not config_entry.options.get(CONF_START_OFF, False)
self._connection_attempts = 0
self._connection_was_lost = False
self._task = None
async def init(self):
"""Initialize power management."""
if self._is_on:
await self.connect()
def connection_lost(self, _):
"""Device was unexpectedly disconnected.
This is a callback function from pyatv.interface.DeviceListener.
"""
_LOGGER.warning('Connection lost to Apple TV "%s"', self.atv.name)
if self.atv:
self.atv.close()
self.atv = None
self._connection_was_lost = True
self._dispatch_send(SIGNAL_DISCONNECTED)
self._start_connect_loop()
def connection_closed(self):
"""Device connection was (intentionally) closed.
This is a callback function from pyatv.interface.DeviceListener.
"""
if self.atv:
self.atv.close()
self.atv = None
self._dispatch_send(SIGNAL_DISCONNECTED)
self._start_connect_loop()
async def connect(self):
"""Connect to device."""
self._is_on = True
self._start_connect_loop()
async def disconnect(self):
"""Disconnect from device."""
_LOGGER.debug("Disconnecting from device")
self._is_on = False
try:
if self.atv:
self.atv.push_updater.listener = None
self.atv.push_updater.stop()
self.atv.close()
self.atv = None
if self._task:
self._task.cancel()
self._task = None
except Exception: # pylint: disable=broad-except
_LOGGER.exception("An error occurred while disconnecting")
def _start_connect_loop(self):
"""Start background connect loop to device."""
if not self._task and self.atv is None and self._is_on:
self._task = asyncio.create_task(self._connect_loop())
else:
_LOGGER.debug(
"Not starting connect loop (%s, %s)", self.atv is None, self._is_on
)
async def _connect_loop(self):
"""Connect loop background task function."""
_LOGGER.debug("Starting connect loop")
# Try to find device and connect as long as the user has said that
# we are allowed to connect and we are not already connected.
while self._is_on and self.atv is None:
try:
conf = await self._scan()
if conf:
await self._connect(conf)
except exceptions.AuthenticationError:
self._auth_problem()
break
except asyncio.CancelledError:
pass
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Failed to connect")
self.atv = None
if self.atv is None:
self._connection_attempts += 1
backoff = min(
randrange(2 ** self._connection_attempts), BACKOFF_TIME_UPPER_LIMIT
)
_LOGGER.debug("Reconnecting in %d seconds", backoff)
await asyncio.sleep(backoff)
_LOGGER.debug("Connect loop ended")
self._task = None
def _auth_problem(self):
"""Problem to authenticate occurred that needs intervention."""
_LOGGER.debug("Authentication error, reconfigure integration")
name = self.config_entry.data.get(CONF_NAME)
identifier = self.config_entry.unique_id
self.hass.components.persistent_notification.create(
"An irrecoverable connection problem occurred when connecting to "
f"`f{name}`. Please go to the Integrations page and reconfigure it",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
# Add to event queue as this function is called from a task being
# cancelled from disconnect
asyncio.create_task(self.disconnect())
self.hass.async_create_task(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={CONF_NAME: name, CONF_IDENTIFIER: identifier},
)
)
async def _scan(self):
"""Try to find device by scanning for it."""
identifier = self.config_entry.unique_id
address = self.config_entry.data[CONF_ADDRESS]
protocol = Protocol(self.config_entry.data[CONF_PROTOCOL])
_LOGGER.debug("Discovering device %s", identifier)
atvs = await scan(
self.hass.loop, identifier=identifier, protocol=protocol, hosts=[address]
)
if atvs:
return atvs[0]
_LOGGER.debug(
"Failed to find device %s with address %s, trying to scan",
identifier,
address,
)
atvs = await scan(self.hass.loop, identifier=identifier, protocol=protocol)
if atvs:
return atvs[0]
_LOGGER.debug("Failed to find device %s, trying later", identifier)
return None
async def _connect(self, conf):
"""Connect to device."""
credentials = self.config_entry.data[CONF_CREDENTIALS]
session = async_get_clientsession(self.hass)
for protocol, creds in credentials.items():
conf.set_credentials(Protocol(int(protocol)), creds)
_LOGGER.debug("Connecting to device %s", self.config_entry.data[CONF_NAME])
self.atv = await connect(conf, self.hass.loop, session=session)
self.atv.listener = self
self._dispatch_send(SIGNAL_CONNECTED, self.atv)
self._address_updated(str(conf.address))
await self._async_setup_device_registry()
self._connection_attempts = 0
if self._connection_was_lost:
_LOGGER.info(
'Connection was re-established to Apple TV "%s"', self.atv.service.name
)
self._connection_was_lost = False
async def _async_setup_device_registry(self):
attrs = {
"identifiers": {(DOMAIN, self.config_entry.unique_id)},
"manufacturer": "Apple",
"name": self.config_entry.data[CONF_NAME],
}
if self.atv:
dev_info = self.atv.device_info
attrs["model"] = "Apple TV " + dev_info.model.name.replace("Gen", "")
attrs["sw_version"] = dev_info.version
if dev_info.mac:
attrs["connections"] = {(dr.CONNECTION_NETWORK_MAC, dev_info.mac)}
device_registry = await dr.async_get_registry(self.hass)
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id, **attrs
)
@property
def is_connecting(self):
"""Return true if connection is in progress."""
return self._task is not None
def _address_updated(self, address):
"""Update cached address in config entry."""
_LOGGER.debug("Changing address to %s", address)
self.hass.config_entries.async_update_entry(
self.config_entry, data={**self.config_entry.data, CONF_ADDRESS: address}
)
def _dispatch_send(self, signal, *args):
"""Dispatch a signal to all entities managed by this manager."""
async_dispatcher_send(
self.hass, f"{signal}_{self.config_entry.unique_id}", *args
)
| apache-2.0 | 6,061,294,520,486,808,000 | 31.335979 | 87 | 0.599607 | false |
strazzere/pfp | setup.py | 1 | 1092 | #!/usr/bin/env python
# encoding: utf-8
import os, sys
from setuptools import setup
setup(
# metadata
name='pfp',
description='An 010 template interpreter for Python',
long_description="""
pfp is an 010 template interpreter for Python. It accepts an
input data stream and an 010 template and returns a modifiable
DOM of the parsed data. Extensions have also been added to the
010 template syntax to allow for linked fields (e.g. checksums,
length calculations, etc), sub structures in compressed data,
etc.
""",
license='MIT',
version='0.1.11',
author='James Johnson',
maintainer='James Johnson',
author_email='d0c.s4vage@gmail.com',
url='https://github.com/d0c-s4vage/pfp',
platforms='Cross Platform',
download_url="https://github.com/d0c-s4vage/pfp/tarball/v0.1.11",
install_requires = open(os.path.join(os.path.dirname(__file__), "requirements.txt")).read().split("\n"),
classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',],
packages=['pfp', 'pfp.native'],
)
| mit | 4,640,449,585,166,833,000 | 33.125 | 105 | 0.67674 | false |
linuxscout/tashaphyne | setup.py | 1 | 1053 | #!/usr/bin/python
from setuptools import setup
# to install type:
# python setup.py install --root=/
def readme():
with open('README.rst', encoding="utf8") as f:
return f.read()
setup (name='Tashaphyne', version='0.3.5',
description='Tashaphyne Arabic Light Stemmer',
long_description = readme(),
author='Taha Zerrouki',
author_email='taha.zerrouki@gmail.com',
url='http://github.com/linuxscout/tashaphyne/',
license='GPL',
package_dir={'tashaphyne': 'tashaphyne',},
packages=['tashaphyne'],
include_package_data=True,
install_requires=[ 'pyarabic',
],
package_data = {
'tashaphyne': ['doc/*.*','doc/html/*'],
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Natural Language :: Arabic',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Text Processing :: Linguistic',
],
);
| gpl-3.0 | 7,358,732,075,506,824,000 | 30.909091 | 56 | 0.577398 | false |
lizardsystem/lizard-efcis | lizard_efcis/migrations/0053_auto_20150706_1209.py | 1 | 6380 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lizard_efcis', '0052_opname_validation_state'),
]
operations = [
migrations.AddField(
model_name='locatie',
name='afvoergebied',
field=models.CharField(max_length=255, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='locatie',
name='grondsoort',
field=models.CharField(max_length=255, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='locatie',
name='landgebruik',
field=models.CharField(max_length=255, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='act_oms',
field=models.TextField(null=True, verbose_name='omschrijving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='act_type',
field=models.CharField(default='Meting', max_length=10, verbose_name='type activiteit', choices=[('', ''), ('Meting', 'Meting'), ('Toetsing', 'Toetsing')]),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_fc',
field=models.TextField(null=True, verbose_name='methode fysisch-chemisch', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_fyt',
field=models.TextField(null=True, verbose_name='methode fytoplankton', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_mafa',
field=models.TextField(null=True, verbose_name='methode macrofauna', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_mafy',
field=models.TextField(null=True, verbose_name='methode macrofyten', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_toets',
field=models.TextField(null=True, verbose_name='methode toetsing', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='activiteit',
name='met_vis',
field=models.TextField(null=True, verbose_name='methode vissen', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='compartiment',
name='comp_oms',
field=models.TextField(null=True, verbose_name='omschrijving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='eenheid',
name='eenheid_oms',
field=models.TextField(null=True, verbose_name='omschrijving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='hoedanigheid',
name='hoed_oms',
field=models.TextField(null=True, verbose_name='omschriving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='locatie',
name='loc_id',
field=models.CharField(unique=True, max_length=50, verbose_name='code locatie'),
preserve_default=True,
),
migrations.AlterField(
model_name='locatie',
name='loc_oms',
field=models.TextField(null=True, verbose_name='omschrijving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='parameter',
name='casnummer',
field=models.CharField(max_length=30, null=True, verbose_name='CAS-nummer', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='parameter',
name='par_code',
field=models.CharField(max_length=30, verbose_name='code'),
preserve_default=True,
),
migrations.AlterField(
model_name='parameter',
name='par_oms',
field=models.CharField(max_length=255, null=True, verbose_name='omschrijving', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='parametergroep',
name='code',
field=models.CharField(unique=True, max_length=255, verbose_name='parametergroepnaam'),
preserve_default=True,
),
migrations.AlterField(
model_name='statuskrw',
name='code',
field=models.CharField(unique=True, max_length=50, verbose_name='status watertype'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterlichaam',
name='wl_code',
field=models.CharField(max_length=20, verbose_name='code'),
preserve_default=True,
),
migrations.AlterField(
model_name='waterlichaam',
name='wl_naam',
field=models.CharField(max_length=255, null=True, verbose_name='naam', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='waterlichaam',
name='wl_type',
field=models.CharField(max_length=10, null=True, verbose_name='type', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='wns',
name='wns_code',
field=models.CharField(unique=True, max_length=30, verbose_name='code WNS'),
preserve_default=True,
),
migrations.AlterField(
model_name='wns',
name='wns_oms',
field=models.CharField(verbose_name='omschrijving', max_length=255, null=True, editable=False, blank=True),
preserve_default=True,
),
]
| gpl-3.0 | -9,061,670,086,687,710,000 | 36.529412 | 168 | 0.560502 | false |
zielmicha/hera | hera/webapp/urls.py | 1 | 1682 | from django.conf.urls import include, url, patterns
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import RedirectView, TemplateView
from hera.webapp import account_views
from hera.webapp import sandbox_views
from hera.webapp import run_views
admin.autodiscover()
urlpatterns = patterns('',
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', admin.site.urls),
url(r'^accounts/', include('registration.backends.simple.urls')),
url(r'^accounts/profile', RedirectView.as_view(url='/account')),
url(r'^account$', RedirectView.as_view(url='/account/')),
url(r'^users/', RedirectView.as_view(url='/account')),
url(r'^$', TemplateView.as_view(template_name='main.html')),
url(r'^sandbox/(.+)/$', sandbox_views.Sandbox.as_view()),
url(r'^account/$', account_views.UserOverview.as_view()),
url(r'^account/billing$', account_views.UserBilling.as_view()),
url(r'^account/(.+)/overview$', account_views.AccountOverview.as_view()),
url(r'^account/(.+)/api$', account_views.AccountAPI.as_view()),
url(r'^account/(.+)/templates$', account_views.AccountTemplates.as_view()),
url(r'^run/(\d+)/$', run_views.MainView.as_view()),
url(r'^run/attach$', run_views.attach),
) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| agpl-3.0 | 6,931,740,068,921,254,000 | 59.071429 | 98 | 0.580856 | false |
Flavsditz/projects | eyeTracking/pupil/pupil_src/capture/recorder.py | 1 | 4963 | import os, sys
import cv2
import atb
import numpy as np
from plugin import Plugin
from time import strftime,localtime,time,gmtime
from ctypes import create_string_buffer
from git_version import get_tag_commit
class Recorder(Plugin):
"""Capture Recorder"""
def __init__(self, session_str, fps, img_shape, shared_record, eye_tx):
Plugin.__init__(self)
self.session_str = session_str
self.base_path = os.path.join(os.path.abspath(__file__).rsplit('pupil_src', 1)[0], "recordings")
self.shared_record = shared_record
self.frame_count = 0
self.timestamps = []
self.eye_tx = eye_tx
self.start_time = time()
# set up base folder called "recordings"
try:
os.mkdir(self.base_path)
except:
print "recordings folder already exists, using existing."
session = os.path.join(self.base_path, self.session_str)
try:
os.mkdir(session)
except:
print "recordings session folder already exists, using existing."
# set up self incrementing folder within session folder
counter = 0
while True:
self.path = os.path.join(self.base_path, session, "%03d/" % counter)
try:
os.mkdir(self.path)
break
except:
print "We dont want to overwrite data, incrementing counter & trying to make new data folder"
counter += 1
self.meta_info_path = os.path.join(self.path, "info.csv")
with open(self.meta_info_path, 'w') as f:
f.write("Pupil Recording Name:\t"+self.session_str+ "\n")
f.write("Start Date: \t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
f.write("Start Time: \t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")
video_path = os.path.join(self.path, "world.avi")
self.writer = cv2.VideoWriter(video_path, cv2.cv.CV_FOURCC(*'DIVX'), fps, (img_shape[1], img_shape[0]))
self.height = img_shape[0]
self.width = img_shape[1]
# positions path to eye process
self.shared_record.value = True
self.eye_tx.send(self.path)
atb_pos = (10, 540)
self._bar = atb.Bar(name = self.__class__.__name__, label='REC: '+session_str,
help="capture recording control", color=(220, 0, 0), alpha=150,
text='light', position=atb_pos,refresh=.3, size=(300, 80))
self._bar.rec_name = create_string_buffer(512)
self._bar.add_var("rec time",self._bar.rec_name, getter=lambda: create_string_buffer(self.get_rec_time_str(),512), readonly=True)
self._bar.add_button("stop", self.stop_and_destruct, key="s", help="stop recording")
self._bar.define("contained=true")
def get_rec_time_str(self):
rec_time = gmtime(time()-self.start_time)
return strftime("%H:%M:%S", rec_time)
def update(self, frame):
self.frame_count += 1
self.timestamps.append(frame.timestamp)
self.writer.write(frame.img)
def stop_and_destruct(self):
try:
camera_matrix = np.load("camera_matrix.npy")
dist_coefs = np.load("dist_coefs.npy")
cam_path = os.path.join(self.path, "camera_matrix.npy")
dist_path = os.path.join(self.path, "dist_coefs.npy")
np.save(cam_path, camera_matrix)
np.save(dist_path, dist_coefs)
except:
print "no camera intrinsics found, will not copy them into recordings folder"
timestamps_path = os.path.join(self.path, "timestamps.npy")
np.save(timestamps_path,np.array(self.timestamps))
try:
with open(self.meta_info_path, 'a') as f:
f.write("Duration Time: \t"+ self.get_rec_time_str()+ "\n")
f.write("World Camera Frames: \t"+ str(self.frame_count)+ "\n")
f.write("World Camera Resolution: \t"+ str(self.width)+"x"+str(self.height)+"\n")
f.write("Capture Software Version: \t"+ get_tag_commit()+ "\n")
f.write("user:\t"+os.getlogin()+"\n")
try:
sysname, nodename, release, version, machine = os.uname()
except:
sysname, nodename, release, version, machine = sys.platform,None,None,None,None
f.write("Platform:\t"+sysname+"\n")
f.write("Machine:\t"+nodename+"\n")
f.write("Release:\t"+release+"\n")
f.write("Version:\t"+version+"\n")
except:
print "Could not save metadata. Please report this bug!"
print "Stopping recording"
self.shared_record.value = False
self.alive = False
def __del__(self):
"""incase the plugin get deleted while recording
"""
self.stop_and_destruct()
def get_auto_name():
return strftime("%Y_%m_%d", localtime())
| gpl-2.0 | -8,185,987,220,556,389,000 | 39.024194 | 137 | 0.576667 | false |
ghetzel/webfriend | webfriend/utils/__init__.py | 1 | 1517 | import os.path
import random
import inspect
import importlib
import string
PACKAGE_ROOT = os.path.abspath(
os.path.dirname(
os.path.dirname(__file__)
)
)
PACKAGE_NAME = os.path.basename(PACKAGE_ROOT)
def random_string(count, charset=string.lowercase + string.digits):
return ''.join(random.sample(charset, count))
def autotype(value):
if isinstance(value, basestring):
if value.lower() == 'true':
return True
elif value.lower() == 'false':
return False
else:
try:
return int(value)
except ValueError:
pass
try:
return float(value)
except ValueError:
pass
return value
def get_module_from_string(string, package=None):
parts = string.split('.')
remainder = []
while len(parts):
try:
return importlib.import_module('.'.join(parts), package=package), remainder
except ImportError:
remainder = [parts.pop()] + remainder
return None, string.split('.')
def resolve_object(parts, parent=None):
if not parent:
parent = globals()
while len(parts):
proceed = False
for member in inspect.getmembers(parent):
if member[0] == parts[0]:
parent = member[1]
parts = parts[1:]
proceed = True
break
if not proceed:
return None
return parent
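
# Hedged example (added) combining the helpers above; 'os.path.join' is an
# arbitrary dotted path chosen for illustration.
if __name__ == "__main__":
    module, remainder = get_module_from_string("os.path.join")
    join_func = resolve_object(remainder, parent=module)
    print(join_func("a", "b"))  # expected to print the joined path, e.g. "a/b"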
| bsd-2-clause | 6,987,214,187,512,982,000 | 20.671429 | 87 | 0.552406 | false |
ProjetPP/PPP-QuestionParsing-PLYFrench | ppp_french_parser/parser.py | 1 | 10869 | import os
import itertools
import threading
import subprocess
from ply import lex, yacc
from nltk.corpus import wordnet
from collections import namedtuple, deque
from ppp_datamodel import Resource, Triple, Missing
from .config import Config
class ParserException(Exception):
pass
FORMS_ETRE = frozenset(filter(bool, '''
suis es est sommes êtes sont étais était
étions êtiez étaient
'''.split(' ')))
FORMS_AVOIR = frozenset(filter(bool, '''
ai as a avons avez ont avais avait
avions aviez avaient
'''.split(' ')))
class CoolLexToken(lex.LexToken):
"""LexToken with a constructor."""
def __init__(self, type, value, lineno, lexpos):
self.type = type
self.value = value
self.lineno = lineno
self.lexpos = lexpos
def is_etre(v):
if v.lower() in FORMS_ETRE:
return True
else:
return False
def is_avoir(v):
if v.lower() in FORMS_AVOIR:
return True
else:
return False
class Nom(str):
pass
class Pronom(str):
pass
class Article(str):
pass
class IntroCompl(str):
pass
class Verbe(str):
pass
class TokenList(tuple):
pass
class MotInterrogatif(str):
pass
class Hole:
pass
tokens = ('TOKENLIST',
'INTRO_COMPL',
'MOT_INTERROGATIF', 'ARTICLE', 'NOM', 'VERBE',
'GROUPE_NOMINAL', 'PRONOM',
)
t_ignore = ' \n'
def t_error(t):
raise ParserException('Illegal string `%r`' % t.value)
def t_PONCTUATION(t):
'''[^ "]*_PUNC '''
return None
def t_MOT_INTERROGATIF(t):
'''[^ ]*_(ADVWH|ADJWH|PROWH|DETWH) '''
t.value = MotInterrogatif(t.value.rsplit('_', 1)[0])
return t
def t_intro_compl_simpl(t):
'''(de|des|du)_P[ ]'''
t.type = 'INTRO_COMPL'
t.value = IntroCompl(t.value.rsplit('_', 1)[0])
return t
def t_intro_compl_apostrophe(t):
'''d['’]'''
t.type = 'INTRO_COMPL'
t.value = IntroCompl('d')
return t
def t_ARTICLE(t):
'''[^ ]*(?<!\bde)_DET '''
if t.value.startswith('l’') or t.value.startswith('l\''):
        # Stupid tagger:
# * Quel_ADJWH est_V l’âge_NC de_P Obama_NPP ?_PUNC
# * Quel_ADJWH est_V l’âge_DET d’Obama_NPP ?_PUNC
t.type = 'GROUPE_NOMINAL'
t.value = GroupeNominal(Article('l'), [], Nom(t.value.rsplit('_', 1)[0][2:]))
else:
t.value = Article(t.value.rsplit('_', 1)[0])
return t
def t_PRONOM(t):
'''[^ ]*(?<! des)_P[ ]'''
t.value = Pronom(t.value.rsplit('_', 1)[0])
return t
def t_GROUPE_NOMINAL(t): # Stupid tagger
'''[^ ]*['’][^ ]*_(VINF|ADJ|NC) '''
v = t.value.rsplit('_', 1)[0]
(det, noun) = v.replace('’', '\'').split('\'', 1)
t.value = GroupeNominal(Article(det), [], Nom(noun))
return t
def t_NOM_complement(t):
'''d[’'](?P<content>[^ ]*)_(N|NC|NPP)[ ]'''
t.type = 'TOKENLIST'
t.value = TokenList([
LexToken('INTRO_COMPL', IntroCompl('d'), t.lineno, t.lexpos),
LexToken('NOM', Nom(lexer.lexmatch.group('content')), t.lineno, t.lexpos),
])
return t
def t_NOM(t):
'''[^ ]*_(N|NC|NPP)[ ]'''
assert not t.value.startswith('d’') and not t.value.startswith('d\'')
t.value = Nom(t.value.rsplit('_', 1)[0])
return t
def t_quotes(t):
'''"_PUNC (?P<content>[^"]*) "_PUNC'''
t.type = 'NOM'
c = lexer.lexmatch.group('content')
t.value = Nom(' '.join(x.rsplit('_', 1)[0] for x in c.split(' ')).strip())
return t
def t_VERBE(t):
'''[^ -]*_(V|VPP)[ ]'''
t.value = Verbe(t.value.rsplit('_', 1)[0])
return t
def t_verbe_sujet(t):
'''[^ ]*-[^ ]*_VPP '''
t.type = 'TOKENLIST'
t.value = t.value.rsplit('_', 1)[0]
(verb, noun) = t.value.split('-', 1)
t.value = TokenList([
CoolLexToken('VERBE', Verbe(verb), t.lineno, t.lexpos),
CoolLexToken('PRONOM', Nom(noun), t.lineno, t.lexpos),
])
return t
class DecomposingLexer:
def __init__(self):
self._backend = lex.lex()
self._buffer = deque()
def input(self, s):
self._backend.input(s)
def _token(self):
if self._buffer:
return self._buffer.popleft()
else:
token = self._backend.token()
if token and isinstance(token.value, TokenList):
self._buffer.extend(token.value[1:])
return token.value[0]
else:
return token
def token(self):
t = self._token()
assert not isinstance(t, TokenList), t
return t
@property
def lexmatch(self):
return self._backend.lexmatch
lexer = DecomposingLexer()
precedence = (
('right', 'INTRO_COMPL'),
)
class GroupeNominal(namedtuple('_GN', 'article qualificateurs nom')):
pass
def det_to_resource(det):
det = det.lower()
if det in ('mon', 'ma', 'mes', 'me', 'je', 'moi'):
return Resource('moi')
elif det in ('ton', 'ta', 'tes', 'te', 'tu', 'toi'):
return Resource('toi')
elif det in ('son', 'sa', 'ses', 'lui', 'elle', 'il', 'iel'):
return Resource('ellui')
else:
return None
def gn_to_subject(gn):
if gn.article:
return det_to_resource(gn.article)
else:
return None
def gn_to_triple(gn):
if gn.qualificateurs:
# TODO
return Triple(
gn_to_triple(gn.qualificateurs[0]),
Resource(gn.nom),
Missing())
elif gn_to_subject(gn):
return Triple(
gn_to_subject(gn),
Resource(gn.nom),
Missing())
else:
return Resource(gn.nom)
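# noun_to_predicate / verb_to_predicate look the word up in WordNet (assuming
# `wordnet` is NLTK's corpus reader with the French Open Multilingual WordNet
# data installed) and fall back to the raw word when no French lemma is found.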
def noun_to_predicate(noun):
l = wordnet.synsets(noun, pos='n', lang='fra')
fr_nouns = itertools.chain.from_iterable(
x.lemma_names('fra') for x in l)
fr_nouns = list(fr_nouns)
if fr_nouns:
return Resource(fr_nouns[0]) # TODO multiple
else:
return Resource(noun)
def verb_to_predicate(verb):
l = wordnet.synsets(verb, lang='fra')
    # XXX maybe add pos='v'? (note: that would not work for forms other than the infinitive)
lemmas = itertools.chain.from_iterable(
x.lemmas() for x in l if x.pos() == 'v' or True)
drf = itertools.chain.from_iterable(
x.derivationally_related_forms() for x in lemmas)
nouns = (
x for x in drf
if x.synset().pos() == 'n')
fr_nouns = itertools.chain.from_iterable(
x.synset().lemma_names('fra') for x in nouns)
fr_nouns = list(fr_nouns)
if fr_nouns:
return Resource(fr_nouns[0]) # TODO multiple
else:
return Resource(verb)
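# --- PLY grammar ---
# Each p_* function is a yacc production: the docstring holds the BNF rule and
# t[0] receives the semantic value built from the right-hand-side symbols.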
def p_verbe_simple(t):
'''verbe : VERBE'''
t[0] = t[1]
def p_verbe_compose(t):
'''verbe : VERBE VERBE'''
if is_etre(t[1]) or is_avoir(t[1]):
t[0] = Verbe(t[2])
else:
assert False
def p_groupe_nominal_nom(t):
'''groupe_nominal : NOM'''
t[0] = GroupeNominal(None, [], t[1])
def p_groupe_nominal_gn(t):
'''groupe_nominal : GROUPE_NOMINAL'''
t[0] = t[1]
def p_groupe_nominal_simple(t):
'''groupe_nominal_simple : ARTICLE NOM'''
t[0] = GroupeNominal(t[1], [], t[2])
def p_groupe_nominal_base(t):
'''groupe_nominal : groupe_nominal_simple'''
t[0] = t[1]
def p_groupe_nominal_det_nom_compl(t):
'''groupe_nominal : groupe_nominal INTRO_COMPL groupe_nominal'''
if t[1].nom.lower() in ('date', 'lieu') and t[3].qualificateurs:
# Compress stuff like « date de naissance »
t[0] = GroupeNominal(t[1].article, t[3].qualificateurs,
'%s de %s' % (t[1].nom, t[3].nom))
else:
t[0] = GroupeNominal(t[1].article, [t[3]], t[1].nom)
def p_question_verb_first(t):
'''question : MOT_INTERROGATIF verbe groupe_nominal'''
word = t[1].lower()
if word in ('quel', 'quelle', 'qui'):
if is_etre(t[2]):
t[0] = gn_to_triple(t[3])
else:
t[0] = Triple(
gn_to_triple(t[3]),
verb_to_predicate(t[2]),
Missing())
elif word in ('où',):
if is_etre(t[2]):
t[0] = Triple(
gn_to_triple(t[3]),
Resource('localisation'),
Missing())
else:
assert False, t[2]
else:
assert False, word
def p_question_noun_first(t):
'''question : MOT_INTERROGATIF NOM VERBE PRONOM'''
word = t[1].lower()
if word in ('quel', 'quelle', 'qui'):
if is_avoir(t[3]) or is_etre(t[3]):
t[0] = Triple(
det_to_resource(t[4]),
noun_to_predicate(t[2]),
Missing())
else:
assert False, t[3]
else:
assert False, word
def p_error(t):
if t is None:
raise ParserException('Unknown PLY error.')
else:
raise ParserException("Syntax error at '%s' (%s)" %
(t.value, t.type))
parser = yacc.yacc(start='question', write_tables=0)
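# write_tables=0 keeps PLY from writing a parsetab file to disk; the parsing
# tables are rebuilt in memory on every import.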
interpreters = [
'/usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java',
'/usr/lib/jvm/java-8-oracle/bin/java',
'/usr/local/bin/java',
'/usr/bin/java',
]
tagger_options = [
'-mx300m',
'edu.stanford.nlp.tagger.maxent.MaxentTagger',
]
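# The candidate JVM paths above are tried in order; the tagger options launch
# the Stanford MaxentTagger, with the classpath and model taken from Config()
# (assumed to be defined elsewhere in this package).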
class Tagger:
"""Runs an instance of a POS tagger and provides it through the 'tag'
method.
Thread-safe."""
def __init__(self):
self.lock = threading.Lock()
self.process = None
    def select_interpreter(self):
        for interpreter in interpreters:
            if os.path.isfile(interpreter):
                return [interpreter]
        else:
            # No known JVM path exists; fall back to whatever `java` is on the PATH.
            return ['/usr/bin/env', 'java']
def start(self):
interpreter = self.select_interpreter()
print('Using interpreter: %s' % interpreter)
class_path = ['-classpath', Config().class_path]
model = ['-model', Config().model]
self.process = subprocess.Popen(
interpreter + class_path + tagger_options + model,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=None,
universal_newlines=True)
def tag(self, s):
with self.lock:
if not self.process:
self.start()
try:
self.process.stdin.write('')
self.process.stdin.flush()
except IOError:
self.start()
self.process.stdin.write(s + '\n')
self.process.stdin.flush()
return self.process.stdout.readline()
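# The tagger is expected to return one line of « word_TAG » pairs (e.g.
# « Quel_ADJWH est_V l’âge_NC de_P Obama_NPP ?_PUNC »), which is exactly the
# format the lexer rules above match.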
tagger = Tagger()
def parse(s):
s = tagger.tag(s) + ' '
"""
# Useful for debugging the lexer
lexer.input(s)
while True:
tok = lexer.token()
if not tok:
break
else:
print(tok)"""
return parser.parse(s, lexer=lexer)
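# Rough usage sketch (assumes the Stanford tagger and French WordNet data are
# available, and that Triple/Resource/Missing are defined earlier in this
# module):
#     parse('Quel est l’âge de Obama ?')
#     # -> Triple(Resource('Obama'), Resource('âge'), Missing())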
| mit | -5,150,446,832,958,763,000 | 27.671958 | 85 | 0.540413 | false |