repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
sipdbg/sipdbg | logger.py | 1 | 1803 | import sys
import logging
RESET_SEQ = "\033[0m"
COLOR_SEQ = "\033[1;%dm"
BOLD_SEQ = "\033[1m"
def formatter_message(message, use_color = True):
if use_color:
message = message.replace("$RESET", RESET_SEQ).replace(
"$BOLD", BOLD_SEQ)
else:
message = message.replace("$RESET", "").replace("$BOLD", "")
return message
GREY, RED, GREEN, YELLOW , BLUE, PURPLE, AZUR, WHITE, BLACK = range (9)
COLORS = {
'DEBUG' : YELLOW,
'INFO' : GREEN,
'WARNING' : RED,
'ERROR' : BLACK,
'CRITICAL' : BLACK
}
class ColoredFormatter (logging.Formatter):
def __init__ (self, msg, use_color = True):
logging.Formatter.__init__ (self, msg)
self.use_color = use_color
def format (self, record):
levelname = record.levelname
if self.use_color and levelname in COLORS:
levelname_color = COLOR_SEQ % (30 + COLORS [levelname]) + levelname [:1] + RESET_SEQ
record.levelname = levelname_color
return logging.Formatter.format (self, record)
class ColoredLogger (logging.Logger):
FORMAT = "[%(levelname)s] %(message)s"
COLOR_FORMAT = formatter_message (FORMAT, True)
def __init__ (self, name):
logging.Logger.__init__ (self, name, logging.INFO)
color_formatter = ColoredFormatter (self.COLOR_FORMAT)
console = logging.StreamHandler (sys.stdout)
console.setFormatter (color_formatter)
self.addHandler (console)
return
if '__main__' == __name__:
logging.setLoggerClass (ColoredLogger)
logger = ColoredLogger ("MyTestLogger")
logger.debug ("debugmsg")
logger.info ("infomsg")
logger.warn ("warnmsg")
logger.error ("errormsg")
# http://docs.python.org/2/library/logging.handlers.html#memoryhandler
| gpl-2.0 | -2,628,304,185,319,130,000 | 31.196429 | 96 | 0.621742 | false |
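A minimal usage sketch for the colored logger above (assuming the snippet is saved and importable as logger.py; only names defined in the snippet are used, the logger name and messages are illustrative):

import logging
from logger import ColoredLogger

logging.setLoggerClass(ColoredLogger)   # loggers created after this call are ColoredLogger instances
log = logging.getLogger("example")      # handler and INFO level are set up in ColoredLogger.__init__
log.info("infomsg")                     # printed with the level letter colorized
log.error("errormsg")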
AlexStarov/Shop | applications/discount/management/commands/processing_actions.py | 1 | 6004 | # -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
from applications.product.models import Category, Product
from applications.discount.models import Action
__author__ = 'Alex Starov'
class Command(BaseCommand, ):
def handle(self, *args, **options):
try:
action_category = Category.objects.get(url=u'акции', )
except Category.DoesNotExist:
action_category = False
""" Выключаем продукты из "АКЦИИ" срок действия акции которой уже подощёл к концу """
action_not_active = Action.objects.not_active()
if action_not_active:
print 'Action - NOT ACTIVE:', action_not_active
for action in action_not_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
"""
Если акция с авто окончанием,
то заканчиваем еЁ.
"""
if action.auto_end:
products_of_action = action.product_in_action.in_action()
if len(products_of_action, ) > 0:
print 'Product auto_end:', products_of_action
for product in products_of_action:
print 'Del product from Action: ', product
"""
Помечает товар как не учавствующий в акции
"""
if action_category:
product.category.remove(action_category, )
product.in_action = False
if action.auto_del_action_from_product:
if action_category:
product.action.remove(action, )
product.save()
if action.auto_del:
action.deleted = True
action.save()
action_active = Action.objects.active()
if action_active:
print 'Action - ACTIVE:', action_active
for action in action_active:
products_of_action = action.product_in_action.all()
print 'All products:', products_of_action
"""
Если акция с автостартом,
то мы еЁ стартуем.
"""
if action.auto_start:
""" Включаем галочку 'Учавствует в акции' всем продуктам которые внесены в акцию
исключая продукты 'отсутсвующие на складе' """
products_of_action = action.product_in_action.exclude(is_availability=4, )
if len(products_of_action, ) > 0:
print 'Product auto_start:', products_of_action
for product in products_of_action:
""" Помечает товар как учавствующий в акции """
product.in_action = True
""" Добавляем категорию 'Акция' в товар """
if action_category:
product.category.add(action_category, )
product.save()
""" Удаляем товары учавствующие в активной акции но при этом 'отсутсвующие на складе' """
products_remove_from_action = action.product_in_action.exclude(is_availability__lt=4, )
if len(products_remove_from_action, ) > 0:
print 'Product auto_start remove:', products_remove_from_action
for product in products_remove_from_action:
""" Помечает товар как не учавствующий в акции """
product.in_action = False
""" Удаляем категорию 'Акция' из товара """
if action_category:
product.category.remove(action_category, )
product.save()
""" Убираем галочку 'участвует в акции' всем продуктам у которых она почемуто установлена,
но при этом отсутвует хоть какая то акция """
products = Product.objects.filter(in_action=True, action=None, ).update(in_action=False, )
        print 'Products removed from the promotion because they were withdrawn from it: ', products
""" Убираем галочку 'участвует в акции' всем продуктам которые отсутсвуют на складе """
products = Product.objects.filter(in_action=True, is_availability=4, ).update(in_action=False, )
        print 'Products removed from the promotion because they are out of stock: ', products
""" Делаем активной акционную категорию, если есть хоть один акционный товар """
all_actions_products = action_category.products.all()
if len(all_actions_products) != 0 and not action_category.is_active:
action_category.is_active = True
action_category.save()
elif len(all_actions_products) == 0 and action_category.is_active:
action_category.is_active = False
action_category.save()
| apache-2.0 | -2,092,252,847,127,148,000 | 51.525253 | 109 | 0.527115 | false |
timjlittle/Python | noughtsandcrosses.py | 1 | 3635 | #Noughts and crosses
board = ['1','2','3','4','5','6','7','8','9']
player1char = 'X'
player2char = 'O'
def show_board ():
"""Loops through the board list to display the players' positions"""
counter = 1
linecounter = 1
line = ''
for item in board:
line = line + item
#Add the vertical line unless at the last place
if counter < 3:
line = line + '|'
#When a row is completed print it and the horizontal divider line
if counter == 3:
print (line)
if linecounter < 3:
print ('-----')
counter = 0
linecounter = linecounter + 1
line = ''
counter = counter + 1
def check_line (playerchar):
"""Checks whether the specified player has won"""
#A bit brute force & ignorance
#horizontals
if board[0] == playerchar and board[1] == playerchar and board[2] == playerchar:
return True
if board[3] == playerchar and board[4] == playerchar and board[5] == playerchar:
return True
if board[6] == playerchar and board[7] == playerchar and board[8] == playerchar:
return True
#Diagonals
if board[0] == playerchar and board[4] == playerchar and board[8] == playerchar:
return True
if board[2] == playerchar and board[4] == playerchar and board[6] == playerchar:
return True
#verticals
if board[0] == playerchar and board[3] == playerchar and board[6] == playerchar:
return True
if board[1] == playerchar and board[4] == playerchar and board[7] == playerchar:
return True
if board[2] == playerchar and board[5] == playerchar and board[8] == playerchar:
return True
return False
#Main loop
nextplayer = player1char
#Stop when a player has won or there aren't any more spaces
while not (check_line(player1char) or check_line(player2char) or (board.count(player1char) + board.count(player2char) == 9)):
show_board()
gopos = 0
validpic = False
while not validpic:
#Assume they will enter a number between 1 and 9
#It will get reset to False if they don't
validpic = True;
nextgo = input('player ' + nextplayer + ' place: ')
#Validate the user input
try:
gopos = int(nextgo) - 1
except ValueError:
print ('Must be a number')
validpic = False
if gopos < 0 or gopos > 8:
print ('Choose between 1 and 9')
validpic = False
        #Only check occupancy when the entry passed the earlier checks (avoids an IndexError)
        if validpic and (board[gopos] == player1char or board[gopos] == player2char):
print ('space already taken')
validpic = False
board[gopos] = nextplayer
if check_line (nextplayer):
print('')
show_board()
print ('')
print ('Player ' + nextplayer + ' wins!')
if nextplayer == player1char:
nextplayer = player2char
else:
nextplayer = player1char
if not (check_line(player1char) or check_line(player2char)):
print ('It was a draw')
| mit | 4,750,517,937,082,141,000 | 32.045455 | 126 | 0.488858 | false |
BrainComputationLab/ncs | python/samples/models/test/ncs_synapse_test.py | 1 | 2828 | #!/usr/bin/python
import os,sys
ncs_lib_path = ('../../../../python/')
sys.path.append(ncs_lib_path)
import ncs
def Run(argv):
sim = ncs.Simulation()
excitatory_parameters = sim.addNeuron("label_excitatory",
"izhikevich",
{
"a": 0.2,
"b": 0.2,
"c": -65.0,
"d": ncs.Uniform(7.0, 9.0),
"u": ncs.Uniform(-15.0, -11.0),
"v": ncs.Normal(-60.0, 5.0),
"threshold": 30,
}
)
group_1 = sim.addNeuronGroup("group_1", 1, "label_excitatory", None) # last param is geometry
group_2 = sim.addNeuronGroup("group_2", 1, excitatory_parameters)
flat_parameters = sim.addNeuron("synapse",
"ncs",
{ "utilization": ncs.Normal(0.5, 0.05),
"redistribution": 1.0,
"last_prefire_time": 0.0,
"last_postfire_time": 0.0,
"tau_facilitation": 0.001,
"tau_depression": 0.001,
"tau_ltp": 0.015,
"tau_ltd": 0.03,
"A_ltp_minimum": 0.003,
"A_ltd_minimum": 0.003,
"max_conductance": 0.004,
"reversal_potential": 0.0,
"tau_postsynaptic_conductance": 0.025,
"psg_waveform_duration": 0.05,
"delay": ncs.Uniform(1,5),
})
synapse = sim.addSynapseGroup("1_to_2", group_1, "group_2", 1, flat_parameters)
if not sim.init(argv):
print "Failed to initialize simulation."
return
sim.addStimulus("rectangular_current", { "amplitude": 18.0 }, group_1, 1.0, 0.0, 1.0)
#current_report = sim.addReport(group_2, "neuron", "synaptic_current", 1.0)
current_report = sim.addReport(group_2, "neuron", "synaptic_current", 1.0,0.0,0.05)
current_report.toStdOut()
sim.run(duration = 0.05)
return
if __name__ == "__main__":
Run(sys.argv)
| bsd-2-clause | 8,631,806,545,327,436,000 | 46.932203 | 95 | 0.34901 | false |
trevor/calendarserver | txdav/who/idirectory.py | 1 | 5509 | # -*- test-case-name: txdav.who.test -*-
##
# Copyright (c) 2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from __future__ import absolute_import
"""
Calendar and contacts directory extensions to L{twext.who.idirectory}.
"""
__all__ = [
"AutoScheduleMode",
"RecordType",
"FieldName",
]
from twisted.python.constants import Names, NamedConstant
from twext.who.idirectory import FieldName as BaseFieldName
#
# Data types
#
class AutoScheduleMode(Names):
"""
Constants for automatic scheduling modes.
@cvar none: Invitations are not automatically handled.
@cvar accept: Accept all invitations.
@cvar decline: Decline all invitations.
@cvar acceptIfFree: Accept invitations that do not conflict with a busy
time slot. Other invitations are not automatically handled.
@cvar declineIfBusy: Decline invitations that conflict with a busy time
slot. Other invitations are not automatically handled.
@cvar acceptIfFreeDeclineIfBusy: Accept invitations that do not conflict
with a busy time slot. Decline invitations that conflict with a busy
time slot. Other invitations are not automatically handled.
"""
none = NamedConstant()
none.description = u"no action"
accept = NamedConstant()
accept.description = u"accept"
decline = NamedConstant()
decline.description = u"decline"
acceptIfFree = NamedConstant()
acceptIfFree.description = u"accept if free"
declineIfBusy = NamedConstant()
declineIfBusy.description = u"decline if busy"
acceptIfFreeDeclineIfBusy = NamedConstant()
acceptIfFreeDeclineIfBusy.description = u"accept if free, decline if busy"
class RecordType(Names):
"""
Constants for calendar and contacts directory record types.
@cvar location: Location record.
Represents a schedulable location (eg. a meeting room).
@cvar resource: Resource record.
Represents a schedulable resource (eg. a projector, conference line,
etc.).
@cvar address: Address record.
Represents a physical address (street address and/or geolocation).
"""
location = NamedConstant()
location.description = u"location"
resource = NamedConstant()
resource.description = u"resource"
address = NamedConstant()
address.description = u"physical address"
class FieldName(Names):
"""
Constants for calendar and contacts directory record field names.
    Fields are associated with either a single value or an iterable of values.
@cvar serviceNodeUID: For a calendar and contacts service with multiple
nodes, this denotes the node that the user's data resides on.
The associated value must be a L{unicode}.
@cvar loginAllowed: Determines whether a record can log in.
The associated value must be a L{bool}.
@cvar hasCalendars: Determines whether a record has calendar data.
The associated value must be a L{bool}.
@cvar hasContacts: Determines whether a record has contact data.
The associated value must be a L{bool}.
@cvar autoScheduleMode: Determines the auto-schedule mode for a record.
The associated value must be a L{NamedConstant}.
@cvar autoAcceptGroup: Contains the UID for a group record which contains
members for whom auto-accept will behave as "accept if free", even if
auto-accept is set to "manual".
The associated value must be a L{NamedConstant}.
"""
serviceNodeUID = NamedConstant()
serviceNodeUID.description = u"service node UID"
loginAllowed = NamedConstant()
loginAllowed.description = u"login permitted"
loginAllowed.valueType = bool
hasCalendars = NamedConstant()
hasCalendars.description = u"has calendars"
hasCalendars.valueType = bool
hasContacts = NamedConstant()
hasContacts.description = u"has contacts"
hasContacts.valueType = bool
autoScheduleMode = NamedConstant()
autoScheduleMode.description = u"auto-schedule mode"
autoScheduleMode.valueType = AutoScheduleMode
autoAcceptGroup = NamedConstant()
autoAcceptGroup.description = u"auto-accept group"
autoAcceptGroup.valueType = BaseFieldName.valueType(BaseFieldName.uid)
# For "locations", i.e., scheduled spaces:
associatedAddress = NamedConstant()
associatedAddress.description = u"associated address UID"
capacity = NamedConstant()
capacity.description = u"room capacity"
capacity.valueType = int
floor = NamedConstant()
floor.description = u"building floor"
# For "addresses", i.e., non-scheduled areas containing locations:
abbreviatedName = NamedConstant()
abbreviatedName.description = u"abbreviated name"
geographicLocation = NamedConstant()
geographicLocation.description = u"geographic location URI"
streetAddress = NamedConstant()
streetAddress.description = u"street address"
| apache-2.0 | -5,969,269,903,024,639,000 | 29.605556 | 78 | 0.721002 | false |
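The module above only declares constant classes; a short sketch of how the twisted.python.constants API exposes them (the import path follows the file's location in the row above, txdav/who/idirectory.py, and is an assumption):

from txdav.who.idirectory import AutoScheduleMode, FieldName

for mode in AutoScheduleMode.iterconstants():        # walk all declared auto-schedule modes
    print(mode.name, "-", mode.description)

mode = AutoScheduleMode.lookupByName("acceptIfFree")  # look a constant up by its attribute name
assert FieldName.hasCalendars.valueType is bool       # extra attributes attached above are plain Python values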
linaro-technologies/jobserv | runner/jobserv_runner/jobserv.py | 1 | 5624 | # Copyright (C) 2017 Linaro Limited
# Author: Andy Doan <andy.doan@linaro.org>
import datetime
import json
import logging
import mimetypes
import mmap
import os
import time
import urllib.error
import urllib.request
import urllib.parse
from http.client import HTTPException
from multiprocessing.pool import ThreadPool
def split(items, group_size):
return [items[i:i + group_size] for i in range(0, len(items), group_size)]
class PostError(Exception):
pass
def urllib_error_str(e):
if hasattr(e, 'code'):
error = 'HTTP_%d' % e.code
else:
error = 'HTTP Error'
if hasattr(e, 'reason'):
error += ': %s' % e.reason
if hasattr(e, 'read'):
error += '\n' + e.read().decode()
return error
def _post(url, data, headers, raise_error=False):
req = urllib.request.Request(
url, data=data, headers=headers, method='POST')
try:
resp = urllib.request.urlopen(req)
return resp
except urllib.error.URLError as e:
error = urllib_error_str(e)
logging.error('%s: %s', url, error)
if raise_error:
raise PostError(error)
except HTTPException as e:
logging.exception('Unable to post to: ' + url)
if raise_error:
raise PostError(str(e))
class JobServApi(object):
SIMULATED = False
def __init__(self, run_url, api_key):
mimetypes.add_type('text/plain', '.log')
self._run_url = run_url
self._api_key = api_key
def _post(self, data, headers, retry):
if self.SIMULATED:
if data:
return os.write(1, data)
return True
for x in range(retry):
if _post(self._run_url, data, headers):
return True
time.sleep(2*x + 1) # try and give the server a moment
return False
def update_run(self, msg, status=None, retry=2, metadata=None):
headers = {
'content-type': 'text/plain',
'Authorization': 'Token ' + self._api_key,
}
if status:
headers['X-RUN-STATUS'] = status
if metadata:
headers['X-RUN-METADATA'] = metadata
return self._post(msg, headers, retry=retry)
def update_status(self, status, msg, metadata=None):
msg = '== %s: %s\n' % (datetime.datetime.utcnow(), msg)
if self.SIMULATED:
print(msg.replace('==', '== ' + status))
return
if not self.update_run(msg.encode(), status, 8, metadata):
logging.error('TODO HOW TO HANDLE?')
def _get_urls(self, uploads):
headers = {
'content-type': 'application/json',
'Authorization': 'Token ' + self._api_key,
}
url = self._run_url
if url[-1] != '/':
url += '/'
url += 'create_signed'
urls = [x['file'] for x in uploads]
data = json.dumps(urls).encode()
for i in range(1, 5):
try:
resp = _post(url, data, headers)
return json.loads(resp.read().decode())['data']['urls']
except:
if i == 4:
raise
logging.exception('Unable to get urls, sleeping and retrying')
time.sleep(2 * i)
def _upload_item(self, artifacts_dir, artifact, urldata):
# http://stackoverflow.com/questions/2502596/
with open(os.path.join(artifacts_dir, artifact), 'rb') as f:
if f.seek(0, 2) == 0:
# we have an empty file
mapped = b''
else:
mapped = mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ)
try:
headers = {'Content-Type': urldata['content-type']}
req = urllib.request.Request(
urldata['url'], mapped, headers=headers, method='PUT')
urllib.request.urlopen(req)
except urllib.error.URLError as e:
return 'Unable to upload %s: %s' % (
artifact, urllib_error_str(e))
except HTTPException as e:
return 'Unable to upload %s: %s' % (artifact, str(e))
except Exception as e:
return 'Unexpected error for %s: %s' % (artifact, str(e))
def upload(self, artifacts_dir, uploads):
def _upload_cb(data):
e = None
for i in range(1, 5):
e = self._upload_item(artifacts_dir, data[0], data[1])
if not e:
break
msg = 'Error uploading %s, sleeping and retrying' % data[0]
self.update_status('UPLOADING', msg)
time.sleep(2*i) # try and give the server a moment
return e
# it seems that 100 is about the most URLs you can sign in one HTTP
# request, so we'll split up our uploads array into groups of 75 to
# be safe and upload them in bunches
errors = []
upload_groups = split(uploads, 75)
for i, upload_group in enumerate(upload_groups):
if self.SIMULATED:
self.update_status('UPLOADING', 'simulate %s' % upload_group)
continue
urls = self._get_urls(upload_group)
p = ThreadPool(4)
errors.extend([x for x in p.map(_upload_cb, urls.items()) if x])
if len(upload_groups) > 2: # lets give some status messages
msg = 'Uploading %d%% complete' % (
100 * (i + 1) / len(upload_groups))
self.update_status('UPLOADING', msg)
return errors
| agpl-3.0 | -5,419,827,009,303,175,000 | 32.676647 | 78 | 0.538585 | false |
feureau/Small-Scripts | Blender/Blender config/2.91/scripts/addons/bricksculpt_v1-2-0/classes/bricksculpt_choose_paintbrush_material.py | 1 | 2230 | # Copyright (C) 2019 Christopher Gearhart
# chris@bblanimation.com
# http://bblanimation.com/
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# System imports
import bmesh
import math
import importlib
# Blender imports
import bpy
import bgl
from bpy.types import Operator
from bpy.props import *
# Module imports
from .bricksculpt_framework import *
from .bricksculpt_tools import *
from .bricksculpt_drawing import *
from ..functions import *
class BRICKSCULPT_OT_choose_paintbrush_material(Operator):
"""Choose the material of the active BrickSculpt paintbrush tool"""
bl_idname = "bricksculpt.choose_paintbrush_material"
bl_label = "Choose Paintbrush Material"
bl_options = {"REGISTER", "INTERNAL"}
################################################
# Blender Operator methods
@classmethod
def poll(self, context):
scn = bpy.context.scene
return scn.bricksculpt.running_active_session
def execute(self, context):
scn = context.scene
scn.bricksculpt.choosing_material = False
return {"FINISHED"}
def invoke(self, context, event):
return context.window_manager.invoke_props_dialog(self)#, event)
def draw(self, context):
scn = context.scene
layout = self.layout
layout.prop(scn.bricksculpt, "paintbrush_mat")
###################################################
# initialization method
def __init__(self):
bpy.context.window.cursor_set("DEFAULT")
###################################################
# class variables
# NONE!
###################################################
| gpl-3.0 | -3,855,508,242,255,955,000 | 29.135135 | 72 | 0.645291 | false |
AnsgarSchmidt/sensomatic | ng/server/Tank/ChartThread.py | 1 | 7852 | import os
import time
import pytz
import datetime
import requests
import threading
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.dates as mdates
SECOND = 1
MINUTE = 60 * SECOND
HOUR = 60 * MINUTE
DAY = 24 * HOUR
WEEK = 7 * DAY
MONTH = 31 * DAY
YEAR = 365 * DAY
WORKING = 0
REQUESTED = 1
DONE = 2
class ChartThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self._CLOUDANT_HOST = os.environ.get("CLOUDANT_HOST", "localhost")
self._CLOUDANT_USERNAME = os.environ.get("CLOUDANT_USERNAME", "admin" )
self._CLOUDANT_PASSWORD = os.environ.get("CLOUDANT_PASSWORD", "admin" )
self._CLOUDANT_DB = os.environ.get("CLOUDANT_DB", "defaultdb")
self._state = REQUESTED
self.start()
def _draw_chart(self):
smooth = 20
filtersize = 8000
myFmt = mdates.DateFormatter('%d.%b %H:%M')
watertemp_patch = mpatches.Patch(color='blue', label='Water')
airtemp_patch = mpatches.Patch(color='green', label='Air')
heater_patch = mpatches.Patch(color='red', label='Heater')
humidity_patch = mpatches.Patch(color='blue', label='Humidity')
heaterpercentage_patch = mpatches.Patch(color='red', label='Heater Percentage')
sun_patch = mpatches.Patch(color='orange', label='Sun')
moon_patch = mpatches.Patch(color='blue', label='Moon')
data = self._get_cloudant_data()
timeval = self._get_keys(data)
timedt = self._get_date_time(data)
watertemp = self._get_value(data, "watertemp")
airtemp = self._get_value(data, "airtemp" )
heater = self._get_value(data, "heater" )
heaterspecial = self._get_modified(heater, 0.2, 23)
heaterBoolean = self._get_boolean(heater)
humidity = self._get_value(data, "humidity")
moon = self._get_value(data, "moon")
sun = self._get_value(data, "sun")
smoothairtemp = [0] * len(timeval)
smoothhum = [0] * len(timeval)
heaterPercentage = [0] * len(timeval)
# smooth the raw values
for i in range(smooth, len(timeval) - smooth):
airdummy = 0.0
humdummy = 0.0
for j in range(i - smooth, i + smooth):
airdummy += airtemp[j]
humdummy += humidity[j]
airdummy /= (2.0 * smooth)
humdummy /= (2.0 * smooth)
smoothairtemp[i] = airdummy
smoothhum[i] = humdummy
for i in range(len(timeval) - smooth, len(timeval)):
smoothairtemp[i] = smoothairtemp[len(timeval) - smooth - 1]
smoothhum[i] = smoothhum[len(timeval) - smooth - 1]
# Calculate heater percentage
for i in range(filtersize, len(timeval)):
timeOn = 0.0
for m in range(i - filtersize, i):
if heaterBoolean[m]:
timeOn += timeval[m] - timeval[m - 1]
heaterPercentage[i] = (timeOn / (timeval[i] - timeval[i - filtersize])) * 100
# Temp
with plt.xkcd():
duration = 12000
fig = plt.figure(figsize=(20, 15), dpi=256)
ax = fig.add_axes((0.035, 0.068, 0.93, 0.93))
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.plot(timedt[-duration:], watertemp[-duration:], 'blue',
timedt[-duration:], smoothairtemp[-duration:], 'green',
timedt[-duration:], heaterspecial[-duration:], 'red'
)
ax.legend(handles=[watertemp_patch, airtemp_patch, heater_patch])
ax.xaxis.set_major_formatter(myFmt)
fig.autofmt_xdate()
fig.savefig("temperature.png")
# Percentage
with plt.xkcd():
duration = 20000
fig = plt.figure(figsize=(20, 15), dpi=256)
ax = fig.add_axes((0.035, 0.068, 0.93, 0.93))
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.plot(timedt[-duration:], heaterPercentage[-duration:], 'red',
timedt[-duration:], smoothairtemp[-duration:], 'green',
timedt[-duration:], smoothhum[-duration:], 'blue')
ax.legend(handles=[heaterpercentage_patch, airtemp_patch, humidity_patch])
ax.xaxis.set_major_formatter(myFmt)
fig.autofmt_xdate()
fig.savefig("percentage.png")
# sun moon
with plt.xkcd():
duration = 70000
fig = plt.figure(figsize=(20, 15), dpi=256)
ax = fig.add_axes((0.035, 0.068, 0.93, 0.93))
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.plot(timedt[-duration:], sun[-duration:], 'orange',
timedt[-duration:], moon[-duration:], 'blue')
ax.legend(handles=[sun_patch, moon_patch])
ax.xaxis.set_major_formatter(myFmt)
fig.autofmt_xdate()
fig.savefig("sunmoon.png")
def trigger(self):
self._state = REQUESTED
def is_done(self):
if self._state == DONE:
return True
else:
return False
def run(self):
while True:
if self._state == REQUESTED:
self._state = WORKING
self._draw_chart()
self._state = DONE
print("DONE")
time.sleep(1)
def _get_keys(self, data):
keys = []
for i in data:
keys.append(float(i['key']))
return keys
def _get_date_time(self, data):
keys = []
for i in data:
keys.append(datetime.datetime.fromtimestamp(i['key'], tz=pytz.timezone('Europe/Berlin')))
return keys
def _get_value(self, data, name):
values = []
for i in data:
if isinstance(i['value'][name], str):
values.append(float(i['value'][name]))
elif isinstance(i['value'][name], int) or isinstance(i['value'][name], float):
values.append(i['value'][name])
elif i['value'][name] is None:
values.append(0)
else:
print(type(i['value'][name]))
print(i)
return values
def _get_modified(self, data, mulval, addval):
d = []
for i in data:
d.append(addval + (float(i) * mulval))
return d
def _get_boolean(self, data):
d = [False] * len(data)
for i in range(len(data)):
if float(data[i]) == 1:
d[i] = True
return d
def _get_cloudant_data(self):
URL = "https://%s/%s/_design/livingroom/_view/tank?descending=false&startkey=%f&endkey=%f" % (
self._CLOUDANT_HOST, self._CLOUDANT_DB, time.time() - (10 * DAY), time.time())
AUTH = (self._CLOUDANT_USERNAME, self._CLOUDANT_PASSWORD)
try:
result = requests.get(URL, headers={"Content-Type": "application/json"}, auth=AUTH)
if result.status_code == 200:
return result.json()['rows']
else:
print("DB Access result code not ok")
print(result.status_code)
print(result.content)
except Exception as e:
print("Error accessing db")
print(e)
| apache-2.0 | 7,923,787,551,405,824,000 | 36.932367 | 103 | 0.514901 | false |
adambiser/snes-wolf3d-extractor | extractor/ui/optionsframe.py | 1 | 1572 | import tkinter as tk
class OptionsFrame(tk.LabelFrame):
COLUMNS = 3
def __init__(self, parent, settings, **options):
super().__init__(parent, **options)
self.config(text='Export Options',
padx=5,
pady=5)
self.parent = parent
# Add widgets.
tk.Checkbutton(self,
text='Export to subfolder named after ROM',
variable=settings.export_to_subfolder,
).pack(anchor=tk.NW,
)
tk.Checkbutton(self,
text='Combine maps into single file',
variable=settings.combine_maps,
).pack(anchor=tk.NW,
)
subframe = tk.LabelFrame(self, text='ROM Entry Types')
subframe.pack(fill=tk.X,
)
keys = sorted(settings.export.keys())
print(settings.export)
row = 0
col = 0
for key in keys:
tk.Checkbutton(subframe,
text=key,
variable=settings.export[key],
).grid(row=row,
column=col,
sticky=tk.W,
)
col += 1
if col == OptionsFrame.COLUMNS:
row += 1
col = 0
for col in range(OptionsFrame.COLUMNS):
tk.Grid.columnconfigure(subframe, col, weight=1, uniform='a')
| mit | -1,231,272,076,526,034,000 | 34.727273 | 73 | 0.431934 | false |
cryvate/project-euler | project_euler/solutions/problem_51.py | 1 | 1244 | from collections import Counter
from itertools import combinations
from ..library.number_theory.primes import is_prime, prime_sieve
from ..library.base import list_to_number, number_to_list
def solve() -> int:
primes = prime_sieve(1_000_000)
for prime in primes:
if prime < 100_000:
continue
representation = number_to_list(prime)
counter = Counter(representation)
if max(counter.values()) < 3:
continue
masks = []
for digit in counter:
            if digit > 2:  # for an 8-prime family the smallest member's repeated digit can be at most 2
continue
if counter[digit] >= 3:
digit_at = [i for i, d in enumerate(representation)
if d == digit]
masks += list(combinations(digit_at, 3))
for mask in masks:
masked_representation = list(representation)
counter = 0
for digit in range(10):
for index in mask:
masked_representation[-index] = digit
number = list_to_number(masked_representation)
if is_prime(number, primes):
counter += 1
if counter == 8:
return prime
| mit | -7,224,696,637,784,530,000 | 26.043478 | 67 | 0.534566 | false |
sbrodehl/HashCode | Final Round/best_solution_in_the_wuuuuuuurld.py | 1 | 19988 | from random import shuffle
from skimage.morphology import skeletonize, medial_axis
from tqdm import tqdm
from scipy import signal
import scipy.ndimage.filters as fi
import pickle
import glob
import bz2
import multiprocessing
from multiprocessing import Pool
from functools import partial
from IO import *
from Utilities import compute_solution_score, wireless_access, quasi_euclidean_dist, chessboard_dist
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree
def place_routers_on_skeleton(d, cmethod):
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0)
# perform skeletonization
skeleton = skeletonize(wireless)
med_axis = medial_axis(wireless)
skel = skeleton
# skel = med_axis
# get all skeleton positions
pos = []
for i in range(skel.shape[0]):
for j in range(skel.shape[1]):
if skel[i][j]:
pos.append((i, j))
budget = d['budget']
shuffle(pos)
max_num_routers = min([int(d['budget'] / d['price_router']), len(pos)])
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
print(" skeleton: %d" % len(pos))
for i in tqdm(range(max_num_routers), desc="Placing Routers"):
new_router = pos[i]
a, b = new_router
# check if remaining budget is enough
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, new_router, budget)
budget -= cost
if not ret:
break
return d
def place_routers_on_skeleton_iterative(d, cmethod):
budget = d['budget']
R = d['radius']
max_num_routers = int(d['budget'] / d['price_router'])
coverage = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.bool)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# perform skeletonization
# skeleton = skeletonize(coverage)
skeleton = medial_axis(coverage)
# get all skeleton positions
pos = np.argwhere(skeleton > 0).tolist()
# escape if no positions left
if not len(pos):
break
# get a random position
shuffle(pos)
a, b = pos[0]
# place router
d["graph"][a][b] = Cell.Router
d, ret, cost = _add_cabel(d, (a, b), budget)
if not ret:
print("No budget available!")
break
budget -= cost
# refresh wireless map by removing new coverage
m = wireless_access(a, b, R, d['graph']).astype(np.bool)
coverage[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~m
pbar.update()
pbar.close()
return d
def place_routers_randomized(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
wireless = np.where(d["graph"] == Cell.Wireless, 0, 1)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
if cmethod == 'mst':
        succ, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
for i in pbar:
# generate random position for router
indices = np.argwhere(wireless == 0).tolist()
x, y = indices[np.random.randint(0, len(indices))]
if len(indices) == 0:
pbar.close()
print("No more suitable positions left!")
return d
# modify graph
if cmethod == 'bfs':
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# no more budget left
pbar.close()
print("No budget available!")
return d
elif cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
            succ, cost, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
if succ and i < 10:
mask = wireless_access(x, y, R, d['graph'])
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] |= mask.astype(np.bool)
else:
# reverse last router
d["graph"][x][y] = tmp
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
print("No budget available!")
return d
pbar.update(max_num_routers)
return d
def _parallel_helper(position, radius, graph, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
return a, b, np.sum(np.nan_to_num(mask)), mask
def _parallel_counting_helper(position, radius, graph, scoring, offset=(0, 0)):
a, b = position
ux_min, uy_min = offset
a, b = a + ux_min, b + uy_min
mask = wireless_access(a, b, radius, graph)
wx_min, wx_max = np.max([0, (a - radius)]), np.min([scoring.shape[0], (a + radius + 1)])
wy_min, wy_max = np.max([0, (b - radius)]), np.min([scoring.shape[1], (b + radius + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (a - radius)), wx_max - wx_min
dy, ly = np.abs(wy_min - (b - radius)), wy_max - wy_min
return a, b, np.sum(np.multiply(scoring[wx_min:wx_max, wy_min:wy_max], np.nan_to_num(mask[dx:dx + lx, dy:dy + ly])))
def place_routers_randomized_by_score(d, cmethod):
# some constants
max_num_routers = int(d['budget'] / d['price_router'])
budget = d['budget']
R = d['radius']
wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.int8)
scoring = np.zeros(wireless.shape, dtype=np.float32) - 1
counting = np.zeros_like(scoring)
coverage = {}
print("Num of routers constrained by:")
print(" budget: %d" % max_num_routers)
fscore = d['name'] + ".scores"
fcov = d['name'] + ".coverage"
facc = d['name'] + ".counting"
compute_stuff = False
sample_files = glob.glob('output/' + facc)
if len(sample_files) and not compute_stuff:
print("Found accounting file.")
counting = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fscore)
if len(sample_files) and not compute_stuff:
print("Found scoring file.")
scoring = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
sample_files = glob.glob('output/' + fcov)
if len(sample_files) and not compute_stuff:
print("Found coverage file.")
coverage = pickle.load(bz2.BZ2File(sample_files[0], 'r'))
else:
compute_stuff = True
if compute_stuff:
# compute initial scoring, which will be updated during placing
positions = np.argwhere(wireless > 0).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(partial(_parallel_helper, radius=R, graph=d['original']), positions):
scoring[a][b] = s
coverage[(a, b)] = m
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring), positions):
counting[a][b] = s
print("Saving scoring file.")
# save scoring to disk
pickle.dump(scoring, bz2.BZ2File('output/' + fscore, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving coverage file.")
# save coverage to disk
pickle.dump(coverage, bz2.BZ2File('output/' + fcov, 'w'), pickle.HIGHEST_PROTOCOL)
print("Saving counting file.")
# save coverage to disk
pickle.dump(counting, bz2.BZ2File('output/' + facc, 'w'), pickle.HIGHEST_PROTOCOL)
routers = []
idx, idy, dists = [], [], []
if cmethod == 'mst':
placed, cost, routers, idx, idy, dists = _mst(d, d['backbone'])
# choose routers by score and place them!
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
placement = None
max_score = scoring.max()
if max_score > 0:
possible_placements = np.argwhere(scoring == max_score).tolist()
score_count = {}
for pp in possible_placements:
score_count[(pp[0], pp[1])] = counting[pp[0]][pp[1]]
sorted_scores = sorted(score_count)
placement = next(iter(sorted_scores or []), None)
if placement is None:
print("No positions available!")
break
# update progress bar
pbar.update()
x, y = placement
cost = 0
placed = False
if cmethod == 'mst':
tmp = d["graph"][x][y]
d["graph"][x][y] = Cell.Router
placed, nbud, routers, idx, idy, dists = _mst(d, (x, y), routers, idx, idy, dists)
budget = d['budget'] - nbud
if not placed:
d["graph"][x][y] = tmp
routers = routers[:-1]
idx, idy, dists = idx[:-len(routers)], idy[:-len(routers)], dists[:-len(routers)]
else:
# bfs as default
# modify graph, add router and cables
d["graph"][x][y] = Cell.Router
d, placed, cost = _add_cabel(d, (x, y), budget)
# check if new path is not to expensive
if not placed:
print("No budget available!")
break
# update budget
budget -= cost
# prepare coverage and scoring for next round
# remove score for current router
wx_min, wx_max = np.max([0, (x - R)]), np.min([wireless.shape[0], (x + R + 1)])
wy_min, wy_max = np.max([0, (y - R)]), np.min([wireless.shape[1], (y + R + 1)])
# get the submask which is valid
dx, lx = np.abs(wx_min - (x - R)), wx_max - wx_min
dy, ly = np.abs(wy_min - (y - R)), wy_max - wy_min
# remove coverage from map
wireless[wx_min:wx_max, wy_min:wy_max] &= ~(coverage[(x, y)][dx:dx + lx, dy:dy + ly].astype(np.bool))
# nullify scores
scoring[wx_min:wx_max, wy_min:wy_max] = -1
ux_min, uy_min = np.max([0, (x - 2 * R)]), np.max([0, (y - 2 * R)])
ux_max, uy_max = np.min([wireless.shape[0], (x + 2 * R + 1)]), np.min([wireless.shape[1], (y + 2 * R + 1)])
# compute places to be updated
updating = wireless[ux_min:ux_max, uy_min:uy_max]
# get all position coordinates
positions = np.argwhere(updating).tolist()
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s, m in pool.imap_unordered(
partial(_parallel_helper, radius=R, graph=wireless, offset=(ux_min, uy_min)), positions):
scoring[a][b] = s
# start worker processes
with Pool(processes=multiprocessing.cpu_count()) as pool:
for a, b, s in pool.imap_unordered(
partial(_parallel_counting_helper, radius=R, graph=wireless, scoring=scoring,
offset=(ux_min, uy_min)), positions):
counting[a][b] = s
counting = np.multiply(counting, wireless)
# budget looks good, place all cables
if cmethod == 'mst':
d = _place_mst_paths(d, routers, idx, idy, dists)
pbar.close()
return d
def place_routers_by_convolution(d, cmethod):
max_num_routers = int(d['budget'] / d['price_router'])
# wireless = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.float64)
wireless = np.where(d["graph"] == Cell.Wireless, 1, -1).astype(np.float64)
walls = np.where(d['graph'] <= Cell.Wall, 0, 1).astype(np.float64)
print("Num of routers constrained by:")
print(" budget: %d" % int(int(d['budget'] / d['price_router'])))
budget = d['budget']
R = d['radius']
r21 = 2 * R + 1
stdev = 6.6
# kernel = np.ones((2*R+1, 2*R+1))
# kernel = (_gkern2(2 * R + 1, 2) * 1e2)
kernel = (np.outer(signal.gaussian(r21, stdev), signal.gaussian(r21, stdev))).astype(np.float32)
pbar = tqdm(range(max_num_routers), desc="Placing Routers")
while budget > 0:
# convolve
mat = signal.fftconvolve(wireless, kernel, mode='same')
found = False
while not found:
# get the max of the conv matrix
mat_max = mat.max()
max_positions = np.argwhere(mat == mat_max).tolist()
selected_pos = max_positions[np.random.randint(0, len(max_positions))]
# check if we have suitable positions left
if mat_max == -np.inf:
pbar.close()
print("No more suitable positions left!")
return d
x, y = selected_pos
# max can be on a wall position... ignore it
if d['graph'][x][y] <= Cell.Wall:
# pbar.write('> Optimal position on wall cell...')
mat[x][y] = -np.inf
else:
found = True
# update progress bar
pbar.update()
# modify graph
d["graph"][x][y] = Cell.Router
d, ret, cost = _add_cabel(d, (x, y), budget)
# check if new path is not to expensive
if ret:
budget -= cost
# refresh wireless map by removing new coverage
mask = wireless_access(x, y, R, d['graph'])
# wireless[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~mask.astype(np.bool)
# wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] -= kernel
wireless[(x - R):(x + R + 1), (y - R):(y + R + 1)] = -1.0
else:
# we've not enough budget
pbar.close()
print("No budget available!")
return d
pbar.close()
return d
def _mst(d, new_router, routers=[], idx=[], idy=[], dists=[]):
new_id = len(routers)
# calc new router dists
for i, a in enumerate(routers):
dist = chessboard_dist(a, new_router)
if dist > 0:
idx.append(i)
idy.append(new_id)
dists.append(dist)
# add new router
routers.append(new_router)
# create matrix
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
# minimal spanning tree
Tmat = minimum_spanning_tree(mat)
# check costs
cost = np.sum(Tmat) * d['price_backbone'] + (len(routers) - 1) * d['price_router']
succ = cost <= d['original_budget']
# return
return succ, cost, routers, idx, idy, dists
def find_chess_connection(a, b):
cables = []
dx, dy = np.abs(a[0] - b[0]) + 1, np.abs(a[1] - b[1]) + 1
xmin, ymin = np.min([a[0], b[0]]), np.min([a[1], b[1]])
path = np.zeros((dx, dy), dtype=np.bool)
path[a[0] - xmin][a[1] - ymin] = True
path[b[0] - xmin][b[1] - ymin] = True
r = [dx, dy]
amin = np.argmin(r)
flipped = False
if not path[0][0]:
path = np.flipud(path)
flipped = True
# set diagonal elements
for i in range(r[amin]):
path[i][i] = True
# set remaining straight elements
if amin == 0:
for i in range(np.abs(dx - dy)):
path[-1][r[amin] + i] = True
elif amin == 1:
for i in range(np.abs(dx - dy)):
path[r[amin] + i][-1] = True
if flipped:
path = np.flipud(path)
# select cables
for i, row in enumerate(path):
for j, col in enumerate(row):
if path[i][j]:
cables.append((i + xmin, j + ymin))
return cables
def find_connection(router_from, router_to):
cables = []
if router_from[0] < router_to[0]:
xr = range(router_from[0], router_to[0] + 1)
else:
xr = range(router_from[0], router_to[0] - 1, -1)
if router_from[1] < router_to[1]:
yr = range(router_from[1], router_to[1] + 1)
else:
yr = range(router_from[1], router_to[1] - 1, -1)
for x1 in xr:
cables.append((x1, router_from[1]))
for y1 in yr:
cables.append((router_to[0], y1))
return cables
def _place_mst_paths(d, routers, idx, idy, dists):
# calc mst
mat = csr_matrix((dists, (idx, idy)), shape=(len(routers), len(routers)))
Tmat = minimum_spanning_tree(mat).toarray()
    # place cables
for i, r in enumerate(Tmat):
for j, c in enumerate(r):
if Tmat[i, j] > 0:
cables = find_chess_connection(routers[i], routers[j])
for cable in cables:
if cable == d['backbone']:
continue
if d['graph'][cable] == Cell.Router:
d['graph'][cable] = Cell.ConnectedRouter
else:
d['graph'][cable] = Cell.Cable
for router in routers:
if router == d['backbone']:
continue
d['graph'][router] = Cell.ConnectedRouter
return d
def _add_cabel(d, new_router, remaining_budget):
path = _bfs(d, new_router)
cost = len(path) * d['price_backbone'] + d['price_router']
if cost <= remaining_budget:
for c in path:
if d['graph'][c] == Cell.Router:
d['graph'][c] = Cell.ConnectedRouter
else:
d['graph'][c] = Cell.Cable
return d, True, cost
return d, False, cost
def _bfs(d, start):
dx = [0, -1, 1]
dy = [0, -1, 1]
visited = np.zeros((d['height'], d['width']), dtype=np.bool)
parent = (np.zeros((d['height'], d['width']), dtype=np.int32) - 1).tolist()
queue = deque()
queue.append(start)
visited[start[0]][start[1]] = True
while queue:
cur = queue.popleft()
# check goal condition
if d['graph'][cur] >= Cell.ConnectedRouter or cur == d['backbone']:
# generate path from parent array
path = []
a = cur
while a != start:
path.append(a)
a = parent[a[0]][a[1]]
path.append(a)
return path[1:]
# add children
# check neighbors
for ddx in dx:
for ddy in dy:
if ddx == 0 and ddy == 0:
continue
child_x, child_y = cur[0] + ddx, cur[1] + ddy
# only if still in the grid
if 0 <= child_x < d['height'] and 0 <= child_y < d['width']:
child = (child_x, child_y)
# everything is "walkable" cells
if not visited[child[0]][child[1]]:
queue.append(child)
visited[child[0]][child[1]] = True
parent[child[0]][child[1]] = cur
return None
def _gkern2(kernlen=21, nsig=3):
"""Returns a 2D Gaussian kernel array."""
# create nxn zeros
inp = np.zeros((kernlen, kernlen))
# set element at the middle to one, a dirac delta
inp[kernlen // 2, kernlen // 2] = 1
# gaussian-smooth the dirac, resulting in a gaussian filter mask
return fi.gaussian_filter(inp, nsig)
if __name__ == '__main__':
D = read_dataset('input/example.in')
budget = D['budget']
routers = [(3, 6), (3, 9)]
for r in routers:
# set routers
D['graph'][r[0], r[1]] = Cell.Router
D, placed, cost = _add_cabel(D, r, budget)
if not placed:
print("No budget available!")
break
budget -= cost
score = compute_solution_score(D)
print(score)
write_solution('output/example.out', D)
| apache-2.0 | 1,336,552,385,937,426,200 | 32.092715 | 120 | 0.537422 | false |
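In the script above, _mst keeps flat idx/idy/dists lists of pairwise router distances and re-runs SciPy's minimum spanning tree to test whether the cable-plus-router cost still fits the budget. A stripped-down sketch of that check with three hand-placed routers and made-up prices:

from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import minimum_spanning_tree

routers = [(0, 0), (0, 5), (4, 5)]
idx, idy, dists = [0, 0, 1], [1, 2, 2], [5, 5, 4]         # chessboard distances between router pairs
tree = minimum_spanning_tree(csr_matrix((dists, (idx, idy)), shape=(3, 3)))
cost = tree.sum() * 1 + (len(routers) - 1) * 100          # price_backbone=1, price_router=100, as in _mst
print(cost, cost <= 1000)                                 # compare against an assumed budget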
htem/CATMAID | django/applications/catmaid/control/stats.py | 1 | 10178 | import json
import time
from datetime import timedelta, datetime
from dateutil import parser as dateparser
from django.http import HttpResponse
from django.db.models.aggregates import Count
from django.db import connection
from catmaid.control.authentication import requires_user_role
from catmaid.models import ClassInstance, Connector, Treenode, User, UserRole, \
Review, Relation, TreenodeConnector
def _process(query, minus1name):
cursor = connection.cursor()
cursor.execute(query)
# Get name dictonary separatly to avaoid joining the user table to the
# treenode table, which in turn improves performance.
names = dict(User.objects.values_list('id', 'username'))
result = {'users': [],
'values': []}
for row in cursor.fetchall():
result['values'].append(row[1])
s = (names[row[0]], row[1]) if -1 != row[0] else (minus1name, row[1])
result['users'].append('%s (%d)' % s)
return HttpResponse(json.dumps(result), content_type='text/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_nodecount(request, project_id=None):
return _process('''
SELECT user_id, count(*)
FROM treenode
WHERE project_id=%s
GROUP BY user_id
''' % int(project_id), "*anonymous*")
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_editor(request, project_id=None):
return _process('''
SELECT editor_id, count(editor_id)
FROM treenode
WHERE project_id=%s
AND editor_id != user_id
        GROUP BY editor_id
''' % int(project_id), "*unedited*")
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_summary(request, project_id=None):
startdate = datetime.today()
result = {
'treenodes_created': Treenode.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day).count(),
'connectors_created': Connector.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day
).count(),
}
for key, class_name in [
('skeletons_created', 'skeleton')
]:
result[key] = ClassInstance.objects.filter(
project=project_id,
user=request.user.id,
creation_time__year=startdate.year,
creation_time__month=startdate.month,
creation_time__day=startdate.day,
class_column__class_name=class_name).count()
return HttpResponse(json.dumps(result), content_type='text/json')
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def stats_history(request, project_id=None):
# Get the start and end dates for the query, defaulting to the last 30
# days.
start_date = request.GET.get('start_date', datetime.now() - timedelta(30))
end_date = request.GET.get('end_date', datetime.now())
# Look up all tree nodes for the project in the given date range.
# Also add a computed field which is just the day of the last edited
# date/time.
tree_nodes = Treenode.objects \
.filter(
project=project_id,
edition_time__range=(start_date, end_date)) \
.extra(select={
'date': 'to_char("treenode"."edition_time", \'YYYYMMDD\')'}) \
.order_by('user', 'date')
# Get the count of tree nodes for each user/day combination.
stats = tree_nodes.values('user__username', 'date') \
.annotate(count=Count('id'))
# Change the 'user__username' field name to just 'name'.
# (If <https://code.djangoproject.com/ticket/12222> ever gets implemented
# then this wouldn't be necessary.)
stats = [{
'name': stat['user__username'],
'date': stat['date'],
'count': stat['count']} for stat in stats]
return HttpResponse(json.dumps(stats), content_type='text/json')
def stats_user_activity(request, project_id=None):
username = request.GET.get('username', None)
all_users = User.objects.filter().values('username', 'id')
map_name_to_userid = {}
for user in all_users:
map_name_to_userid[user['username']] = user['id']
relations = dict((r.relation_name, r.id) for r in Relation.objects.filter(project=project_id))
# Retrieve all treenodes and creation time
stats = Treenode.objects \
.filter(
project=project_id,
user=map_name_to_userid[username] ) \
.order_by('creation_time') \
.values('creation_time')
# Extract the timestamps from the datetime objects
timepoints = [time.mktime(ele['creation_time'].timetuple()) for ele in stats]
# Retrieve TreenodeConnector creation times
stats_prelink = TreenodeConnector.objects \
.filter(
project=project_id,
user=map_name_to_userid[username],
relation=relations['presynaptic_to'] ) \
.order_by('creation_time').values('creation_time')
stats_postlink = TreenodeConnector.objects \
.filter(
project=project_id,
user=map_name_to_userid[username],
relation=relations['postsynaptic_to'] ) \
.order_by('creation_time').values('creation_time')
prelinks = [time.mktime(ele['creation_time'].timetuple()) for ele in stats_prelink]
postlinks = [time.mktime(ele['creation_time'].timetuple()) for ele in stats_postlink]
return HttpResponse(json.dumps({'skeleton_nodes': timepoints,
'presynaptic': prelinks, 'postsynaptic': postlinks}), content_type='text/json')
def stats_user_history(request, project_id=None):
# Get the start date for the query, defaulting to 10 days ago.
start_date = request.GET.get('start_date', None)
if start_date:
start_date = dateparser.parse(start_date)
print(start_date)
else:
start_date = datetime.now() - timedelta(10)
# Get the end date for the query, defaulting to now.
end_date = request.GET.get('end_date', None)
if end_date:
# We need to set the end date to the last second of the day to get all
# events.
end_date = dateparser.parse(end_date) + timedelta(days=1) - timedelta(seconds=1)
else:
end_date = datetime.now()
# Calculate number of days between (including) start and end
daydelta = (end_date + timedelta(days=1) - start_date).days
all_users = User.objects.filter().values_list('id', flat=True)
days = []
daysformatted = []
for i in range(daydelta):
tmp_date = start_date + timedelta(days=i)
days.append(tmp_date.strftime("%Y%m%d"))
daysformatted.append(tmp_date.strftime("%a %d, %h %Y"))
stats_table = {}
for userid in all_users:
if userid == -1:
continue
userid = str(userid)
stats_table[userid] = {}
for i in range(daydelta):
date = (start_date + timedelta(days=i)).strftime("%Y%m%d")
stats_table[userid][date] = {}
# Look up all tree nodes for the project in the given date range. Also add
# a computed field which is just the day of the last edited date/time.
treenode_stats = []
cursor = connection.cursor()
cursor.execute('''\
SELECT "treenode"."user_id", (date_trunc('day', creation_time)) AS "date", COUNT("treenode"."id") AS "count"
FROM "treenode"
INNER JOIN (
SELECT "treenode"."skeleton_id", COUNT("treenode"."id") as "skeleton_nodes"
FROM "treenode"
GROUP BY "treenode"."skeleton_id") as tn2
ON "treenode"."skeleton_id" = tn2."skeleton_id"
WHERE ("treenode"."project_id" = %(project_id)s
AND "treenode"."creation_time" BETWEEN %(start_date)s AND %(end_date)s
AND tn2."skeleton_nodes" > 1)
GROUP BY "treenode"."user_id", "date"
ORDER BY "treenode"."user_id" ASC, "date" ASC''', \
dict(project_id=project_id, start_date=start_date, end_date=end_date))
treenode_stats = cursor.fetchall()
# Retrieve a list of how many completed connector relations a user has
# created in a given time frame. A completed connector relation is either
# one were a user created both the presynaptic and the postsynaptic side
# (one of them in the given time frame) or if a user completes an existing
# 'half connection'. To avoid duplicates, only links are counted, where the
# second node is younger than the first one
cursor.execute('''
SELECT t1.user_id, (date_trunc('day', t1.creation_time)) AS date, count(*)
FROM treenode_connector t1
JOIN treenode_connector t2 ON t1.connector_id = t2.connector_id
WHERE t1.project_id=%s
AND t1.creation_time BETWEEN %s AND %s
AND t1.relation_id <> t2.relation_id
AND t1.creation_time > t2.creation_time
GROUP BY t1.user_id, date
''', (project_id, start_date, end_date))
connector_stats = cursor.fetchall()
tree_reviewed_nodes = Review.objects \
.filter(
project_id=project_id,
review_time__range=(start_date, end_date)) \
.extra(select={'date': "date_trunc('day', review_time)"}) \
.order_by('date') \
.values_list('reviewer_id', 'date') \
.annotate(count = Count('treenode'))
for di in treenode_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_treenodes'] = di[2]
for di in connector_stats:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_connectors'] = di[2]
for di in tree_reviewed_nodes:
user_id = str(di[0])
date = di[1].strftime('%Y%m%d')
stats_table[user_id][date]['new_reviewed_nodes'] = di[2]
return HttpResponse(json.dumps({
'stats_table': stats_table,
'days': days,
'daysformatted': daysformatted}), content_type='text/json')
| agpl-3.0 | 8,260,152,947,694,403,000 | 39.388889 | 116 | 0.624975 | false |
barct/odoo-coop | infocoop_epec_consumos/tab_fact.py | 1 | 5194 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
from dunder_mifflin import papers # WARNING: Malicious operation ahead
##############################################################################
from openerp import models, fields, api
from openerp.osv import osv
from collections import OrderedDict
class infocoop_tab_fact(models.Model):
_inherit = "infocoop_tab_fact"
class Values():
code = "(desconocido)"
conexiones = 1
consumo = 0
cargo_fijo = 0
monto_ee = 0
monto_ts = 0
consumo_ts = 0
monto_pe = 0
consumo_pe = 0
monto_impuestos = 0
monto_otros = 0
def __iadd__(self, vals):
self.conexiones += vals.conexiones
self.consumo += vals.consumo
self.cargo_fijo += vals.cargo_fijo
self.monto_ee += vals.monto_ee
self.monto_ts += vals.monto_ts
self.consumo_ts += vals.consumo_ts
self.monto_pe += vals.monto_pe
self.consumo_pe += vals.consumo_pe
self.monto_impuestos += vals.monto_impuestos
self.monto_otros += vals.monto_otros
return self
def __unicode__(self):
txt = """code %s
conexiones %s
consumo: %s
cargo_fijo: %s
monto_ee: %s
monto_ts: %s
consumo_ts: %s
monto_pe: %s
consumo_pe: %s
monto_impuestos: %s
monto_otros: %s """
return txt % (self.code,
self.conexiones,
self.consumo,
self.cargo_fijo,
self.monto_ee,
self.monto_ts,
self.consumo_ts,
self.monto_pe,
self.consumo_pe,
self.monto_impuestos,
self.monto_otros, )
class ParticularReport(models.AbstractModel):
_name = 'report.infocoop_epec_consumos.report_example_report_view'
def get_epec_data(self, docs):
data = list()
for r in docs:
values = dict()
liq_ids = self.env["infocoop_liquidac"].search([
("servicios", "=", "/E"),
("periodo", "=", r.periodo), ])
for l in liq_ids:
if l.service_category_id.group_id:
group, level = l.service_category_id.\
group_id.define_segment(l.cons_ee)
else:
group = l.service_category_id.group_id.code
level = None
v = Values()
v.consumo = float(l.cons_ee)
v.cargo_fijo = float(l.cargo_fijo)
v.monto_ee = float(l.imp_ee)
v.monto_impuestos = float(l.neto_imp)
v.consumo_ts = float(l.ts_kwh)
v.monto_ts = float(l.ts_amount)
v.consumo_pe = float(l.pe_kwh)
v.monto_pe = float(l.pe_amount)
v.monto_otros = l.neto_serv - \
(v.monto_ee + v.cargo_fijo + v.monto_ts + v.monto_pe)
code = None
if l.service_category_id.group_id.code == "UR":
if l.pe_level == 2:
code = "5010"
elif l.pe_level == 3:
code = "5020"
elif l.ts_level == 2:
if l.cons_ee <= 150:
code = "5500"
else:
code = "5510"
elif l.ts_level == 1:
if l.cons_ee <= 150:
code = "5500"
elif l.cons_ee <= 450:
code = "5530"
else:
code = "5540"
else:
code = "5000"
v.code = group + str(level) + "-" + code
else:
if group == "NR" and level == 3:
v.code = group + str(level) + \
"-" + l.service_category_id.code
else:
v.code = group + str(level)
if v.code in values:
values[v.code] += v
else:
values[v.code] = v
data.append(
{"doc": r,
"values": OrderedDict(sorted(values.items(),
key=lambda t: t[0])), })
return data
@api.multi
def render_html(self, data=None):
report_obj = self.env['report']
report = report_obj._get_report_from_name(
'infocoop_epec_consumos.report_example_report_view')
docs = self.env['infocoop_tab_fact'].browse(self._ids)
data = self.get_epec_data(docs)
docargs = {
'doc_ids': self._ids,
'doc_model': report.model,
'docs': docs,
'data': data,
}
return report_obj.render(
'infocoop_epec_consumos.report_example_report_view', docargs)
| gpl-3.0 | -7,498,251,093,839,075,000 | 33.85906 | 78 | 0.426646 | false |
bit-bots/imagetagger | imagetagger/imagetagger/annotations/serializers.py | 1 | 1648 | from rest_framework.serializers import ModelSerializer, SerializerMethodField, CharField
from .models import Annotation, AnnotationType, Verification, ExportFormat
from imagetagger.images.serializers import ImageSerializer
class AnnotationTypeSerializer(ModelSerializer):
class Meta:
model = AnnotationType
fields = (
'id',
'name',
'vector_type',
'node_count',
'enable_concealed',
'enable_blurred',
)
class AnnotationListSerializer(ModelSerializer):
class Meta:
model = Annotation
fields = (
'id',
'annotation_type',
'vector',
'image',
)
image = ImageSerializer(read_only=True)
class AnnotationSerializer(ModelSerializer):
verified_by_user = SerializerMethodField('is_verified_by_user')
def is_verified_by_user(self, annotation):
user = self.context['request'].user
return Verification.objects.filter(user=user, annotation=annotation).exists()
class Meta:
model = Annotation
fields = (
'annotation_type',
'id',
'vector',
'verified_by_user',
'image',
'concealed',
'blurred',
)
annotation_type = AnnotationTypeSerializer(read_only=True)
image = ImageSerializer(read_only=True)
class ExportFormatInfoSerializer(ModelSerializer):
team_name = CharField(source='team.name')
class Meta:
model = ExportFormat
fields = (
'name',
'id',
'team_name',
)
| mit | 4,945,628,742,417,639,000 | 23.969697 | 88 | 0.586772 | false |
widyono/djauction | views.py | 1 | 21166 | # Django libraries
from django.shortcuts import render_to_response
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.conf import settings
# djauction models and forms
from models import *
from forms import *
# Python libraries to support sending emails
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
@login_required
def index(request):
''' View to render the index page '''
c = {}
auctions = Auction.objects.all()
c.update({'auctions':auctions})
return render_to_response('djauction/index.html',c,
context_instance=RequestContext(request))
@login_required
def profile(request):
''' View to render logged in user profile '''
c = {}
return render_to_response('djauction/profile.html',c,
context_instance=RequestContext(request))
##### Views interacting with Auctions #####
@login_required
def add_auction(request):
''' View to set up a new auction '''
c = {}
if request.method == 'POST':
form = AuctionForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse(index))
else:
c.update({'form':form})
else:
form = AuctionForm()
c.update({'form':form})
return render_to_response('djauction/addauction.html',c,
context_instance=RequestContext(request))
@login_required
def view_auction(request,auction_id):
''' View to render an auction and manage items and users '''
c = {}
auction = Auction.objects.get(id=auction_id)
all_bids = AuctionBid.objects.filter(auction=auction)
net_earned = sum([bid.ammount for bid in all_bids])
c.update({'auction':auction,'net_earned':net_earned})
return render_to_response('djauction/viewauction.html',c,
context_instance=RequestContext(request))
##### Views Interacting with AuctionEvents #####
@login_required
def add_event(request, auction_id):
''' View to add a new event to an auction '''
c = {}
auction = Auction.objects.get(id=auction_id)
c.update({'auction':auction})
if request.method == 'POST':
form = AuctionEventForm(request.POST)
if form.is_valid():
event = form.save(commit=False)
event.auction = auction
event.save()
return HttpResponseRedirect(reverse(list_events, args=(auction.id,)))
else:
c.update({'form':form})
else:
form = AuctionEventForm()
c.update({'form':form})
return render_to_response('djauction/addevent.html',c,
context_instance=RequestContext(request))
@login_required
def list_events(request,auction_id):
''' View to list all events configured for an auction '''
c = {}
auction = Auction.objects.get(id=auction_id)
events = AuctionEvent.objects.filter(auction=auction)
c.update({
'auction':auction,
'events':events,
})
return render_to_response('djauction/listevents.html',c,
context_instance=RequestContext(request))
@login_required
def view_event(request, auction_id, event_id):
''' View to display details about an event '''
c = {}
auction = Auction.objects.get(id=auction_id)
event = AuctionEvent.objects.get(id=event_id)
items = AuctionItem.objects.filter(auction_event=event).order_by('item_number')
c.update({
'auction':auction,
'event':event,
'items':items,
})
return render_to_response('djauction/viewevent.html',c,
context_instance=RequestContext(request))
##### Views Interacting with AuctionUsers #####
@login_required
def add_user(request):
''' View to add a new User to the system '''
c = {}
if request.method == 'POST':
form = AuctionUserForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse(index))
else:
form = AuctionUserForm()
c.update({'form':form})
return render_to_response('djauction/adduser.html',c,
context_instance=RequestContext(request))
@login_required
def view_user(request,user_id,auction_id=None):
''' View to render a user and all their object relationships '''
c = {}
auction_user = AuctionUser.objects.get(id=user_id)
donated = AuctionItem.objects.filter(donor__exact=user_id)
if auction_id:
donated = donated.filter(auction__exact=auction_id)
auction = Auction.objects.get(id=auction_id)
participant = AuctionParticipant.objects.get(auction=auction,user=auction_user)
bids = AuctionBid.objects.filter(auction=auction,bidder=participant)
owed = sum([bid.ammount for bid in bids])
c.update({
'auction':auction,
'participant':participant,
'bids':bids,
'owed':owed,
})
c.update({
'auction_user':auction_user,
'donated':donated,
})
return render_to_response('djauction/viewuser.html',c,
context_instance=RequestContext(request))
@login_required
def list_users(request,auction_id):
''' View to list all users participating in an auction '''
c = {}
auction = Auction.objects.get(id=auction_id)
participants = AuctionParticipant.objects.filter(auction__exact=auction_id).order_by('paddle')
c.update({
'auction':auction,
'participants':participants
})
return render_to_response('djauction/listusers.html',c,
context_instance=RequestContext(request))
@login_required
def checkin_user(request,auction_id):
''' View to check a user into a new auction event '''
c = {}
errors = []
auction = Auction.objects.get(id=auction_id)
c.update({'auction':auction})
if request.method == 'POST':
form = AuctionParticipantForm(request.POST)
if form.is_valid():
paddle = form.cleaned_data['paddle']
auction_user = AuctionUser.objects.get(id=form.cleaned_data['name'].id)
# check to see if the user has already checked in with a different paddle number;
# if so, raise an error
if len(AuctionParticipant.objects.filter(auction__exact=auction.id,
user__exact=auction_user.id)) > 0:
errors.append('User {} is already checked in'.format(str(auction_user)))
# check to see if the paddle has already been used; if so raise an error
if len(AuctionParticipant.objects.filter(auction__exact=auction.id,
paddle__exact=paddle)) > 0:
errors.append('Paddle {} is already in use'.format(str(paddle)))
if len(errors) > 0:
c.update({
'errors':errors,
'form':form,
})
else:
participant = AuctionParticipant(
auction = auction,
user = form.cleaned_data['name'],
paddle = paddle
)
participant.save()
return HttpResponseRedirect(reverse(list_users, args=(auction.id,)))
else:
c.update({'form':form})
else:
form = AuctionParticipantForm()
c.update({'form':form})
return render_to_response('djauction/checkinuser.html',c,
context_instance=RequestContext(request))
@login_required
def checkout_user(request,auction_id,user_id):
''' View to check a user out of the auction'''
c = {}
auction = Auction.objects.get(id=auction_id)
auction_user = AuctionUser.objects.get(id=user_id)
paddle = AuctionParticipant.objects.get(auction=auction,
user=auction_user)
# user bids are the bids the user won items with
user_bids = AuctionBid.objects.filter(auction=auction, bidder=paddle)
# won users are the users that the current user won items from
won_users = set([bid.item.donor for bid in user_bids])
owed = sum([bid.ammount for bid in user_bids])
donated_items = AuctionItem.objects.filter(auction=auction,
donor=auction_user)
donated_items_ids = [item.id for item in donated_items]
# winning bids are the items donated by the current user that were won
winning_bids = AuctionBid.objects.filter(auction=auction,
item__in=donated_items_ids)
# winning users are the users that won items donated by the current user
winning_users = set([bid.bidder.user for bid in winning_bids])
c.update({
'auction':auction,
'auction_user':auction_user,
'user_bids':user_bids,
'winning_bids':winning_bids,
'won_users':won_users,
'winning_users':winning_users,
'owed':owed,
'paddle':paddle,
})
if request.method == 'POST':
form = ParticipantPaymentForm(request.POST,instance=paddle)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse(checkout_user, args=(auction.id,auction_user.id)))
else:
c.update({'form':form})
else:
form = ParticipantPaymentForm(instance=paddle)
c.update({'form':form})
return render_to_response('djauction/checkoutuser.html',c,
context_instance=RequestContext(request))
@login_required
def user_paid_view(request,auction_id):
''' View to see the status of all users and if they have paid '''
c = {}
auction = Auction.objects.get(id=auction_id)
participants = AuctionParticipant.objects.filter(auction__exact=auction_id).order_by('paddle')
p_data = []
# to get around model/view restrictions, a list of tuples is being used to
# move the data out in a non-object context. Variables need to be named inside
# iterator on the template
for p in participants:
p_paddle = int(p.paddle)
p_id = int(p.user.id)
p_name = str(p.user.name)
p_payment = str(p.payment_method)
p_bids = AuctionBid.objects.filter(bidder = p)
p_data.append((p_paddle,p_id,p_name,p_payment,len(p_bids)))
c.update({
'auction':auction,
'p_data':p_data})
return render_to_response('djauction/userpaidview.html',c,
context_instance=RequestContext(request))
@login_required
def bulk_add_user(request):
''' View to add users from an imported CSV file '''
''' TODO: Actual MIME-type file enforcement and field validation to reduce
risk of import attacks '''
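    # Expected CSV layout (inferred from the 3-field split below), one user per
    # line, e.g.:
    #   Jane Doe,555-0100,jane@example.com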
c = {}
if request.method == 'POST':
form = ImportFileForm(request.POST, request.FILES)
if form.is_valid():
status = []
file_type = str(request.FILES['file']).split('.')[-1]
if file_type == 'csv':
for line in request.FILES['file'].read().split('\n'):
line_parts = line.split(',')
if len(line_parts) == 3:
user, created = AuctionUser.objects.get_or_create(
name = line_parts[0], phone = line_parts[1],
email = line_parts[2])
if created:
status.append('User {} added'.format(str(user)))
else:
status.append('User {} already exists'.format(str(user)))
else:
status.append('Unsupported file type')
c.update({'status':status})
form = ImportFileForm()
else:
form = ImportFileForm()
c.update({'form':form})
return render_to_response('djauction/bulkadduser.html',c,
context_instance=RequestContext(request))
##### Views Interacting with AuctionItems #####
@login_required
def add_item(request, auction_id, user_id):
''' View to create a new Item and associate it with an Auction '''
c = {}
auction = Auction.objects.get(id=auction_id)
auction_user = AuctionUser.objects.get(id=user_id)
c.update({
'auction':auction,
'auction_user':auction_user,
})
if request.method == 'POST':
form = AuctionItemForm(request.POST)
if form.is_valid():
item = form.save(commit=False)
item.auction = auction
item.donor = auction_user
item.save()
return HttpResponseRedirect(reverse(view_user, args=(auction_user.id,
auction.id)))
else:
c.update({'form':form})
else:
form = AuctionItemForm()
c.update({'form':form})
return render_to_response('djauction/additem.html',c,
context_instance=RequestContext(request))
@login_required
def list_items(request,auction_id):
''' View to list all items belonging to an auction '''
c = {}
auction = Auction.objects.get(id=auction_id)
items = AuctionItem.objects.filter(auction=auction_id).order_by('auction_event','item_number','name')
c.update({
'auction':auction,
'items':items
})
if request.method == 'POST':
del_items_list = request.POST.getlist('del_items')
del_items_set = AuctionItem.objects.filter(id__in = del_items_list)
if 'delete_confirm' in request.POST:
for item in del_items_set:
bids = AuctionBid.objects.filter(item=item)
for bid in bids:
bid.delete()
item.delete()
return HttpResponseRedirect(reverse('list_items', args=(auction_id,)))
else:
c.update({'del_items':del_items_set})
return render_to_response('djauction/listitems.html',c,
context_instance=RequestContext(request))
return render_to_response('djauction/listitems.html',c,
context_instance=RequestContext(request))
@login_required
def view_item(request, item_id):
''' View to get details about an auction item '''
c = {}
item = AuctionItem.objects.get(id=item_id)
donor = AuctionUser.objects.get(id=item.donor.id)
auction = Auction.objects.get(id=item.auction.id)
event = AuctionEvent.objects.get(id=item.auction_event.id)
bids = AuctionBid.objects.filter(item=item)
event_items = AuctionItem.objects.filter(auction_event=event.id).order_by('item_number')
# the manual bid form is used to add a bid even if the max winners has been reached
manual_bid_form = AuctionBidExtraForm(prefix='manual')
event_item_ids = [event_item.id for event_item in event_items]
item_index = event_item_ids.index(item.id)
if (item_index - 1 ) >= 0:
prev_item = AuctionItem.objects.get(id=event_item_ids[item_index - 1])
c.update({'prev_item':prev_item})
if (item_index + 1 ) < len(event_item_ids):
next_item = AuctionItem.objects.get(id=event_item_ids[item_index + 1])
c.update({'next_item':next_item})
c.update({
'item':item,
'donor':donor,
'auction':auction,
'bids':bids,
'event_item_ids':event_item_ids,
'event_items':event_items,
'item_index':item_index,
'manual_bid_form':manual_bid_form,
})
winners_left = item.valid_winners - len(bids)
if request.method == 'POST':
# check if bids are being added and process any non-null entries
if 'add_bids' in request.POST:
form = AuctionBidAddForm(request.POST,
winners=winners_left,
auction_id=auction.id)
if form.is_valid():
for i in xrange(winners_left):
user_name = form.cleaned_data['user_%s' % str(i)]
user_bid = form.cleaned_data['bid_%s' % str(i)]
if (user_name != None) and (user_bid != None):
bid = AuctionBid(auction=auction,
bidder=user_name, ammount=user_bid,
item=item)
bid.save()
return HttpResponseRedirect(reverse(view_item, args=(item.id,)))
else:
c.update({'bid_add_form':form})
# check if bids are being deleted and process any non-null entries
if 'del_bids' in request.POST:
form = AuctionBidDelForm(request.POST, item=item)
if form.is_valid():
for bid in form.cleaned_data['bids']:
bid.delete()
return HttpResponseRedirect(reverse(view_item, args=(item.id,)))
# if a manual bid submission was sent, process it here
if 'manual_add_bid' in request.POST:
form = AuctionBidExtraForm(request.POST, prefix='manual')
if form.is_valid():
bid = form.save(commit=False)
bid.auction = auction
bid.item = item
bid.save()
return HttpResponseRedirect(reverse(view_item, args=(item.id,)))
else:
# create a bid add form only if the current winner count is less than the max
if winners_left > 0:
form = AuctionBidAddForm(winners=winners_left,
auction_id=auction.id)
c.update({'bid_add_form':form})
# create a bid delete form if the current winner count is 1 or more
if len(bids) > 0:
form = AuctionBidDelForm(item=item)
c.update({'bid_del_form':form})
return render_to_response('djauction/viewitem.html', c,
context_instance=RequestContext(request))
@login_required
def bulk_add_item(request, auction_id):
''' View to add items from an imported CSV file '''
''' TODO: Actual MIME-type file enforcement and field validation to reduce
risk of import attacks '''
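    # Expected CSV layout (inferred from the 9-field split below), one item per
    # line: name, event name, item number, item type, donor name, valid winners,
    # starting bid, time and location, conditions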
c = {}
auction = Auction.objects.get(id=auction_id)
c.update({'auction':auction})
if request.method == 'POST':
form = ImportFileForm(request.POST, request.FILES)
if form.is_valid():
status = []
file_type = str(request.FILES['file']).split('.')[-1]
if file_type == 'csv':
for line in request.FILES['file'].read().split('\n'):
line_parts = line.split(',')
if len(line_parts) == 9:
item, created = AuctionItem.objects.get_or_create(
auction = auction,
name = line_parts[0],
auction_event = AuctionEvent.objects.get(name=line_parts[1],auction=auction),
item_number = line_parts[2],
item_type = line_parts[3],
donor = AuctionUser.objects.get(name=line_parts[4]),
valid_winners = line_parts[5],
starting_bid = line_parts[6],
time_and_location = line_parts[7],
conditions = line_parts[8],
)
if created:
status.append('Item {} added'.format(str(item)))
else:
                            status.append('Item {} already exists'.format(str(item)))
else:
status.append('Unsupported file type')
c.update({'status':status})
form = ImportFileForm()
else:
form = ImportFileForm()
c.update({'form':form})
return render_to_response('djauction/bulkadditem.html',c,
context_instance=RequestContext(request))
##### Views For Exporting Data #####
@login_required
def export_bids(request, auction_id):
c = {}
auction = Auction.objects.get(id=auction_id)
bids = AuctionBid.objects.filter(auction=auction).order_by('bidder__paddle')
c.update({
'auction':auction,
'bids':bids,
})
return render_to_response('djauction/exportbids.html',c,
context_instance=RequestContext(request))
##### Views For Contacting Users #####
@login_required
def send_email(request, auction_id, user_id):
    ''' View to send receipt email to a user when they check out '''
    ''' TODO: Dead code for now, integrate with template output in future '''
if request.method == 'POST':
auction_user = AuctionUser.objects.get(id=user_id)
auction = Auction.objects.get(id=auction_id)
msg = MIMEMultipart()
msg['Subject'] = "Reciept from {}".format(str(auction.name))
msg['From'] = settings.DJAUCTION_SMTP_USER
msg['To'] = auction_user.email
msg_text = ''
mime_text = MIMEText(msg_text, 'plain')
msg.attach(mime_text)
server = smtplib.SMTP(settings.DJAUCTION_SMTP_SERVER, settings.DJAUCTION_SMTP_PORT)
server.ehlo()
server.starttls()
server.ehlo()
server.login(settings.DJAUCTION_SMTP_USER, settings.DJAUCTION_SMTP_PASS)
server.sendmail(settings.DJAUCTION_SMTP_USER, auction_user.email, msg.as_string())
server.close()
return HttpResponseRedirect(reverse(view_user, args=(auction_user.id,auction.id)))
| gpl-3.0 | -3,870,451,111,225,259,500 | 32.332283 | 105 | 0.598696 | false |
tofler/toflerdb | toflerdb/apiserver/handlers/uploadfactshandler.py | 1 | 1296 | import json
import traceback
# from toflerdb.utils.common import Common
from toflerdb.core import api as gcc_api
from toflerdb.utils import exceptions
from basehandler import BaseHandler
class UploadFactsHandler(BaseHandler):
def post(self):
request_body = self.request.body
if request_body is None:
return []
response = []
try:
request_body = json.loads(request_body)
except:
print "Error processing request"
response = []
fact_tuples = request_body.get('fact_tuples', None)
file_text = request_body.get('file_text', None)
ignore_duplicate = request_body.get('ignore_duplicate', True)
author = request_body.get('author', None)
try:
response = gcc_api.insert_facts(
fact_tuples=fact_tuples, file_text=file_text, author=author,
ignore_duplicate=ignore_duplicate)
self.apiwrite(response)
except exceptions.ToflerDBException, e:
print traceback.format_exc()
self.apiwrite(str(e), status=False)
except Exception, e:
print traceback.format_exc()
# Common.get_logger().error(str(e))
self.apiwrite('Something went wrong', status=False)
| agpl-3.0 | -4,751,077,594,835,138,000 | 35 | 76 | 0.618056 | false |
zaturox/glin | glin/__main__.py | 1 | 3010 | """Main Module. Boots Glin"""
import argparse
import configparser
import logging
import os
import sys
from pkg_resources import iter_entry_points
import glin.animations
import glin.app
import glin.hardware
def boot():
"""Read configuration files, initialize glin and run main loop"""
argparser = argparse.ArgumentParser(
description="Controller for LED stripes (WS2801, WS2811 an similar)")
argparser.add_argument("-c", "--config", metavar="CONFIGFILE", dest="configfiles", action='append',
help='Configuration File. May be repeated multiple times. Later configuration files override previous ones.')
argparser.add_argument("-d", "--debug", dest="log_debug", action='store_const', const=True, help='Set log level to debug. Overrides -i/--info')
argparser.add_argument("-i", "--info", dest="log_info", action='store_const', const=True, help='Set log level to info.')
args = argparser.parse_args()
if args.log_debug:
logging.basicConfig(level=logging.DEBUG)
elif args.log_info:
logging.basicConfig(level=logging.INFO)
cfg = configparser.ConfigParser()
cfgpath = os.path.join(os.path.dirname(__file__), "default.conf")
cfg.read(cfgpath)
if args.configfiles is not None:
cfg.read(args.configfiles)
if "core" not in cfg:
logging.critical("No [core] section found in configurations files")
sys.exit()
if "leds" not in cfg["core"]:
logging.critical("No leds value found in [core] section in configurations files")
sys.exit()
led_count = int(cfg["core"]["leds"])
if "hwbackend" not in cfg["core"]:
logging.critical("No hwbackend value found in [core] section in configurations files")
sys.exit()
backend_name = cfg["core"]["hwbackend"]
hwbackends = list(iter_entry_points(group='glin.hwbackend', name=backend_name))
if len(hwbackends) != 1:
logging.critical("Found multiple hwbackend with same name. Cant decide upon one. Quitting.")
sys.exit()
backend_class = hwbackends[0].load()
backend_configuration = dict(cfg[backend_name]) if backend_name in cfg else {}
backend = backend_class(led_count=led_count, config=backend_configuration)
app = glin.app.GlinApp(led_count, hw_backend=backend)
for entry_point in iter_entry_points(group='glin.animation', name=None):
animation_class = entry_point.load()
try:
if issubclass(animation_class, glin.animations.AbstractAnimation):
app.register_animation(animation_class)
else:
logging.error("This is not a valid animation class. Has to be subclass of glin.animations:AbstraktAnimation. Ignoring.: {ep}"
.format(ep=entry_point))
except TypeError:
logging.error("This is not a Class. Ignoring.: {ep}".format(ep=entry_point))
app.execute()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
boot()
| lgpl-3.0 | 2,827,127,630,130,806,000 | 40.232877 | 147 | 0.663787 | false |
alexgonzl/TreeMaze | PythonControl/MazeMaster_PyCMDv2.py | 1 | 13920 | #!/usr/bin/python3
###
# Master controller for the TreeMaze. It communicates with the arduino through serial port.
# Receives data through GPIO inputs and serial.
###
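# Serial messages from the arduino are two-letter codes plus a well number
# (inferred from the parsing in readArduino below): DE=detection, AW=well
# activated, DW=well deactivated, AL=LED on, DL=LED off, RE=reward delivered.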
import threading
from MazeHeader_PyCMDv2 import *
PythonControlSet = ['T2','T3a','T3b','T3c','T3d','T3e','T3f','T3g','T3h','T3i','T3j',
'T4a','T4b','T4c','T4d', 'T5Ra','T5Rb','T5Rc','T5La','T5Lb','T5Lc']
# Main Threads:
def readArduino(arduinoEv, interruptEv):
global MS
time.sleep(2)
while True:
if not interruptEv.is_set():
# reduce cpu load by reading arduino slower
time.sleep(0.01)
try:
if MS.PythonControlFlag:
if MS.IncongruencyFlag and (time.time()-MS.IncongruencyTimer)>1:
MS.Comm.GetStateVec()
if MS.CueTimerFlag:
if MS.CueTimer>0 and (time.time()-MS.CueTimer>MS.CueDeactivateTime):
MS.deactivate_cue()
MS.CueTimerFlag=False
MS.CueTimer=-1
ardsigs,data = MS.Comm.ReceiveData()
cnt = -1
for sig in ardsigs:
cnt +=1
if sig>0:
if MS.PythonControlFlag:
if sig==2:
try:
if data[cnt][0:2]=="DE":
wellnum = int(data[cnt][2])
MS.Ard_Act_Well_State[wellnum-1]=False
if MS.PythonControlFlag:
MS.DETECT(wellnum)
print("Detection on Well #", wellnum)
elif data[cnt][0:2]=="AW":
wellnum = int(data[cnt][2])-1
MS.Ard_Act_Well_State[wellnum]=True
print("Activated Well #", wellnum+1)
if MS.Act_Well[wellnum]==False:
print('wrong activation')
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="DW":
wellnum = int(data[cnt][2])-1
MS.Ard_Act_Well_State[wellnum]=False
MS.Ard_LED_State[wellnum]=False
print("Deactivated Well #", wellnum+1)
if MS.Act_Well[wellnum]==True:
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="AL":
wellnum = int(data[cnt][2])-1
MS.Ard_LED_State[wellnum]=True
print("LED ON Well #", wellnum+1)
if MS.LED_State[wellnum]==False:
print('wrong led activation')
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
elif data[cnt][0:2]=="DL":
wellnum = int(data[cnt][2])-1
MS.Ard_LED_State[wellnum]=False
if MS.LED_State[wellnum]==True:
                                            MS.IncongruencyFlag=True
MS.IncongruencyTimer=time.time()
print("LED OFF Well #", wellnum+1)
elif data[cnt][0:2]=="RE":
print("Reward Delivered to ", wellnum+1)
if MS.saveFlag:
logEvent(data[cnt],MS)
except:
print("Error Processing Arduino Event.", sys.exc_info())
elif sig == 4:
try:
#print("Updating arduino states.")
MS.UpdateArdStates(data[cnt])
#print(data[cnt])
MS.InnerStateCheck(int(data[cnt][0]))
except:
print("Error updating states",sys.exc_info())
else:
if MS.Comm.verbose:# no python control
print('e',ardsigs,data)
except: # try to read data
print ("Error Processing Incoming Data", sys.exc_info())
else: # if there is an interrupt
break
def PrintInstructions():
print()
print ("Enter 'Auto', to start automatic goal sequencing.")
print ("Enter 'C#', to queue a cue for the next trial.")
print ("Enter 'S', to check state machine status")
print ("Enter 'N', to start a new trial.")
print ("Enter 'M#', to manually detect a well.")
print ("Enter 'P%', to change switch probability.")
print ("Enter 'Stop', to stop automation of well sequencing.")
print("------------------------------------------------------")
print ("Enter 'a','r' activate / reset all")
print ("Enter 's' to check status")
print ("Enter 'w#','d#', to activate/deactivate a well (e.g 'w1')")
print ("Enter 'p#', to turn on pump (e.g 'p3') ")
print ("Enter 'l#', to toggle LED (e.g 'l1') ")
print ("Enter 'z#=dur' to change pump duration ('z4=20') ")
print ("Enter 'c#','y' to turn on/off cues ('c1')")
print ("Enter 'q' to exit")
def getCmdLineInput(arduinoEv,interruptEv):
global MS
ArdWellInstSet = ['w','d','p','l','z'] # instructions for individual well control
ArdGlobalInstSet = ['a','s','r','y'] # instructions for global changes
time.sleep(1)
while True:
if not interruptEv.is_set():
# wait 1 second for arduino information to come in
#arduinoEv.wait(0.2)
try:
print('To print available commands press ?')
CL_in = input()
if CL_in == '?':
PrintInstructions()
CL_in = input()
else:
pass
if (isinstance(CL_in,str) and len(CL_in)>0):
# Automation
if (CL_in=='Auto'):
if not MS.PythonControlFlag:
try:
while True:
print('')
if MS.Protocol[:3] in ['T3a','T3b']:
cueinput = int(input('Enter cue to enable [5,6]: '))
if cueinput in [5,6]:
MS.Act_Cue = cueinput
break
else:
print("Invalid Cue")
                                    elif MS.Protocol[:3] in ['T4a','T4b']:
cueinput = int(input('Enter cue to enable [1,3]: '))
if cueinput in [1,3]:
MS.Act_Cue = cueinput
break
else:
print("Invalid Cue")
else:
cueinput = 0
break
if cueinput>=0 and cueinput<=9:
MS.Act_Cue = cueinput
MS.START()
except:
print('Unable to start automation. Talk to Alex about it.')
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
MS.STOP()
# Automation Specific Commands
if MS.PythonControlFlag:
if (CL_in[0]=='C'):
MS.Queued_Cue = int(CL_in[1])
print("Cue queued for the next trial.")
elif (CL_in=='S'):
print("Auto Control Enabled = ", MS.PythonControlFlag)
MS.STATUS()
print("Arduino Variables Status")
print(MS.Ard_Act_Well_State)
elif (CL_in=='N'):
print("Starting a new trial.")
MS.NEW_TRIAL()
elif (CL_in[0]=='M'):
w = int(CL_in[1])
if w>=1 and w<=6:
MS.DETECT(w)
elif (CL_in[0]=='P'):
pr = int(CL_in[1:])
if pr>=0 and pr<=100:
MS.SwitchProb = float(pr)/100.0
elif (CL_in=='Stop'):
MS.STOP()
# individual instructions
ins = CL_in[0]
# quit instruction
if (ins == 'q'):
print('Terminating Arduino Communication')
MS.STOP()
interruptEv.set()
time.sleep(0.2)
close(MS)
break
# global instructions: a,s,r,y
elif ins in ArdGlobalInstSet:
if ins == 'a':
MS.Comm.ActivateAllWells()
elif ins == 's':
MS.Comm.getArdStatus()
elif ins == 'r':
MS.Comm.Reset()
elif ins == 'y':
MS.Comm.DeActivateCue()
# actions on individual wells
elif ins in ArdWellInstSet:
try:
well = int(CL_in[1])-1 # zero indexing the wells
if well>=0 and well <=5:
if ins=='w' and not MS.PythonControlFlag :
MS.Comm.ActivateWell(well)
elif ins=='d' and not MS.PythonControlFlag :
MS.Comm.DeActivateWell(well)
elif ins=='p':
MS.Comm.DeliverReward(well)
if MS.PythonControlFlag:
MS.rewardDelivered(well)
elif ins=='l':
MS.Comm.ToggleLED(well)
elif ins=='z':
try:
dur = int(CL_in[3:])
if dur>0 and dur<=1000:
MS.Comm.ChangeReward(well,dur)
except:
print('Invalid duration for reward.')
except:
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
print('Incorrect Instruction Format, Try again')
pass
# cue control
elif ins=='c' and not MS.PythonControlFlag :
try:
cuenum = int(CL_in[1])
                            if cuenum>=1 and cuenum<=6:
MS.Comm.ActivateCue(cuenum)
else:
print('Invalid Cue Number')
except:
print('Invalid Cue Number')
pass
except:
print ("error", sys.exc_info()[0],sys.exc_info()[1],sys.exc_info()[2].tb_lineno)
#arduinoEv.clear()
else:
break
# Parse Input:
expt, baud, verbose, headFile, datFile, saveFlag, npyfile = ParseArguments()
# Set serial comm with arduino
Comm = ArdComm(baud,verbose=verbose)
# Create Maze object
if expt in PythonControlSet:
MS = Maze(Comm,protocol=expt,datFile=datFile,headFile=headFile,npyFile=npyfile,saveFlag=saveFlag)
else:
MS = Maze(Comm)
# leave some time
time.sleep(0.2)
# Main
arduinoEv = threading.Event()
interruptEv = threading.Event()
# Declare threads
readArdThr = threading.Thread(target = readArduino, args = (arduinoEv, interruptEv))
cmdLine = threading.Thread(target = getCmdLineInput, args = (arduinoEv,interruptEv))
try:
# Start threads
readArdThr.start()
cmdLine.start()
except KeyboardInterrupt:
print ("Keyboard Interrupt. Arduino Comm closed.")
interruptEv.set()
readArdThr.join()
cmdLine.join()
close(MS)
quit()
except:
print ("error", sys.exc_info()[0])
interruptEv.set()
readArdThr.join()
cmdLine.join()
close(MS)
quit()
| mit | 3,080,016,546,511,445,500 | 43.903226 | 112 | 0.382687 | false |
BackupTheBerlios/cuon-svn | cuon_client/cuon/DMS/documentTools.py | 1 | 14339 | # -*- coding: utf-8 -*-
##Copyright (C) [2003-2005] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
from types import *
import pygtk
pygtk.require('2.0')
import gtk
import gtk.glade
import gobject
import string
import zipfile
import logging
#import cuon.Login.User
import SingleDMS
import cuon.Misc.misc
import os
import types
from PIL import Image
try:
import sane
#from _sane import *
except Exception, param:
print 'No Sane found --> No scanner !'
print Exception, param
import bz2
import re
import binascii
import cuon.XMLRPC.xmlrpc
import base64
from cuon.Databases.dumps import dumps
import SingleDMS
class documentTools(dumps):
def __init__(self):
dumps.__init__(self)
self.rpc = cuon.XMLRPC.xmlrpc.myXmlRpc()
def viewDocument(self, singleDMS,dicUser, dicVars,Action=None, liEmailAddresses = None):
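        # Rough dispatch logic (summary of the code below): scanned images are
        # decompressed and shown with PIL, LINK entries are opened in the
        # configured browser, and any other known format is written to a temp
        # file (with '##key;;' placeholders substituted from dicVars) and then
        # printed, emailed or handed to the configured external program.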
print 'dicVars1 ', dicVars
print 'Action = ', Action
print singleDMS.ID, singleDMS.fileFormat
singleDMS.loadDocument()
print singleDMS.ID, singleDMS.fileFormat
print 'len Image = ', len(singleDMS.imageData)
exe = None
sEXT = 'txt'
if singleDMS.fileFormat:
print 'Format = ', singleDMS.fileFormat
if singleDMS.fileFormat == dicUser['prefDMS']['fileformat']['scanImage']['format']:
print 'show'
s = bz2.decompress( singleDMS.imageData)
#sys.exit(0)
newIm = Image.fromstring('RGB',[singleDMS.size_x, singleDMS.size_y], s)
newIm.show()
elif singleDMS.fileFormat == dicUser['prefDMS']['fileformat']['LINK']['format']:
print 'Link'
s = singleDMS.imageData
print 's = ', s
os.system(dicUser['prefDMS']['exe']['internet'] + ' ' + `s` )
else:
for key in dicUser['prefDMS']['fileformat'].keys():
print 'file-format', singleDMS.fileFormat
print 'User-fileformat', dicUser['prefDMS']['fileformat'][key]['format']
if singleDMS.fileFormat == dicUser['prefDMS']['fileformat'][key]['format']:
print 'dicUser-prefDMS', dicUser['prefDMS']['fileformat'][key]
exe = dicUser['prefDMS']['fileformat'][key]['executable']
print '-------------------------------------------------------------------'
print 'exe = ', exe
print '-------------------------------------------------------------------'
#sys.exit(0)
if singleDMS.fileSuffix and singleDMS.fileSuffix not in ['NONE','ERROR']:
sEXT = singleDMS.fileSuffix
else:
sEXT = dicUser['prefDMS']['fileformat'][key]['suffix'][0]
                    # keep exe from the matching format; resetting it to None on a
                    # non-matching key would clobber an earlier match in this loop
print 'exe 1 = ', exe
if exe or Action != None:
singleDMS.createTmpFile(sEXT)
if dicVars:
#print ' '
#print 'dicVars = ', dicVars
try:
if zipfile.is_zipfile(singleDMS.tmpFile):
print 'zipfile found'
z1 = zipfile.ZipFile(singleDMS.tmpFile,'a')
print z1.namelist()
for f1 in ['content.xml', 'styles.xml']:
f_in = str(z1.read(f1))
#print 'content.xml', f_in
f_in = self.replaceValues(dicVars,f_in, dicUser)
#print 'replaced Content', f_in
z1.writestr(f1,f_in)
z1.close()
else:
f_out = open(singleDMS.tmpFile + '_w1','a')
f_in = open(singleDMS.tmpFile,'r')
if f_in and f_out:
s = f_in.readline()
while s:
s = self.replaceValues(dicVars,s, dicUser)
f_out.write(s)
s = f_in.readline()
f_in.close()
f_out.close()
singleDMS.tmpFile = singleDMS.tmpFile + '_w1'
else:
'error read/create tmp-file'
except Exception, param:
print Exception
print param
print 'exe2 = ', exe
if Action == 'PrintNewsletter':
sExe = dicUser['prefApps']['printNewsletter']
print 'sExe', sExe, singleDMS.tmpFile
os.system(sExe + ' ' + singleDMS.tmpFile)
elif Action == 'sentAutomaticEmail':
print 'sentAutomaticEmail'
print dicUser
if dicUser.has_key('Email'):
liAttachments = []
filename = singleDMS.tmpFile
print 'filename = ', filename
f = open(filename,'rb')
if f:
s = f.read()
#print 's = ', s
s = bz2.compress(s)
s = base64.encodestring(s)
dicAtt = {}
dicAtt['filename'] = filename[:filename.find('_w1')]
dicAtt['data'] = s
print 'len data = ', dicAtt['filename'] , len(dicAtt['data'])
liAttachments.append(dicAtt)
f.close()
for emailTo in liEmailAddresses:
dicV = {}
dicV['From'] = dicUser['Email']['From']
dicV['To'] = emailTo
dicV['Subject'] = dicVars['email_subject']
dicV['Body'] = dicVars['Body']
if dicVars.has_key('sm'):
dicV['sm'] = dicVars['sm']
print 'dicV = ', dicV.keys()
print dicUser['Email']
em = self.rpc.callRP('Email.sendTheEmail', dicV, liAttachments, dicUser)
self.writeEmailLog(em)
else:
print 'else execute ', exe
#os.system(exe + ' ' + singleDMS.tmpFile )
self.startExternalPrg(exe,singleDMS.tmpFile)
def scanDocument(self, singleDMS, dicUser):
## misc = cuon.Misc.misc.misc()
## sc = dicUser['prefDMS']['scan_program']
## sc = sc + ' --mode ' + dicUser['prefDMS']['scan_mode']
## sc = sc + ' --resolution ' + dicUser['prefDMS']['scan_resolution']
## print sc
## self.scanfile = dicUser['prefPath']['tmp'] + misc.getRandomFilename('_scan.tmp')
## print self.scanfile
## sc = sc + ' >> ' + self.scanfile
## print sc
## ok = os.system(sc)
## print ok
# SANE for scan images
print 'SANE version:', sane.init()
print 'Available devices=', sane.get_devices()
if dicUser['prefDMS']['scan_device']:
try:
scanner=sane.open(dicUser['prefDMS']['scan_device'])
except:
scanner = sane.open(sane.get_devices()[0][0])
else:
scanner = sane.open(sane.get_devices()[0][0])
try:
print 'SaneDev object=', scanner
print 'Device parameters:', scanner.get_parameters()
print 'mode', scanner.mode
print 'contrast', scanner.contrast
print 'brightness', scanner.brightness
print 'depth', scanner.depth
print 'br_x', scanner.br_x
print 'br_y', scanner.br_y
print 'resolution', scanner.resolution
except:
pass
# Set scan parameters
scanner.mode = dicUser['prefDMS']['scan_mode']
try:
if isinstance(scanner.contrast, types.IntType):
scanner.contrast= int(dicUser['prefDMS']['scan_contrast'])
else:
scanner.contrast= dicUser['prefDMS']['scan_contrast']
except:
pass
try:
if isinstance(scanner.brightness, types.IntType):
scanner.brightness= int(dicUser['prefDMS']['scan_brightness'])
else:
scanner.brightness= dicUser['prefDMS']['scan_brightness']
except:
pass
#scanner.white_level=dicUser['prefDMS']['scan_white_level']
try:
if isinstance(scanner.depth, types.IntType):
scanner.depth= int(dicUser['prefDMS']['scan_depth'])
else:
scanner.depth= dicUser['prefDMS']['scan_depth']
except:
pass
try:
if isinstance(scanner.br_x, types.IntType):
scanner.br_x= int(dicUser['prefDMS']['scan_r']['x'])
else:
scanner.br_x= dicUser['prefDMS']['scan_r']['x']
except:
pass
try:
if isinstance(scanner.br_y, types.IntType):
scanner.br_y = int(dicUser['prefDMS']['scan_r']['y'])
else:
scanner.br_y=dicUser['prefDMS']['scan_r']['y']
except:
pass
try:
if isinstance(scanner.resolution, types.IntType):
scanner.resolution = int(dicUser['prefDMS']['scan_resolution'])
else:
scanner.resolution = dicUser['prefDMS']['scan_resolution']
except:
pass
print 'Device parameters after setting:', scanner.get_parameters()
#print scanner.contrast
#print scanner.brightness
#print scanner.white_level
# Initiate the scan
scanner.start()
# Get an Image object
im=scanner.snap()
#print 'Device parameters after snap:', scanner.get_parameters()
# Write the image out as a GIF file
#im.save('/home/jhamel/foo.png')
im.show()
if (im.mode != "RGB"):
im = im.convert("RGB")
singleDMS.size_x = im.size[0]
singleDMS.size_y = im.size[1]
s = im.tostring('raw','RGB')
print len(s)
s2 = bz2.compress(s)
print len(s2)
singleDMS.imageData = s2
#newIm = Image.fromstring('RGB',[1024.0,768.0], s)
#newIm.show()
#del scanner
#sane.exit()
def importDocument(self, singleDMS, dicUser, sFile):
if sFile:
#print sFile
f = open(sFile,'rb')
#print f
#f.seek(0)
#b = f.readline()
b = f.read()
#print 'len of b', len(b)
#print b
singleDMS.imageData = bz2.compress(b)
suffix = string.lower(sFile[string.rfind(sFile,'.')+1:len(sFile)])
for key in dicUser['prefDMS']['fileformat'].keys():
for i in dicUser['prefDMS']['fileformat'][key]['suffix']:
#print i
#print suffix
if i == suffix:
print 'suffix found'
                        singleDMS.fileFormat = dicUser['prefDMS']['fileformat'][key]['format']
singleDMS.fileSuffix = suffix
print 'singleDMS -f-format', `singleDMS.fileFormat`
print 'singleDMS -f-suffix', `singleDMS.fileSuffix`
f.close()
def load_mainwindow_logo(self, allTables):
self.singleDMS = SingleDMS.SingleDMS(allTables)
self.singleDMS.loadMainLogo()
return self.singleDMS.createTmpFile(self.singleDMS.firstRecord['file_suffix'])
def replaceValues(self, dicVars, s, dicUser):
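        # Placeholders in the document text look like '##key;;' and are replaced
        # with dicVars[key]; e.g. '##customer_name;;' becomes the value stored
        # under 'customer_name' (that key name is only illustrative).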
#print 'replace this in document: ', dicVars
for key in dicVars.keys():
try:
if isinstance(dicVars[key], types.UnicodeType):
if dicUser['Locales'] == 'de':
dicVars[key] = dicVars[key].encode('utf-8')
#print 'de and unicode'
#print dicVars[key]
if self.checkType( dicVars[key], 'string'):
                    # escape ampersands so the substituted text stays valid XML
                    dicVars[key] = dicVars[key].replace('&', '&amp;')
#print key, dicVars[key]
#print '\n'
except Exception, params:
print Exception, params
try:
#print 'try to replace this ', key, dicVars[key]
if dicVars[key] == 'NONE' or dicVars[key] ==None:
s = s.replace('##'+ key + ';;','')
elif self.checkType(dicVars[key], 'string') :
s = s.replace('##'+ key + ';;',dicVars[key] )
else:
s = s.replace('##'+ key + ';;',`dicVars[key]` )
except:
pass
return s
| gpl-3.0 | 3,698,759,601,823,871,500 | 37.436997 | 126 | 0.472972 | false |
staffanm/layeredconfig | layeredconfig/dictsource.py | 1 | 1625 | # this should possibly be a abstract class as well
from . import ConfigSource
class DictSource(ConfigSource):
def __init__(self, **kwargs):
"""If your backend data is exposable as a python dict, you can
subclass from this class to avoid implementing :py:meth:`has`,
:py:meth:`get`, :py:meth:`keys`, :py:meth:`subsection` and
:py:meth:`subsections`. You only need to write
:py:meth:`__init__` (which should set ``self.source`` to that
exposed dict), and possibly :py:meth:`typed` and
:py:meth:`save`.
"""
super(DictSource, self).__init__(**kwargs)
self.source = {}
def subsections(self):
for (k, v) in self.source.items():
if isinstance(v, dict):
yield k
def keys(self):
for (k, v) in self.source.items():
if not isinstance(v, dict) and not isinstance(v, type):
yield k
def subsection(self, key):
# Make an object of the correct type
return self.__class__(defaults=self.source[key],
parent=self,
identifier=self.identifier)
def typed(self, key):
# if we have it, we can type it
return key in self.source and self.source[key] is not None
def has(self, key):
# should return true for real values only, not type placeholders or sub-dicts
return key in self.source and not isinstance(self.source[key], (type, dict))
def get(self, key):
return self.source[key]
def set(self, key, value):
self.source[key] = value
| bsd-3-clause | 8,381,840,833,951,817,000 | 33.574468 | 85 | 0.580923 | false |
swiftcoder/ashima-iv | src/game.py | 1 | 2465 |
import pyglet
from pyglet.gl import *
import math
from app import AppState, enter_state
from outcome import OutcomeState
from window import Window
from entity import World
import factories
from euclid import Vector3
from resources import Resources
from camera import Camera
from controller import Controller
from tether import Tether
from graphics import Graphics
from teams import Teams
class GameState(AppState):
def start(self):
music = pyglet.resource.media('data/music/the_moonlight_strikers_act1.mp3')
self.player = music.play()
self.sunlight = Resources.load_shader('data/shaders/sunlight.shader')
ship = factories.create_hammerfall(Vector3(0, -250, 2400), 'red')
World.add(ship)
for i in range(4, 0, -1):
ship = factories.create_anaconda(Vector3(i*5, i*10, i*10 + 1000), 'red')
World.add(ship)
for i in range(2, 0, -1):
ship = factories.create_viper(Vector3(i*40, i*-10, i*10 + 25), 'blue', i != 1)
World.add(ship)
self.ship = ship
World.set_player(self.ship)
@ship.event
def on_remove(ship):
print 'defeat'
enter_state( OutcomeState(False) )
self.fps_display = pyglet.clock.ClockDisplay()
glEnable(GL_CULL_FACE)
glFrontFace(GL_CCW)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LEQUAL)
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE)
aspect = float(Window.width)/float(Window.height)
camera = Camera(math.pi/4, aspect, 0.1, 100000.0)
Graphics.camera = camera
cam = factories.create_camera(camera)
World.add(cam)
tether = Tether(cam, ship, Vector3(-5, 8, -16), Vector3(0, 0, 65))
aim = factories.aim_assist(cam)
crosshairs = factories.cross_hairs(ship)
factories.create_sky(cam)
def resume(self):
control = Controller(self.ship)
self.player.play()
def pause(self):
if self.player:
self.player.pause()
def update(self, dt):
World.perform_update(dt)
if Teams.in_team('red') == []:
print 'victory'
enter_state( OutcomeState(True) )
def draw(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glColor4f(1, 1, 1, 1)
self.sunlight.bind()
self.sunlight.uniform('sunDir', Vector3(-1, 1, 0).normalize())
self.sunlight.unbind()
World.perform_frame()
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, Window.width, 0, Window.height, -100, 100)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
self.fps_display.draw()
| bsd-3-clause | 3,728,101,832,243,301,400 | 22.037383 | 81 | 0.691278 | false |
skosukhin/spack | var/spack/repos/builtin/packages/spdlog/package.py | 1 | 1655 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Spdlog(CMakePackage):
"""Very fast, header only, C++ logging library"""
homepage = "https://github.com/gabime/spdlog"
url = "https://github.com/gabime/spdlog/archive/v0.9.0.tar.gz"
version('0.11.0', '08232203f18a6f9ff47e083cc7a141a050805d3b')
version('0.10.0', '57b471ef97a23cc29c38b62e00e89a411a87ea7f')
version('0.9.0', 'dda741ef8e12d57d91f778d85e95a27d84a82ac4')
| lgpl-2.1 | -1,359,533,223,391,929,000 | 44.972222 | 78 | 0.683988 | false |
jocelynj/weboob | weboob/tools/parsers/__init__.py | 1 | 2010 | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Christophe Benz, Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import logging
__all__ = ['get_parser', 'NoParserFound']
class NoParserFound(Exception): pass
def load_lxml():
from .lxmlparser import LxmlHtmlParser
return LxmlHtmlParser
def load_lxmlsoup():
from .lxmlsoupparser import LxmlSoupParser
return LxmlSoupParser
def load_html5lib():
from .html5libparser import Html5libParser
return Html5libParser
def load_elementtidy():
from .elementtidyparser import ElementTidyParser
return ElementTidyParser
def load_builtin():
from .htmlparser import HTMLParser
return HTMLParser
def get_parser(preference_order=('lxml', 'lxmlsoup', 'html5lib', 'elementtidy', 'builtin')):
"""
Get a parser from a preference order list.
This allows Weboob to run on systems without lxml, which is the default parser.
Return a parser implementing IParser.
"""
if not isinstance(preference_order, (tuple, list)):
preference_order = [preference_order]
for kind in preference_order:
if not 'load_%s' % kind in globals():
continue
try:
return globals()['load_%s' % kind]()
except ImportError:
logging.debug('%s is not installed.' % kind)
raise NoParserFound("No parser found (%s)" % ','.join(preference_order))
| gpl-3.0 | -8,723,864,577,178,292,000 | 30.40625 | 92 | 0.706965 | false |
Yelp/dumb-init | tests/exit_status_test.py | 1 | 1237 | import signal
import sys
from subprocess import Popen
import pytest
@pytest.mark.parametrize('exit_status', [0, 1, 2, 32, 64, 127, 254, 255])
@pytest.mark.usefixtures('both_debug_modes', 'both_setsid_modes')
def test_exit_status_regular_exit(exit_status):
"""dumb-init should exit with the same exit status as the process that it
supervises when that process exits normally.
"""
proc = Popen(('dumb-init', 'sh', '-c', 'exit {}'.format(exit_status)))
proc.wait()
assert proc.returncode == exit_status
@pytest.mark.parametrize(
'signal', [
signal.SIGTERM,
signal.SIGHUP,
signal.SIGQUIT,
signal.SIGKILL,
],
)
@pytest.mark.usefixtures('both_debug_modes', 'both_setsid_modes')
def test_exit_status_terminated_by_signal(signal):
"""dumb-init should exit with status 128 + signal when the child process is
terminated by a signal.
"""
# We use Python because sh is "dash" on Debian and "bash" on others.
# https://github.com/Yelp/dumb-init/issues/115
proc = Popen((
'dumb-init', sys.executable, '-c', 'import os; os.kill(os.getpid(), {})'.format(
signal,
),
))
proc.wait()
assert proc.returncode == 128 + signal
| mit | 9,069,085,403,994,106,000 | 29.925 | 88 | 0.645109 | false |
adalke/rdkit | rdkit/Dbase/UnitTestDbUtils.py | 1 | 4157 | # $Id$
#
# Copyright (C) 2002-2006 greg Landrum and Rational Discovery LLC
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
"""unit testing code for the database utilities
"""
from rdkit import RDConfig
import unittest,os,tempfile
from rdkit.Dbase import DbUtils
from rdkit.Dbase.DbConnection import DbConnect
class TestCase(unittest.TestCase):
def setUp(self):
#print '\n%s: '%self.shortDescription(),
self.baseDir = os.path.join(RDConfig.RDCodeDir,'Dbase','test_data')
self.dbName = RDConfig.RDTestDatabase
if RDConfig.useSqlLite:
tmpf,tempName = tempfile.mkstemp(suffix='sqlt')
self.tempDbName = tempName
else:
self.tempDbName='::RDTests'
self.colHeads=('int_col','floatCol','strCol')
self.colTypes=('integer','float','string')
def tearDown(self):
if RDConfig.useSqlLite and os.path.exists(self.tempDbName):
try:
os.unlink(self.tempDbName)
except:
import traceback
traceback.print_exc()
def _confirm(self,tblName,dbName=None):
if dbName is None: dbName = self.dbName
conn = DbConnect(dbName,tblName)
res = conn.GetColumnNamesAndTypes()
assert len(res)==len(self.colHeads),'bad number of columns'
names = [x[0] for x in res]
for i in range(len(names)):
assert names[i].upper()==self.colHeads[i].upper(),'bad column head'
if RDConfig.useSqlLite:
# doesn't seem to be any column type info available
return
types = [x[1] for x in res]
for i in range(len(types)):
assert types[i]==self.colTypes[i],'bad column type'
def test1Txt(self):
""" test reading from a text file """
with open(os.path.join(self.baseDir,'dbtest.csv'),'r') as inF:
tblName = 'fromtext'
DbUtils.TextFileToDatabase(self.tempDbName,tblName,inF)
self._confirm(tblName,dbName=self.tempDbName)
def test3Txt(self):
""" test reading from a text file including null markers"""
with open(os.path.join(self.baseDir,'dbtest.nulls.csv'),'r') as inF:
tblName = 'fromtext2'
DbUtils.TextFileToDatabase(self.tempDbName,tblName,inF,nullMarker='NA')
self._confirm(tblName,dbName=self.tempDbName)
def testGetData1(self):
""" basic functionality
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=1)
assert len(d)==10
assert tuple(d[0])==(0,11)
assert tuple(d[2])==(4,31)
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData2(self):
""" using a RandomAccessDbResultSet
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=1)
assert tuple(d[0])==(0,11)
assert tuple(d[2])==(4,31)
assert len(d)==10
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData3(self):
""" using a DbResultSet
"""
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=0)
with self.assertRaisesRegexp(TypeError, ""):
len(d)
rs = []
for thing in d:
rs.append(thing)
assert len(rs)==10
assert tuple(rs[0])==(0,11)
assert tuple(rs[2])==(4,31)
def testGetData4(self):
""" using a RandomAccessDbResultSet with a Transform
"""
fn = lambda x:(x[0],x[1]*2)
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=1,
transform=fn)
assert tuple(d[0])==(0,22)
assert tuple(d[2])==(4,62)
assert len(d)==10
with self.assertRaisesRegexp(IndexError, ""):
d[11]
def testGetData5(self):
""" using a DbResultSet with a Transform
"""
fn = lambda x:(x[0],x[1]*2)
d = DbUtils.GetData(self.dbName,'ten_elements',forceList=0,randomAccess=0,
transform=fn)
with self.assertRaisesRegexp(TypeError, ""):
len(d)
rs = []
for thing in d:
rs.append(thing)
assert len(rs)==10
assert tuple(rs[0])==(0,22)
assert tuple(rs[2])==(4,62)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | 924,426,392,574,821,400 | 29.343066 | 78 | 0.639163 | false |
capitalone/cloud-custodian | tests/test_shield.py | 1 | 2332 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import mock
from .common import BaseTest
class ShieldTest(BaseTest):
# most of the shield tests are embedded in other resources
def test_shield_sync(self):
# shield resources
p = self.load_policy(
{
"name": "elb-sync",
"resource": "elb",
"actions": [{"type": "set-shield", "sync": True, "state": True}],
}
)
client = mock.MagicMock()
client.delete_protection = delete = mock.Mock()
set_shield = p.resource_manager.actions[0]
with mock.patch.object(p.resource_manager, "get_arns") as mock_get_arn:
mock_get_arn.return_value = ["us-east-1:%s/lb" % i for i in map(str, range(5))]
with mock.patch.object(
p.resource_manager, "get_resource_manager"
) as mock_resource_manager:
mock_resource_manager.return_value = mock_resource_manager
mock_resource_manager.resources.return_value = map(str, range(5))
protections = [
{"Id": i, "ResourceArn": "us-east-1:%s/lb" % i} for i in map(str, range(10))
]
# One out of region
protections.extend(
[{'Id': 42, 'ResourceArn': "us-east-2:42/lb"}]
)
# App elb also present for elb shield
protections.extend(
[
{"Id": i, "ResourceArn": "us-east-1:%s/app/lb" % i}
for i in map(str, range(10, 15))
]
)
# Networkload load balancers also present for elb shield
protections.extend(
[
{"Id": i, "ResourceArn": "%s/net/lb" % i}
for i in map(str, range(10, 15))
]
)
set_shield.clear_stale(client, protections)
self.assertEqual(delete.call_count, 5)
for i in range(5, 10):
self.assertTrue(
mock.call(ProtectionId=str(i)) in delete.call_args_list
)
| apache-2.0 | -9,222,591,237,412,605,000 | 34.876923 | 96 | 0.48542 | false |
pshchelo/heat | heat/tests/openstack/test_volume.py | 1 | 42764 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import json
from cinderclient import exceptions as cinder_exp
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine import rsrc_defn
from heat.engine import scheduler
from heat.objects import resource_data as resource_data_object
from heat.tests.nova import fakes as fakes_nova
from heat.tests import test_volume_utils as vt_base
from heat.tests import utils
cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volumes and attachments.
resources:
volume:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
description: test_description
metadata:
key: value
volume2:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 2
volume3:
type: OS::Cinder::Volume
properties:
availability_zone: nova
size: 1
name: test_name
scheduler_hints: {"hint1": "good_advice"}
attachment:
type: OS::Cinder::VolumeAttachment
properties:
instance_uuid: WikiDatabase
volume_id: { get_resource: volume }
mountpoint: /dev/vdc
'''
single_cinder_volume_template = '''
heat_template_version: 2013-05-23
description: Cinder volume
resources:
volume:
type: OS::Cinder::Volume
properties:
size: 1
name: test_name
description: test_description
'''
class CinderVolumeTest(vt_base.BaseVolumeTest):
def setUp(self):
super(CinderVolumeTest, self).setUp()
self.t = template_format.parse(cinder_volume_template)
self.use_cinder = True
def _mock_create_volume(self, fv, stack_name, size=1,
final_status='available'):
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=size, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume(final_status, id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
return fv_ready
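    # Note on the mocking style used below (added comment): these tests follow the
    # mox record/replay pattern - expectations are first recorded on the stubbed
    # clients (self.cinder_fc, self.fc), self.m.ReplayAll() switches to replay mode
    # before the code under test runs, and self.m.VerifyAll() asserts that every
    # recorded call actually happened.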
def test_cinder_volume_size_constraint(self):
self.t['resources']['volume']['properties']['size'] = 0
stack = utils.parse_stack(self.t)
error = self.assertRaises(exception.StackValidationFailed,
self.create_volume,
self.t, stack, 'volume')
self.assertEqual(
"Property error : resources.volume.properties.size: "
"0 is out of range (min: 1, max: None)", six.text_type(error))
def test_cinder_create(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_stack'
self.stub_SnapshotConstraint_validate()
self.stub_VolumeConstraint_validate()
self.stub_VolumeTypeConstraint_validate()
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={'key': 'value'},
volume_type='lvm').AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'].update({
'volume_type': 'lvm',
})
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_from_image(self):
fv = vt_base.FakeVolume('downloading')
stack_name = 'test_cvolume_create_from_img_stack'
image_id = '46988116-6703-4623-9dbc-2bc6d284021b'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
glance.GlanceClientPlugin.get_image_id(
image_id).MultipleTimes().AndReturn(image_id)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume',
imageRef=image_id).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'image': image_id,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_create_with_read_only(self):
fv = vt_base.FakeVolume('with_read_only_access_mode')
stack_name = 'test_create_with_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='ImageVolumeDescription',
name='ImageVolume').AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
update_readonly_mock(fv.id, False).return_value(None)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'name': 'ImageVolume',
'description': 'ImageVolumeDescription',
'availability_zone': 'nova',
'read_only': False,
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_default(self):
fv = vt_base.FakeVolume('creating')
stack_name = 'test_cvolume_default_stack'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description=None,
name=vol_name).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'size': '1',
'availability_zone': 'nova',
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.m.VerifyAll()
def test_cinder_fn_getatt(self):
stack_name = 'test_cvolume_fngetatt_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv = vt_base.FakeVolume(
'available', availability_zone='zone1',
size=1, snapshot_id='snap-123', name='name',
description='desc', volume_type='lvm',
metadata={'key': 'value'}, source_volid=None,
bootable=False, created_at='2013-02-25T02:40:21.000000',
encrypted=False, attachments=[])
self.cinder_fc.volumes.get('vol-123').MultipleTimes().AndReturn(fv)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual(u'zone1', rsrc.FnGetAtt('availability_zone'))
self.assertEqual(u'1', rsrc.FnGetAtt('size'))
self.assertEqual(u'snap-123', rsrc.FnGetAtt('snapshot_id'))
self.assertEqual(u'name', rsrc.FnGetAtt('display_name'))
self.assertEqual(u'desc', rsrc.FnGetAtt('display_description'))
self.assertEqual(u'lvm', rsrc.FnGetAtt('volume_type'))
self.assertEqual(json.dumps({'key': 'value'}),
rsrc.FnGetAtt('metadata'))
self.assertEqual({'key': 'value'},
rsrc.FnGetAtt('metadata_values'))
self.assertEqual(u'None', rsrc.FnGetAtt('source_volid'))
self.assertEqual(u'available', rsrc.FnGetAtt('status'))
self.assertEqual(u'2013-02-25T02:40:21.000000',
rsrc.FnGetAtt('created_at'))
self.assertEqual(u'False', rsrc.FnGetAtt('bootable'))
self.assertEqual(u'False', rsrc.FnGetAtt('encrypted'))
self.assertEqual(u'[]', rsrc.FnGetAtt('attachments'))
error = self.assertRaises(exception.InvalidTemplateAttribute,
rsrc.FnGetAtt, 'unknown')
self.assertEqual(
'The Referenced Attribute (volume unknown) is incorrect.',
six.text_type(error))
self.m.VerifyAll()
def test_cinder_attachment(self):
stack_name = 'test_cvolume_attach_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
scheduler.TaskRunner(rsrc.delete)()
self.m.VerifyAll()
def test_cinder_volume_shrink_fails(self):
stack_name = 'test_cvolume_shrink_fail_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'),
stack_name, size=2)
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties']['size'] = 2
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Shrinking volume is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_detached(self):
stack_name = 'test_cvolume_extend_det_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('available'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_start(self):
stack_name = 'test_cvolume_extend_fail_start_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2).AndRaise(
cinder_exp.OverLimit(413))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn('Over limit', six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_fails_to_complete(self):
stack_name = 'test_cvolume_extend_fail_compl_stack'
# create script
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
# update script
fv = vt_base.FakeVolume('available',
size=1, attachments=[])
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.extend(fv.id, 2)
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fv.id).AndReturn(
vt_base.FakeVolume('error_extending'))
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertIn("Volume resize failed - Unknown status error_extending",
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_attached(self):
stack_name = 'test_cvolume_extend_att_stack'
# create script
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# update script
attachments = [{'id': 'vol-123',
'device': '/dev/vdc',
'server_id': u'WikiDatabase'}]
fv2 = vt_base.FakeVolume('in-use',
attachments=attachments, size=1)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
# detach script
fvd = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.cinder_fc.volumes.get(fvd.id).AndReturn(fvd)
self.fc.volumes.delete_server_volume('WikiDatabase', 'vol-123')
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fvd)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# resize script
self.cinder_fc.volumes.extend(fvd.id, 2)
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('extending'))
self.cinder_fc.volumes.get(fvd.id).AndReturn(
vt_base.FakeVolume('available'))
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_extend_created_from_backup_with_same_size(self):
stack_name = 'test_cvolume_extend_snapsht_stack'
# create script
fvbr = vt_base.FakeBackupRestore('vol-123')
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('restoring-backup'))
vol_name = utils.PhysName(stack_name, 'volume')
self.cinder_fc.volumes.update('vol-123', description=None,
name=vol_name).AndReturn(None)
self.cinder_fc.volumes.get('vol-123').AndReturn(
vt_base.FakeVolume('available'))
# update script
fv = vt_base.FakeVolume('available', size=2)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.m.ReplayAll()
self.t['resources']['volume']['properties'] = {
'availability_zone': 'nova',
'backup_id': 'backup-123'
}
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual('available', fv.status)
props = copy.deepcopy(rsrc.properties.data)
props['size'] = 2
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
update_task = scheduler.TaskRunner(rsrc.update, after)
self.assertIsNone(update_task())
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_retype(self):
fv = vt_base.FakeVolume('available',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_retype'
new_vol_type = 'new_type'
self.patchobject(cinder.CinderClientPlugin, '_create',
return_value=self.cinder_fc)
self.patchobject(self.cinder_fc.volumes, 'create', return_value=fv)
self.patchobject(self.cinder_fc.volumes, 'get', return_value=fv)
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume2')
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
self.patchobject(cinder.CinderClientPlugin, 'get_volume_type',
return_value=new_vol_type)
self.patchobject(self.cinder_fc.volumes, 'retype')
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
self.cinder_fc.volume_api_version = 1
new_vol_type_1 = 'new_type_1'
props = copy.deepcopy(rsrc.properties.data)
props['volume_type'] = new_vol_type_1
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
# if the volume api is v1, not support to retype
update_task = scheduler.TaskRunner(rsrc.update, after)
ex = self.assertRaises(exception.ResourceFailure, update_task)
self.assertEqual('NotSupported: Using Cinder API V1, '
'volume_type update is not supported.',
six.text_type(ex))
self.assertEqual((rsrc.UPDATE, rsrc.FAILED), rsrc.state)
self.assertEqual(1, self.cinder_fc.volumes.retype.call_count)
def test_cinder_volume_update_name_and_metadata(self):
# update the name, description and metadata
fv = vt_base.FakeVolume('creating',
size=1, name='my_vol',
description='test')
stack_name = 'test_cvolume_updname_stack'
update_name = 'update_name'
meta = {'Key': 'New Value'}
update_description = 'update_description'
kwargs = {
'name': update_name,
'description': update_description
}
fv = self._mock_create_volume(fv, stack_name)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
self.cinder_fc.volumes.update(fv, **kwargs).AndReturn(None)
self.cinder_fc.volumes.update_all_metadata(fv, meta).AndReturn(None)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['name'] = update_name
props['description'] = update_description
props['metadata'] = meta
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_volume_update_read_only(self):
# update read only access mode
fv = vt_base.FakeVolume('update_read_only_access_mode')
stack_name = 'test_update_read_only'
cinder.CinderClientPlugin._create().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone='nova',
description='test_description',
name='test_name',
metadata={u'key': u'value'}).AndReturn(fv)
update_readonly_mock = self.patchobject(self.cinder_fc.volumes,
'update_readonly_flag')
update_readonly_mock(fv.id, True).return_value(None)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
rsrc = self.create_volume(self.t, stack, 'volume')
props = copy.deepcopy(rsrc.properties.data)
props['read_only'] = True
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
def test_cinder_snapshot(self):
stack_name = 'test_cvolume_snpsht_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
scheduler.TaskRunner(rsrc.snapshot)()
self.assertEqual((rsrc.SNAPSHOT, rsrc.COMPLETE), rsrc.state)
self.assertEqual({'backup_id': 'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_snapshot_error(self):
stack_name = 'test_cvolume_snpsht_err_stack'
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
fail_reason = 'Could not determine which Swift endpoint to use'
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('error', fail_reason=fail_reason))
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
rsrc = stack['volume']
scheduler.TaskRunner(rsrc.create)()
self.assertRaises(exception.ResourceFailure,
scheduler.TaskRunner(rsrc.snapshot))
self.assertEqual((rsrc.SNAPSHOT, rsrc.FAILED), rsrc.state)
self.assertIn(fail_reason, rsrc.status_reason)
self.assertEqual({u'backup_id': u'backup-123'},
resource_data_object.ResourceData.get_all(rsrc))
self.m.VerifyAll()
def test_cinder_volume_attachment_update_device(self):
stack_name = 'test_cvolume_attach_udev_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
device=u'/dev/vdd',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
props = copy.deepcopy(rsrc.properties.data)
props['mountpoint'] = '/dev/vdd'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_volume(self):
stack_name = 'test_cvolume_attach_uvol_stack'
self.stub_VolumeConstraint_validate()
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
fv2 = vt_base.FakeVolume('creating', id='vol-456')
vol2_name = utils.PhysName(stack_name, 'volume2')
self.cinder_fc.volumes.create(
size=2, availability_zone='nova',
description=None,
name=vol2_name).AndReturn(fv2)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
fv2 = vt_base.FakeVolume('available', id=fv2.id)
self.cinder_fc.volumes.get(fv2.id).AndReturn(fv2)
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'))
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
fv2a = vt_base.FakeVolume('attaching', id='vol-456')
self._mock_create_server_volume_script(fv2a, volume='vol-456',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
self.create_volume(self.t, stack, 'volume2')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['volume_id'] = 'vol-456'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.assertEqual(fv2a.id, rsrc.resource_id)
self.m.VerifyAll()
def test_cinder_volume_attachment_update_server(self):
stack_name = 'test_cvolume_attach_usrv_stack'
self._mock_create_volume(vt_base.FakeVolume('creating'), stack_name)
self._mock_create_server_volume_script(
vt_base.FakeVolume('attaching'))
self.stub_VolumeConstraint_validate()
# delete script
fva = vt_base.FakeVolume('in-use')
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.cinder_fc.volumes.get(fva.id).AndReturn(fva)
self.fc.volumes.delete_server_volume(
'WikiDatabase', 'vol-123').MultipleTimes().AndReturn(None)
self.cinder_fc.volumes.get(fva.id).AndReturn(
vt_base.FakeVolume('available'))
self.fc.volumes.get_server_volume(u'WikiDatabase',
'vol-123').AndReturn(fva)
self.fc.volumes.get_server_volume(
u'WikiDatabase', 'vol-123').AndRaise(fakes_nova.fake_exception())
# attach script
self._mock_create_server_volume_script(vt_base.FakeVolume('attaching'),
server=u'AnotherServer',
update=True)
self.m.ReplayAll()
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume')
rsrc = self.create_attachment(self.t, stack, 'attachment')
self.assertEqual((rsrc.CREATE, rsrc.COMPLETE), rsrc.state)
props = copy.deepcopy(rsrc.properties.data)
props['instance_uuid'] = 'AnotherServer'
props['volume_id'] = 'vol-123'
after = rsrc_defn.ResourceDefinition(rsrc.name, rsrc.type(), props)
scheduler.TaskRunner(rsrc.update, after)()
self.assertEqual((rsrc.UPDATE, rsrc.COMPLETE), rsrc.state)
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints(self):
fv = vt_base.FakeVolume('creating')
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, name='test_name', description=None,
availability_zone='nova',
scheduler_hints={'hint1': 'good_advice'}).AndReturn(fv)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
fv_ready = vt_base.FakeVolume('available', id=fv.id)
self.cinder_fc.volumes.get(fv.id).AndReturn(fv_ready)
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
self.create_volume(self.t, stack, 'volume3')
self.m.VerifyAll()
def test_cinder_create_with_scheduler_hints_and_cinder_api_v1(self):
cinder.CinderClientPlugin._create().AndReturn(self.cinder_fc)
self.cinder_fc.volume_api_version = 1
self.m.ReplayAll()
stack_name = 'test_cvolume_scheduler_hints_api_v1_stack'
stack = utils.parse_stack(self.t, stack_name=stack_name)
ex = self.assertRaises(exception.StackValidationFailed,
self.create_volume, self.t, stack, 'volume3')
self.assertIn('Scheduler hints are not supported by the current '
'volume API.', six.text_type(ex))
self.m.VerifyAll()
def _test_cinder_create_invalid_property_combinations(
self, stack_name, combinations, err_msg, exc):
stack = utils.parse_stack(self.t, stack_name=stack_name)
vp = stack.t['Resources']['volume2']['Properties']
vp.pop('size')
vp.update(combinations)
rsrc = stack['volume2']
ex = self.assertRaises(exc, rsrc.validate)
self.assertEqual(err_msg, six.text_type(ex))
def test_cinder_create_with_image_and_imageRef(self):
stack_name = 'test_create_with_image_and_imageRef'
combinations = {'imageRef': 'image-456', 'image': 'image-123'}
err_msg = ("Cannot define the following properties at the same "
"time: image, imageRef.")
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.ResourcePropertyConflict)
def test_cinder_create_with_size_snapshot_and_image(self):
stack_name = 'test_create_with_size_snapshot_and_image'
combinations = {
'size': 1,
'image': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'image\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_imageRef(self):
stack_name = 'test_create_with_size_snapshot_and_imageRef'
combinations = {
'size': 1,
'imageRef': 'image-123',
'snapshot_id': 'snapshot-123'}
self.stub_ImageConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'imageRef\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_size_snapshot_and_sourcevol(self):
stack_name = 'test_create_with_size_snapshot_and_sourcevol'
combinations = {
'size': 1,
'source_volid': 'volume-123',
'snapshot_id': 'snapshot-123'}
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
err_msg = ('If "size" is provided, only one of "image", "imageRef", '
'"source_volid", "snapshot_id" can be specified, but '
'currently specified options: '
'[\'snapshot_id\', \'source_volid\'].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_snapshot_and_source_volume(self):
stack_name = 'test_create_with_snapshot_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'snapshot_id': 'snapshot-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'snapshot_id\', \'source_volid\'].')
self.stub_VolumeConstraint_validate()
self.stub_SnapshotConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_with_image_and_source_volume(self):
stack_name = 'test_create_with_image_and_source_volume'
combinations = {
'source_volid': 'source_volume-123',
'image': 'image-123'}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [\'source_volid\', \'image\'].')
self.stub_VolumeConstraint_validate()
self.stub_ImageConstraint_validate()
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_cinder_create_no_size_no_combinations(self):
stack_name = 'test_create_no_size_no_options'
combinations = {}
err_msg = ('If neither "backup_id" nor "size" is provided, one and '
'only one of "image", "imageRef", "source_volid", '
'"snapshot_id" must be specified, but currently '
'specified options: [].')
self._test_cinder_create_invalid_property_combinations(
stack_name, combinations,
err_msg, exception.StackValidationFailed)
def test_volume_restore(self):
stack_name = 'test_cvolume_restore_stack'
# create script
cinder.CinderClientPlugin._create().MultipleTimes().AndReturn(
self.cinder_fc)
self.cinder_fc.volumes.create(
size=1, availability_zone=None,
description='test_description',
name='test_name'
).AndReturn(vt_base.FakeVolume('creating'))
fv = vt_base.FakeVolume('available')
self.cinder_fc.volumes.get(fv.id).AndReturn(fv)
# snapshot script
fb = vt_base.FakeBackup('creating')
self.m.StubOutWithMock(self.cinder_fc.backups, 'create')
self.cinder_fc.backups.create(fv.id).AndReturn(fb)
self.m.StubOutWithMock(self.cinder_fc.backups, 'get')
self.cinder_fc.backups.get(fb.id).AndReturn(
vt_base.FakeBackup('available'))
# restore script
fvbr = vt_base.FakeBackupRestore('vol-123')
self.m.StubOutWithMock(self.cinder_fc.restores, 'restore')
self.cinder_fc.restores.restore('backup-123').AndReturn(fvbr)
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.cinder_fc.volumes.update('vol-123',
description='test_description',
name='test_name')
self.cinder_fc.volumes.get('vol-123').AndReturn(fv)
self.m.ReplayAll()
t = template_format.parse(single_cinder_volume_template)
stack = utils.parse_stack(t, stack_name=stack_name)
scheduler.TaskRunner(stack.create)()
self.assertEqual((stack.CREATE, stack.COMPLETE), stack.state)
scheduler.TaskRunner(stack.snapshot)()
self.assertEqual((stack.SNAPSHOT, stack.COMPLETE), stack.state)
data = stack.prepare_abandon()
fake_snapshot = collections.namedtuple(
'Snapshot', ('data', 'stack_id'))(data, stack.id)
stack.restore(fake_snapshot)
self.assertEqual((stack.RESTORE, stack.COMPLETE), stack.state)
self.m.VerifyAll()
| apache-2.0 | -4,979,401,692,813,748,000 | 39.883365 | 79 | 0.607637 | false |
Birion/python-ffdl | pyffdl/core/app.py | 1 | 4385 | import shutil
from typing import List, Tuple, Optional
import attr
import click
from furl import furl # type: ignore
from pyffdl.__version__ import __version__
from pyffdl.sites import (
AdultFanFictionStory,
ArchiveOfOurOwnStory,
FanFictionNetStory,
HTMLStory,
TwistingTheHellmouthStory,
TGStorytimeStory,
)
from pyffdl.utilities import get_url_from_file, list2text
AVAILABLE_SITES = {
"fanfiction.net": FanFictionNetStory,
"fictionpress.com": FanFictionNetStory,
"adult-fanfiction.org": AdultFanFictionStory,
"archiveofourown.org": ArchiveOfOurOwnStory,
"tthfanfic.org": TwistingTheHellmouthStory,
"tgstorytime.com": TGStorytimeStory,
}
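# Note: download() below matches a story URL against these keys using only the last
# two labels of the hostname (e.g. a hypothetical "www.fanfiction.net" URL resolves
# to the "fanfiction.net" entry).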
@attr.s()
class URL:
url: furl = attr.ib()
file: Optional[str] = attr.ib(default=None)
def download(urls: List[URL], verbose: bool = False, force: bool = False) -> None:
for url in urls:
if not url.url:
continue
try:
host = ".".join(url.url.host.split(".")[-2:])
site = AVAILABLE_SITES.get(host)
if not site:
click.echo(
f"{__file__} is currently only able to download from {list2text(list(AVAILABLE_SITES.keys()))}."
)
return
story = site.parse(url.url, verbose, force)
if url.file:
story.filename = url.file
story.run()
except AttributeError as e:
raise e
# print(e)
# error = "There were problems with parsing the URL."
# with open("pyffdl.log", "a") as fp:
# click.echo(error, file=fp)
# click.echo(error, err=True)
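# Usage sketch (added for illustration; the story id/slug in the URL are made-up
# placeholder values, not a real reference):
#   download([URL(furl("https://www.fanfiction.net/s/1234567/1/Some-Story"))], verbose=True)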
@click.group()
@click.version_option(version=__version__)
def cli() -> None:
pass
@cli.command( # noqa: unused-function
"download", help="Download a new fanfiction story."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_download(
from_file: click.File, url_list: Tuple[str, ...], verbose: bool = False
) -> None:
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
download(urls, verbose)
@cli.command( # noqa: unused-function
"html", help="Download a single story, using a list of chapter URLs."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-a", "--author", help="Name of the author", type=str, required=True)
@click.option("-t", "--title", help="Title of the story", type=str, required=True)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_html(
from_file: click.File,
author: str,
title: str,
url_list: Tuple[str, ...],
verbose: bool = False,
):
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
if not urls:
click.echo("You must provide at least one URL to download.")
return
story = HTMLStory(
chapters=[x.url.tostr() for x in urls],
author=author,
title=title,
url=furl("http://httpbin.org/status/200"),
)
story.verbose = verbose
story.run()
@cli.command( # noqa: unused-function
"update", help="Update an existing .epub fanfiction file."
)
@click.option(
"-f",
"--force",
is_flag=True,
default=False,
help="Completely refresh the ebook file.",
)
@click.option(
"-b", "--backup", is_flag=True, default=False, help="Backup the original file."
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("filenames", type=click.Path(dir_okay=False, exists=True), nargs=-1)
def cli_update(
force: bool, backup: bool, filenames: List[click.Path], verbose: bool = False
) -> None:
if backup:
for filename in filenames:
shutil.copy(f"{filename}", f"{filename}.bck")
stories = [
URL(get_url_from_file(x), str(x) if not force else None) for x in filenames
]
download(stories, verbose, force)
| mit | 876,202,144,005,720,600 | 27.848684 | 116 | 0.601596 | false |
LabKey/labkey-api-python | samples/query_examples.py | 1 | 9462 | #
# Copyright (c) 2015-2018 LabKey Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Examples using the Query.py API
Sample data from the New Study tutorial on labkey.org:
https://www.labkey.org/Documentation/wiki-page.view?name=studySetupManual
"""
from labkey.api_wrapper import APIWrapper
from labkey.exceptions import (
RequestError,
QueryNotFoundError,
ServerContextError,
ServerNotFoundError,
)
from labkey.query import Pagination, QueryFilter
from requests.exceptions import Timeout
import copy
print("Create a server context")
labkey_server = "localhost:8080"
project_name = "ModuleAssayTest" # Project folder name
context_path = "labkey"
api = APIWrapper(labkey_server, project_name, context_path, use_ssl=False)
schema = "lists"
table = "Demographics"
column1 = "Group Assignment"
column2 = "Participant ID"
###################
# Test basic select_rows
###################
result = api.query.select_rows(schema, table)
if result is not None:
print(result["rows"][0])
print("select_rows: There are " + str(result["rowCount"]) + " rows.")
else:
print("select_rows: Failed to load results from " + schema + "." + table)
###################
# Test error handling
###################
# catch base error
try:
result = api.query.select_rows(schema, "badtable")
print(result)
except RequestError:
print("Caught base error")
# catch table not found error
try:
result = api.query.select_rows(schema, "badtable")
print(result)
except QueryNotFoundError:
print("Caught bad table")
# catch schema error
try:
result = api.query.select_rows("badSchema", table)
print(result)
except QueryNotFoundError:
print("Caught bad schema")
# catch SSL error
ssl_api = APIWrapper(labkey_server, project_name, context_path, use_ssl=True)
try:
result = ssl_api.query.select_rows(schema, table)
print(result)
except ServerContextError:
print("Caught SSL Error")
# catch bad context path
bad_api = APIWrapper(labkey_server, project_name, "", use_ssl=False)
try:
result = bad_api.query.select_rows(schema, table)
print(result)
except ServerNotFoundError:
print("Caught context path")
# catch bad folder path error
bad_api = APIWrapper(labkey_server, "bad_project_name", context_path, use_ssl=False)
try:
result = bad_api.query.select_rows(schema, table)
print(result)
except ServerNotFoundError:
print("Caught bad folder name")
###################
# Test some parameters of select_rows
###################
result = api.query.select_rows(
schema,
table,
max_rows=5,
offset=10,
include_total_count=True,
include_details_column=True,
include_update_column=True,
)
if result is not None:
print("select_rows: There are " + str(len(result["rows"])) + " rows.")
print("select_rows: There are " + str(result["rowCount"]) + " total rows.")
print("select_rows: Response API version [" + str(result["formatVersion"]) + "].")
column_statement = "select_rows: Included columns: "
for column in result["columnModel"]:
column_statement = column_statement + " " + column["header"] + ", "
print(column_statement)
row = result["rows"][0]
dataIndex = result["metaData"]["id"]
print("select_rows: The first row Key is: " + str(row[dataIndex]))
else:
print("select_rows: Failed to load results from " + schema + "." + table)
###################
# Test get all results
###################
result = api.query.select_rows(schema, table, show_rows=Pagination.ALL, include_total_count=True)
if result is not None:
print("select_rows: There are " + str(len(result["rows"])) + " rows.")
print("select_rows: There are " + str(result["rowCount"]) + " total rows.")
else:
print("select_rows: Failed to load results from " + schema + "." + table)
###################
# Test sort and select columns
###################
result = api.query.select_rows(
schema,
table,
max_rows=5,
offset=10,
include_total_count=False,
columns=",".join([column1, column2]),
sort=column1 + ", -" + column2,
) # use '-' to sort descending
if result is not None:
print("select_rows: There are " + str(result["rowCount"]) + " rows.")
print("select_rows: " + table)
for row in result["rows"]:
print("\t" + str(row[column1]) + ", " + str(row[column2]))
else:
print("select_rows: Failed to load results from " + schema + "." + table)
###################
# Test basic filters
###################
filters = [
QueryFilter(column1, "Group 2: HIV-1 Negative"),
QueryFilter("Height (inches)", "50, 70", QueryFilter.Types.BETWEEN),
QueryFilter("Country", "Germany;Uganda", QueryFilter.Types.IN),
]
result = api.query.select_rows(schema, table, filter_array=filters)
if result is not None:
print("select_rows: There are " + str(result["rowCount"]) + " rows.")
else:
print("select_rows: Failed to load results from " + schema + "." + table)
###################
# Test update_rows
###################
rows = result["rows"]
test_row_idx = 1
original_value = rows[test_row_idx]
column3 = "Country"
test_row = {"Key": original_value["Key"], column3: "Pangea"}
print("update_rows: original value [ " + original_value[column3] + " ]")
update_result = api.query.update_rows(schema, table, [test_row])
print("update_rows: updated value [ " + update_result["rows"][0][column3] + " ]")
update_result = api.query.update_rows(schema, table, [original_value])
print("update_rows: reset value [ " + update_result["rows"][0][column3] + " ]")
###################
# Test insert_rows & delete_rows
###################
test_row = copy.copy(original_value)
test_row["Key"] = None
test_row["Country"] = "Antarctica"
all_rows = api.query.select_rows(schema, table)
print("Insert Rows: Initials row count [ " + str(all_rows["rowCount"]) + " ]")
insert_result = api.query.select_rows(schema, table, [test_row])
print("Insert Rows: New rowId [ " + str(insert_result["rows"][0]["Key"]) + " ]")
all_rows = api.query.select_rows(schema, table)
print("Insert Rows: after row count [ " + str(all_rows["rowCount"]) + " ]")
test_row = insert_result["rows"][0]
deleteResult = api.query.delete_rows(schema, table, [test_row])
print("Delete Rows: deleted rowId [ " + str(deleteResult["rows"][0]["Key"]) + " ]")
all_rows = api.query.select_rows(schema, table)
print("Delete Rows: after row count [ " + str(all_rows["rowCount"]) + " ]")
###################
# Test truncate_table
###################
truncate_info = api.query.truncate_table(schema, table)
print("Delete all rows in table: [ " + str(truncate_info["deletedRows"]) + " ] rows deleted")
###################
# Test execute_sql
###################
sql = "select * from lists.demographics"
# base execute_sql
sql_result = api.query.execute_sql(schema, sql)
if sql_result is not None:
print("execute_sql: There are " + str(sql_result["rowCount"]) + " rows.")
else:
print("execute_sql: Failed to load results from " + schema + "." + table)
# paging
sql_result = api.query.execute_sql(
schema, sql, max_rows=5, offset=10, sort=(column1 + ", -" + column2)
)
if sql_result is not None:
print("execute_sql: There are " + str(len(sql_result["rows"])) + " rows.")
print("execute_sql: There are " + str(sql_result["rowCount"]) + " total rows.")
print("execute_sql: " + table)
for row in sql_result["rows"]:
print("\t" + str(row[column1]) + ", " + str(row[column2]))
else:
print("execute_sql: Failed to load results from " + schema + "." + table)
# Save query within the session
sql_result = api.query.execute_sql(schema, sql, max_rows=5, offset=10, save_in_session=True)
print("execute_sql: query saved as [ " + sql_result["queryName"] + " ]")
# set timeout
try:
sql_result = api.query.execute_sql(schema, sql, timeout=0.001)
print("execute_sql did not timeout")
except Timeout:
print("Caught Timeout")
###################
# Test QC State Definitions
###################
# Create new QC state definitions
qc_states = [
{
"label": "needs verification",
"description": "please look at this",
"publicData": False,
},
{"label": "approved", "publicData": True},
]
result = api.query.insert_rows("core", "qcstate", qc_states)
for row in result["rows"]:
print("Created QC state: " + row["label"])
result = api.query.select_rows("core", "qcstate")
# Update a QC state definitions
original_value = result["rows"][1]
test_row = {"RowId": original_value["RowId"], "label": "Updated Label"}
update_result = api.query.update_rows("core", "qcstate", [test_row])
print("Updated label: approved -> " + update_result["rows"][0]["label"])
# Delete all unused QC state definitions
result = api.query.select_rows("core", "qcstate")
for row in result["rows"]:
print("Deleting QC state: " + row["Label"])
try:
api.query.delete_rows("core", "qcstate", [row])
except ServerContextError as e:
print(e.message)
| apache-2.0 | -4,380,558,403,375,946,000 | 29.921569 | 97 | 0.643733 | false |
FutureSharks/invokust | invokr.py | 1 | 4806 | #!/usr/bin/env python3
import argparse
import logging
import sys
import json
from invokust.aws_lambda import LambdaLoadTest, results_aggregator
def print_stat(type, name, req_count, median, avg, min, max, rps):
return "%-7s %-50s %10s %9s %9s %9s %9s %10s" % (
type,
name,
req_count,
median,
avg,
min,
max,
rps,
)
def parse_arguments():
p = argparse.ArgumentParser(
description="Runs a Locust load tests on AWS Lambda in parallel"
)
p.add_argument("-n", "--function_name", help="Lambda function name", required=True)
p.add_argument("-f", "--locust_file", help="Locust file", required=True)
p.add_argument("-o", "--locust_host", help="Locust host", required=True)
p.add_argument(
"-u", "--locust_users", help="Number of Locust users", default=20, type=int
)
p.add_argument(
"-r", "--ramp_time", help="Ramp up time (seconds)", default=0, type=int
)
p.add_argument(
"-t", "--threads", help="Threads to run in parallel", default=1, type=int
)
p.add_argument(
"-l", "--time_limit", help="Time limit for run time (seconds)", type=int
)
return p.parse_args()
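# Example invocation (illustrative only - the function name, host and numbers are
# placeholder values, not taken from this repository):
#   ./invokr.py --function_name my-locust-lambda --locust_file locustfile.py \
#       --locust_host https://target.example.com --locust_users 50 \
#       --ramp_time 60 --threads 4 --time_limit 300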
def print_stats_exit(load_test_state):
summ_stats = load_test_state.get_summary_stats()
agg_results = results_aggregator(load_test_state.get_locust_results())
agg_results["request_fail_ratio"] = summ_stats["request_fail_ratio"]
agg_results["invocation_error_ratio"] = summ_stats["invocation_error_ratio"]
agg_results["locust_settings"] = load_test_state.lambda_payload
agg_results["lambda_function_name"] = load_test_state.lambda_function_name
agg_results["threads"] = load_test_state.threads
agg_results["ramp_time"] = load_test_state.ramp_time
agg_results["time_limit"] = load_test_state.time_limit
logging.info("Aggregated results: {0}".format(json.dumps(agg_results)))
logging.info(
"\n============================================================"
f"\nRamp up time: {agg_results['ramp_time']}s"
f"\nStarted ramp down after {agg_results['time_limit']}s (time_limit)"
f"\nThread count: {agg_results['threads']}"
f"\nLambda invocation count: {agg_results['lambda_invocations']}"
f"\nLambda invocation error ratio: {agg_results['invocation_error_ratio']}"
f"\nCumulative lambda execution time: {agg_results['total_lambda_execution_time']}ms"
f"\nTotal requests sent: {agg_results['num_requests']}"
f"\nTotal requests failed: {agg_results['num_requests_fail']}"
f"\nTotal request failure ratio: {agg_results['request_fail_ratio']}\n"
)
logging.info(
"==========================================================================================================================="
)
logging.info(
print_stat(
"TYPE", "NAME", "#REQUESTS", "MEDIAN", "AVERAGE", "MIN", "MAX", "#REQS/SEC"
)
)
logging.info(
"==========================================================================================================================="
)
reqs = agg_results["requests"]
for k in reqs.keys():
k_arr = k.split("_")
type = k_arr[0]
del k_arr[0]
name = "_".join(k_arr)
logging.info(
print_stat(
type,
name,
reqs[k]["num_requests"],
round(reqs[k]["median_response_time"], 2),
round(reqs[k]["avg_response_time"], 2),
round(reqs[k]["min_response_time"], 2),
round(reqs[k]["max_response_time"], 2),
round(reqs[k]["total_rps"], 2),
)
)
logging.info("Exiting...")
sys.exit(0)
if __name__ == "__main__":
args = parse_arguments()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)-6s %(threadName)-11s %(message)s",
)
    # AWS Lambda has a maximum execution time ("timeout"). We limit the execution time to 3 minutes if the overall
    # load test time is longer (or if no --time_limit was given), to make sure the lambda will not exceed the timeout.
    lambda_runtime = f"{args.time_limit}s" if args.time_limit is not None and args.time_limit < 180 else "3m"
lambda_payload = {
"locustfile": args.locust_file,
"host": args.locust_host,
"num_users": args.locust_users,
"spawn_rate": 10,
"run_time": lambda_runtime,
}
load_test_state = LambdaLoadTest(
args.function_name,
args.threads,
args.ramp_time,
args.time_limit,
lambda_payload,
)
try:
load_test_state.run()
except KeyboardInterrupt:
print_stats_exit(load_test_state)
else:
print_stats_exit(load_test_state)
| mit | 2,701,416,338,839,517,000 | 33.826087 | 133 | 0.550146 | false |
nikha1/nyc-taxi | Tools/PostgresRDF/utils/postgresInterface.py | 1 | 3639 | from dateutil import parser
from rdflib import Graph, Literal, BNode, Namespace, RDF, XSD, URIRef
import queries
global SSN
global GEO
global DUL
# SSN Namespace
SSN = Namespace('https://www.w3.org/2005/Incubator/ssn/ssnx/ssn#')
# Geo Namespace
GEO = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#')
# DUL Namespace
DUL = Namespace('http://www.ontologydesignpatterns.org/ont/dul/DUL.owl#')
def addToGraph(event, graphURI = "http://example.com/g1", db_conf={"dbname" : "postgres", "user" : "postgres", "password" : "admin" }):
#configString = ("dbname=postgres user=waccess password=write")
configString = ("dbname=" + db_conf['dbname'] + " user="+ db_conf['user'] + " password=" + db_conf['password'])
#print configString
graph = Graph('PostgreSQL', identifier=URIRef(graphURI))
graph.open(configString, create=False)
graph.bind('ssn', SSN)
graph.bind('geo', GEO)
graph.bind('dul', DUL)
observation = BNode();
oTime = BNode();
# Observation
graph.add((observation, RDF.type, SSN.Observation))
graph.add((oTime, RDF.type, DUL.TimeInterval))
graph.add((observation, SSN.observationSamplingTime, oTime))
# Time
date = parser.parse(event['pickup_datetime'])
t = Literal(date.strftime("%Y-%m-%dT%H:%M:%S"), datatype=XSD.dateTime)
graph.add((oTime, DUL.hasRegionDataValue, t))
# SensorOutput
sensorOutput = BNode();
graph.add((sensorOutput, RDF.type, SSN.SensorOutput))
graph.add((observation, SSN.observationResult, sensorOutput))
# ObservationValue
observationValue = BNode()
startLocation = BNode()
endLocation = BNode()
graph.add((observationValue, RDF.type, SSN.ObservationValue))
graph.add((sensorOutput, SSN.hasValue, observationValue))
# Start and End Location
graph.add((observationValue, SSN.hasStartLocation, startLocation))
graph.add((observationValue, SSN.hasEndLocation, endLocation))
graph.add((startLocation, RDF.type, GEO.location))
graph.add((endLocation, RDF.type, GEO.location))
# Start Location
lat = Literal(event['pickup_latitude'], datatype=XSD.float)
long = Literal(event['pickup_longitude'], datatype=XSD.float)
# Adding the start location
graph.add((startLocation, GEO.lat, lat))
graph.add((startLocation, GEO.long, long))
# End Location
lat = Literal(event['dropoff_latitude'], datatype=XSD.float)
long = Literal(event['dropoff_longitude'], datatype=XSD.float)
# Adding the start location
graph.add((endLocation, GEO.lat, lat))
graph.add((endLocation, GEO.long, long))
    # Duration in seconds (dropoff - pickup); total_seconds() keeps the stored value
    # consistent with the declared xsd:float datatype.
    date1 = parser.parse(event['dropoff_datetime'])
    date2 = parser.parse(event['pickup_datetime'])
    dur = date1 - date2
    duration = Literal(dur.total_seconds(), datatype=XSD.float)
graph.add((observation, SSN.hasDuration, duration))
#print str(graph.__len__() / 11)
#Commit and close the graph
graph.commit()
graph.close()
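# Minimal usage sketch (added for illustration; the coordinates, timestamps and
# connection settings are made-up values - only the key names mirror what
# addToGraph() reads above):
#   sample_event = {
#       "pickup_datetime": "2015-01-01 09:30:00",
#       "dropoff_datetime": "2015-01-01 09:45:00",
#       "pickup_latitude": 40.7580, "pickup_longitude": -73.9855,
#       "dropoff_latitude": 40.7527, "dropoff_longitude": -73.9772,
#   }
#   addToGraph(sample_event,
#              db_conf={"dbname": "postgres", "user": "postgres", "password": "admin"})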
def removeFromGraph(timestamp, graphURI = "http://example.com/g1", db_conf={"dbname" : "postgres", "user" : "postgres", "password" : "admin" }):
configString = ("dbname=postgres user=waccess password=write")
#configString = ("dbname=" + db_conf['dbname'] + "user="+ db_conf['user'] + " password=" + db_conf['password'])
graph = Graph('PostgreSQL', identifier=URIRef(graphURI))
graph.open(configString, create=False)
results = graph.query(queries.getEvents(timestamp))
print len(results)
for result in results:
for node in result:
graph.remove((node, None, None))
# Commit and close the graph
graph.commit()
graph.close()
| mit | 2,477,461,874,589,206,500 | 33.009346 | 144 | 0.676834 | false |
woutersmet/Zeosummer | lib/zeobuilder/nodes/vector.py | 1 | 6052 | # Zeobuilder is an extensible GUI-toolkit for molecular model construction.
# Copyright (C) 2007 - 2009 Toon Verstraelen <Toon.Verstraelen@UGent.be>, Center
# for Molecular Modeling (CMM), Ghent University, Ghent, Belgium; all rights
# reserved unless otherwise stated.
#
# This file is part of Zeobuilder.
#
# Zeobuilder is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# In addition to the regulations of the GNU General Public License,
# publications and communications based in parts on this program or on
# parts of this program are required to cite the following article:
#
# "ZEOBUILDER: a GUI toolkit for the construction of complex molecules on the
# nanoscale with building blocks", Toon Verstraelen, Veronique Van Speybroeck
# and Michel Waroquier, Journal of Chemical Information and Modeling, Vol. 48
# (7), 1530-1541, 2008
# DOI:10.1021/ci8000748
#
# Zeobuilder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
# --
from zeobuilder import context
from zeobuilder.nodes.reference import SpatialReference
from zeobuilder.nodes.elementary import GLReferentBase
from zeobuilder.nodes.color_mixin import ColorMixin
from zeobuilder.gui.fields_dialogs import DialogFieldInfo
import zeobuilder.gui.fields as fields
from molmod.transformations import Complete
import numpy
import math
__all__ = ["Vector"]
class Vector(GLReferentBase):
#
# State
#
def initnonstate(self):
GLReferentBase.initnonstate(self)
self.orientation = Complete()
self.set_children([
SpatialReference(prefix="Begin"),
SpatialReference(prefix="End")
])
#
# Dialog fields (see action EditProperties)
#
dialog_fields = set([
DialogFieldInfo("Basic", (0, 2), fields.read.VectorLength(
label_text="Vector length"
)),
])
#
# Draw
#
def draw(self):
self.calc_vector_dimensions()
context.application.vis_backend.transform(self.orientation)
#
# Revalidation
#
def revalidate_total_list(self):
if self.gl_active:
vb = context.application.vis_backend
vb.begin_list(self.total_list)
if self.visible:
vb.push_name(self.draw_list)
vb.push_matrix()
self.draw_selection()
vb.call_list(self.draw_list)
vb.pop_matrix()
vb.pop_name()
vb.end_list()
self.total_list_valid = True
def revalidate_draw_list(self):
if self.gl_active:
GLReferentBase.revalidate_draw_list(self)
def revalidate_boundingbox_list(self):
if self.gl_active:
vb = context.application.vis_backend
#print "Compiling selection list (" + str(self.boundingbox_list) + "): " + str(self.name)
vb.begin_list(self.boundingbox_list)
vb.push_matrix()
vb.transform(self.orientation)
self.revalidate_bounding_box()
self.bounding_box.draw()
vb.pop_matrix()
vb.end_list()
self.boundingbox_list_valid = True
#
# Frame
#
def get_bounding_box_in_parent_frame(self):
return self.bounding_box.transformed(self.orientation)
#
# Vector
#
def shortest_vector_relative_to(self, other):
b = self.children[0].translation_relative_to(other)
e = self.children[1].translation_relative_to(other)
if (b is None) or (e is None):
return None
else:
return self.parent.shortest_vector(e - b)
def calc_vector_dimensions(self):
relative_translation = self.shortest_vector_relative_to(self.parent)
if relative_translation is None:
self.length = 0
else:
self.length = math.sqrt(numpy.dot(relative_translation, relative_translation))
if self.length > 0:
self.orientation.t = self.children[0].translation_relative_to(self.parent)
#axis = numpy.cross(relative_translation, numpy.array([1.0, 0.0, 0.0]))
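            # c is the cosine of the angle between the vector and the z axis;
            # the rotation below maps the z axis onto the vector direction about
            # an axis proportional to z x v = (-y, x, 0), divided by the larger
            # of |x| and |y| to avoid dividing by a nearly vanishing component.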
c = relative_translation[2] / self.length
if c >= 1.0:
self.orientation.set_rotation_properties(0, numpy.array([1.0, 0.0, 0.0]), False)
elif c <= -1.0:
self.orientation.set_rotation_properties(math.pi, numpy.array([1.0, 0.0, 0.0]), False)
else:
x, y = relative_translation[0], relative_translation[1]
if abs(x) < abs(y):
signy = {True: 1, False: -1}[y >= 0]
a = -signy
b = signy * x / y
else:
signx = {True: 1, False: -1}[x >= 0]
a = -signx * y / x
b = signx
self.orientation.set_rotation_properties(math.acos(c), numpy.array([a, b, 0.0]), False)
def define_target(self, reference, new_target):
GLReferentBase.define_target(self, reference, new_target)
self.invalidate_boundingbox_list()
self.invalidate_draw_list()
def target_moved(self, reference, target):
GLReferentBase.target_moved(self, reference, target)
self.invalidate_boundingbox_list()
self.invalidate_draw_list()
def get_neighbor(self, one_target):
if self.children[0].target == one_target:
return self.children[1].target
else:
return self.children[0].target
| gpl-3.0 | -3,980,150,933,196,877,000 | 32.810056 | 107 | 0.61649 | false |
cwisecarver/osf.io | website/search/elastic_search.py | 1 | 25375 |
# -*- coding: utf-8 -*-
from __future__ import division
import copy
import functools
import logging
import math
import re
import unicodedata
from framework import sentry
import six
from django.apps import apps
from django.core.paginator import Paginator
from elasticsearch import (ConnectionError, Elasticsearch, NotFoundError,
RequestError, TransportError, helpers)
from framework.celery_tasks import app as celery_app
from framework.mongo.utils import paginated
from modularodm import Q
from osf.models import AbstractNode as Node
from osf.models import OSFUser as User
from osf.models import BaseFileNode
from osf.models import Institution
from website import settings
from website.filters import gravatar
from osf.models.licenses import serialize_node_license_record
from website.search import exceptions
from website.search.util import build_query, clean_splitters
from website.util import sanitize
from website.views import validate_page_num
logger = logging.getLogger(__name__)
# These are the doc_types that exist in the search database
ALIASES = {
'project': 'Projects',
'component': 'Components',
'registration': 'Registrations',
'user': 'Users',
'total': 'Total',
'file': 'Files',
'institution': 'Institutions',
}
DOC_TYPE_TO_MODEL = {
'component': Node,
'project': Node,
'registration': Node,
'user': User,
'file': BaseFileNode,
'institution': Institution
}
# Prevent tokenizing and stop word removal.
NOT_ANALYZED_PROPERTY = {'type': 'string', 'index': 'not_analyzed'}
# Perform stemming on the field it's applied to.
ENGLISH_ANALYZER_PROPERTY = {'type': 'string', 'analyzer': 'english'}
INDEX = settings.ELASTIC_INDEX
CLIENT = None
def client():
global CLIENT
if CLIENT is None:
try:
CLIENT = Elasticsearch(
settings.ELASTIC_URI,
request_timeout=settings.ELASTIC_TIMEOUT,
retry_on_timeout=True
)
logging.getLogger('elasticsearch').setLevel(logging.WARN)
logging.getLogger('elasticsearch.trace').setLevel(logging.WARN)
logging.getLogger('urllib3').setLevel(logging.WARN)
logging.getLogger('requests').setLevel(logging.WARN)
CLIENT.cluster.health(wait_for_status='yellow')
except ConnectionError:
message = (
'The SEARCH_ENGINE setting is set to "elastic", but there '
'was a problem starting the elasticsearch interface. Is '
'elasticsearch running?'
)
if settings.SENTRY_DSN:
try:
sentry.log_exception()
sentry.log_message(message)
except AssertionError: # App has not yet been initialized
logger.exception(message)
else:
logger.error(message)
exit(1)
return CLIENT
def requires_search(func):
def wrapped(*args, **kwargs):
if client() is not None:
try:
return func(*args, **kwargs)
except ConnectionError:
raise exceptions.SearchUnavailableError('Could not connect to elasticsearch')
except NotFoundError as e:
raise exceptions.IndexNotFoundError(e.error)
except RequestError as e:
if 'ParseException' in e.error: # ES 1.5
raise exceptions.MalformedQueryError(e.error)
if type(e.error) == dict: # ES 2.0
try:
root_cause = e.error['root_cause'][0]
if root_cause['type'] == 'query_parsing_exception':
raise exceptions.MalformedQueryError(root_cause['reason'])
except (AttributeError, KeyError):
pass
raise exceptions.SearchException(e.error)
except TransportError as e:
# Catch and wrap generic uncaught ES error codes. TODO: Improve fix for https://openscience.atlassian.net/browse/OSF-4538
raise exceptions.SearchException(e.error)
sentry.log_message('Elastic search action failed. Is elasticsearch running?')
raise exceptions.SearchUnavailableError('Failed to connect to elasticsearch')
return wrapped
@requires_search
def get_aggregations(query, doc_type):
query['aggregations'] = {
'licenses': {
'terms': {
'field': 'license.id'
}
}
}
res = client().search(index=INDEX, doc_type=doc_type, search_type='count', body=query)
ret = {
doc_type: {
item['key']: item['doc_count']
for item in agg['buckets']
}
for doc_type, agg in res['aggregations'].iteritems()
}
ret['total'] = res['hits']['total']
return ret
@requires_search
def get_counts(count_query, clean=True):
count_query['aggregations'] = {
'counts': {
'terms': {
'field': '_type',
}
}
}
res = client().search(index=INDEX, doc_type=None, search_type='count', body=count_query)
counts = {x['key']: x['doc_count'] for x in res['aggregations']['counts']['buckets'] if x['key'] in ALIASES.keys()}
counts['total'] = sum([val for val in counts.values()])
return counts
@requires_search
def get_tags(query, index):
query['aggregations'] = {
'tag_cloud': {
'terms': {'field': 'tags'}
}
}
results = client().search(index=index, doc_type=None, body=query)
tags = results['aggregations']['tag_cloud']['buckets']
return tags
@requires_search
def search(query, index=None, doc_type='_all', raw=False):
"""Search for a query
:param query: The substring of the username/project name/tag to search for
:param index:
:param doc_type:
:return: List of dictionaries, each containing the results, counts, tags and typeAliases
results: All results returned by the query, that are within the index and search type
counts: A dictionary in which keys are types and values are counts for that type, e.g, count['total'] is the sum of the other counts
tags: A list of tags that are returned by the search query
typeAliases: the doc_types that exist in the search database
"""
index = index or INDEX
tag_query = copy.deepcopy(query)
aggs_query = copy.deepcopy(query)
count_query = copy.deepcopy(query)
for key in ['from', 'size', 'sort']:
try:
del tag_query[key]
del aggs_query[key]
del count_query[key]
except KeyError:
pass
tags = get_tags(tag_query, index)
try:
del aggs_query['query']['filtered']['filter']
del count_query['query']['filtered']['filter']
except KeyError:
pass
aggregations = get_aggregations(aggs_query, doc_type=doc_type)
counts = get_counts(count_query, index)
# Run the real query and get the results
raw_results = client().search(index=index, doc_type=doc_type, body=query)
results = [hit['_source'] for hit in raw_results['hits']['hits']]
return_value = {
'results': raw_results['hits']['hits'] if raw else format_results(results),
'counts': counts,
'aggs': aggregations,
'tags': tags,
'typeAliases': ALIASES
}
return return_value
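# Example (illustrative sketch; the query text and doc_type below are made up):
#
#     from website.search.util import build_query
#     resp = search(build_query('climate', start=0, size=10), doc_type='project')
#     resp['results']   # formatted hits
#     resp['counts']    # per-type counts, including 'total'
#     resp['tags']      # tag-cloud buckets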
def format_results(results):
ret = []
for result in results:
if result.get('category') == 'user':
result['url'] = '/profile/' + result['id']
elif result.get('category') == 'file':
parent_info = load_parent(result.get('parent_id'))
result['parent_url'] = parent_info.get('url') if parent_info else None
result['parent_title'] = parent_info.get('title') if parent_info else None
elif result.get('category') in {'project', 'component', 'registration'}:
result = format_result(result, result.get('parent_id'))
elif not result.get('category'):
continue
ret.append(result)
return ret
def format_result(result, parent_id=None):
parent_info = load_parent(parent_id)
formatted_result = {
'contributors': result['contributors'],
'wiki_link': result['url'] + 'wiki/',
# TODO: Remove unescape_entities when mako html safe comes in
'title': sanitize.unescape_entities(result['title']),
'url': result['url'],
'is_component': False if parent_info is None else True,
'parent_title': sanitize.unescape_entities(parent_info.get('title')) if parent_info else None,
'parent_url': parent_info.get('url') if parent_info is not None else None,
'tags': result['tags'],
'is_registration': (result['is_registration'] if parent_info is None
else parent_info.get('is_registration')),
'is_retracted': result['is_retracted'],
'is_pending_retraction': result['is_pending_retraction'],
'embargo_end_date': result['embargo_end_date'],
'is_pending_embargo': result['is_pending_embargo'],
'description': result['description'],
'category': result.get('category'),
'date_created': result.get('date_created'),
'date_registered': result.get('registered_date'),
'n_wikis': len(result['wikis']),
'license': result.get('license'),
'affiliated_institutions': result.get('affiliated_institutions'),
}
return formatted_result
def load_parent(parent_id):
parent = Node.load(parent_id)
if parent is None:
return None
parent_info = {}
if parent is not None and parent.is_public:
parent_info['title'] = parent.title
parent_info['url'] = parent.url
parent_info['is_registration'] = parent.is_registration
parent_info['id'] = parent._id
else:
parent_info['title'] = '-- private project --'
parent_info['url'] = ''
parent_info['is_registration'] = None
parent_info['id'] = None
return parent_info
COMPONENT_CATEGORIES = set(settings.NODE_CATEGORY_MAP.keys())
def get_doctype_from_node(node):
if node.is_registration:
return 'registration'
elif node.parent_node is None:
# ElasticSearch categorizes top-level projects differently than children
return 'project'
elif node.category in COMPONENT_CATEGORIES:
return 'component'
else:
return node.category
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_node_async(self, node_id, index=None, bulk=False):
AbstractNode = apps.get_model('osf.AbstractNode')
node = AbstractNode.load(node_id)
try:
update_node(node=node, index=index, bulk=bulk, async=True)
except Exception as exc:
self.retry(exc=exc)
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_user_async(self, user_id, index=None):
OSFUser = apps.get_model('osf.OSFUser')
user = OSFUser.objects.get(id=user_id)
try:
update_user(user, index)
except Exception as exc:
self.retry(exc)
def serialize_node(node, category):
NodeWikiPage = apps.get_model('addons_wiki.NodeWikiPage')
elastic_document = {}
parent_id = node.parent_id
try:
normalized_title = six.u(node.title)
except TypeError:
normalized_title = node.title
normalized_title = unicodedata.normalize('NFKD', normalized_title).encode('ascii', 'ignore')
elastic_document = {
'id': node._id,
'contributors': [
{
'fullname': x['fullname'],
'url': '/{}/'.format(x['guids___id']) if x['is_active'] else None
}
for x in node._contributors.filter(contributor__visible=True).order_by('contributor___order')
.values('fullname', 'guids___id', 'is_active')
],
'title': node.title,
'normalized_title': normalized_title,
'category': category,
'public': node.is_public,
'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
'description': node.description,
'url': node.url,
'is_registration': node.is_registration,
'is_pending_registration': node.is_pending_registration,
'is_retracted': node.is_retracted,
'is_pending_retraction': node.is_pending_retraction,
'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if node.embargo_end_date else False,
'is_pending_embargo': node.is_pending_embargo,
'registered_date': node.registered_date,
'wikis': {},
'parent_id': parent_id,
'date_created': node.date_created,
'license': serialize_node_license_record(node.license),
'affiliated_institutions': list(node.affiliated_institutions.values_list('name', flat=True)),
'boost': int(not node.is_registration) + 1, # This is for making registered projects less relevant
'extra_search_terms': clean_splitters(node.title),
}
if not node.is_retracted:
for wiki in NodeWikiPage.objects.filter(guids___id__in=node.wiki_pages_current.values()):
# '.' is not allowed in field names in ES2
elastic_document['wikis'][wiki.page_name.replace('.', ' ')] = wiki.raw_text(node)
return elastic_document
@requires_search
def update_node(node, index=None, bulk=False, async=False):
from addons.osfstorage.models import OsfStorageFile
index = index or INDEX
for file_ in paginated(OsfStorageFile, Q('node', 'eq', node)):
update_file(file_, index=index)
if node.is_deleted or not node.is_public or node.archiving or (node.is_spammy and settings.SPAM_FLAGGED_REMOVE_FROM_SEARCH):
delete_doc(node._id, node, index=index)
else:
category = get_doctype_from_node(node)
elastic_document = serialize_node(node, category)
if bulk:
return elastic_document
else:
client().index(index=index, doc_type=category, id=node._id, body=elastic_document, refresh=True)
def bulk_update_nodes(serialize, nodes, index=None):
"""Updates the list of input projects
:param function Node-> dict serialize:
:param Node[] nodes: Projects, components or registrations
:param str index: Index of the nodes
:return:
"""
index = index or INDEX
actions = []
for node in nodes:
serialized = serialize(node)
if serialized:
actions.append({
'_op_type': 'update',
'_index': index,
'_id': node._id,
'_type': get_doctype_from_node(node),
'doc': serialized,
'doc_as_upsert': True,
})
if actions:
return helpers.bulk(client(), actions)
def serialize_contributors(node):
return {
'contributors': [
{
'fullname': x['user__fullname'],
'url': '/{}/'.format(x['user__guids___id'])
} for x in
node.contributor_set.filter(visible=True, user__is_active=True).order_by('_order').values('user__fullname', 'user__guids___id')
]
}
bulk_update_contributors = functools.partial(bulk_update_nodes, serialize_contributors)
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_contributors_async(self, user_id):
OSFUser = apps.get_model('osf.OSFUser')
user = OSFUser.objects.get(id=user_id)
p = Paginator(user.visible_contributor_to.order_by('id'), 100)
for page_num in p.page_range:
bulk_update_contributors(p.page(page_num).object_list)
@requires_search
def update_user(user, index=None):
index = index or INDEX
if not user.is_active:
try:
client().delete(index=index, doc_type='user', id=user._id, refresh=True, ignore=[404])
except NotFoundError:
pass
return
names = dict(
fullname=user.fullname,
given_name=user.given_name,
family_name=user.family_name,
middle_names=user.middle_names,
suffix=user.suffix
)
normalized_names = {}
for key, val in names.items():
if val is not None:
try:
val = six.u(val)
except TypeError:
pass # This is fine, will only happen in 2.x if val is already unicode
normalized_names[key] = unicodedata.normalize('NFKD', val).encode('ascii', 'ignore')
user_doc = {
'id': user._id,
'user': user.fullname,
'normalized_user': normalized_names['fullname'],
'normalized_names': normalized_names,
'names': names,
'job': user.jobs[0]['institution'] if user.jobs else '',
'job_title': user.jobs[0]['title'] if user.jobs else '',
'all_jobs': [job['institution'] for job in user.jobs[1:]],
'school': user.schools[0]['institution'] if user.schools else '',
'all_schools': [school['institution'] for school in user.schools],
'category': 'user',
'degree': user.schools[0]['degree'] if user.schools else '',
'social': user.social_links,
'boost': 2, # TODO(fabianvf): Probably should make this a constant or something
}
client().index(index=index, doc_type='user', body=user_doc, id=user._id, refresh=True)
@requires_search
def update_file(file_, index=None, delete=False):
index = index or INDEX
# TODO: Can remove 'not file_.name' if we remove all base file nodes with name=None
if not file_.name or not file_.node.is_public or delete or file_.node.is_deleted or file_.node.archiving:
client().delete(
index=index,
doc_type='file',
id=file_._id,
refresh=True,
ignore=[404]
)
return
# We build URLs manually here so that this function can be
# run outside of a Flask request context (e.g. in a celery task)
file_deep_url = '/{node_id}/files/{provider}{path}/'.format(
node_id=file_.node._id,
provider=file_.provider,
path=file_.path,
)
node_url = '/{node_id}/'.format(node_id=file_.node._id)
guid_url = None
file_guid = file_.get_guid(create=False)
if file_guid:
guid_url = '/{file_guid}/'.format(file_guid=file_guid._id)
file_doc = {
'id': file_._id,
'deep_url': file_deep_url,
'guid_url': guid_url,
'tags': list(file_.tags.filter(system=False).values_list('name', flat=True)),
'name': file_.name,
'category': 'file',
'node_url': node_url,
'node_title': file_.node.title,
'parent_id': file_.node.parent_node._id if file_.node.parent_node else None,
'is_registration': file_.node.is_registration,
'is_retracted': file_.node.is_retracted,
'extra_search_terms': clean_splitters(file_.name),
}
client().index(
index=index,
doc_type='file',
body=file_doc,
id=file_._id,
refresh=True
)
@requires_search
def update_institution(institution, index=None):
index = index or INDEX
id_ = institution._id
if institution.is_deleted:
client().delete(index=index, doc_type='institution', id=id_, refresh=True, ignore=[404])
else:
institution_doc = {
'id': id_,
'url': '/institutions/{}/'.format(institution._id),
'logo_path': institution.logo_path,
'category': 'institution',
'name': institution.name,
}
client().index(index=index, doc_type='institution', body=institution_doc, id=id_, refresh=True)
@requires_search
def delete_all():
delete_index(INDEX)
@requires_search
def delete_index(index):
client().indices.delete(index, ignore=[404])
@requires_search
def create_index(index=None):
'''Creates index with some specified mappings to begin with,
all of which are applied to all projects, components, and registrations.
'''
index = index or INDEX
document_types = ['project', 'component', 'registration', 'user', 'file', 'institution']
project_like_types = ['project', 'component', 'registration']
analyzed_fields = ['title', 'description']
client().indices.create(index, ignore=[400]) # HTTP 400 if index already exists
for type_ in document_types:
mapping = {
'properties': {
'tags': NOT_ANALYZED_PROPERTY,
'license': {
'properties': {
'id': NOT_ANALYZED_PROPERTY,
'name': NOT_ANALYZED_PROPERTY,
# Elasticsearch automatically infers mappings from content-type. `year` needs to
# be explicitly mapped as a string to allow date ranges, which break on the inferred type
'year': {'type': 'string'},
}
}
}
}
if type_ in project_like_types:
analyzers = {field: ENGLISH_ANALYZER_PROPERTY
for field in analyzed_fields}
mapping['properties'].update(analyzers)
if type_ == 'user':
fields = {
'job': {
'type': 'string',
'boost': '1',
},
'all_jobs': {
'type': 'string',
'boost': '0.01',
},
'school': {
'type': 'string',
'boost': '1',
},
'all_schools': {
'type': 'string',
'boost': '0.01'
},
}
mapping['properties'].update(fields)
client().indices.put_mapping(index=index, doc_type=type_, body=mapping, ignore=[400, 404])
@requires_search
def delete_doc(elastic_document_id, node, index=None, category=None):
index = index or INDEX
category = category or 'registration' if node.is_registration else node.project_or_component
client().delete(index=index, doc_type=category, id=elastic_document_id, refresh=True, ignore=[404])
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
"""Search for contributors to add to a project using elastic search. Request must
include JSON data with a "query" field.
:param query: The substring of the username to search for
:param page: For pagination, the page number to use for results
:param size: For pagination, the number of results per page
:param exclude: A list of User objects to exclude from the search
:param current_user: A User object of the current user
:return: List of dictionaries, each containing the ID, full name,
most recent employment and education, gravatar URL of an OSF user
"""
start = (page * size)
items = re.split(r'[\s-]+', query)
exclude = exclude or []
normalized_items = []
for item in items:
try:
normalized_item = six.u(item)
except TypeError:
normalized_item = item
normalized_item = unicodedata.normalize('NFKD', normalized_item).encode('ascii', 'ignore')
normalized_items.append(normalized_item)
items = normalized_items
query = ' AND '.join('{}*~'.format(re.escape(item)) for item in items) + \
''.join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)
results = search(build_query(query, start=start, size=size), index=INDEX, doc_type='user')
docs = results['results']
pages = math.ceil(results['counts'].get('user', 0) / size)
validate_page_num(page, pages)
users = []
for doc in docs:
# TODO: use utils.serialize_user
user = User.load(doc['id'])
if current_user and current_user._id == user._id:
n_projects_in_common = -1
elif current_user:
n_projects_in_common = current_user.n_projects_in_common(user)
else:
n_projects_in_common = 0
if user is None:
logger.error('Could not load user {0}'.format(doc['id']))
continue
if user.is_active: # exclude merged, unregistered, etc.
current_employment = None
education = None
if user.jobs:
current_employment = user.jobs[0]['institution']
if user.schools:
education = user.schools[0]['institution']
users.append({
'fullname': doc['user'],
'id': doc['id'],
'employment': current_employment,
'education': education,
'n_projects_in_common': n_projects_in_common,
'gravatar_url': gravatar(
user,
use_ssl=True,
size=settings.PROFILE_IMAGE_MEDIUM
),
'profile_url': user.profile_url,
'registered': user.is_registered,
'active': user.is_active
})
return {
'users': users,
'total': results['counts']['total'],
'pages': pages,
'page': page,
}
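# Example (illustrative sketch; the query string below is made up):
#
#     matches = search_contributor('jane doe', page=0, size=10)
#     matches['users']   # dicts with fullname, id, employment, education, ...
#     matches['total']   # total number of matching user documents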
| apache-2.0 | -1,198,237,155,588,281,000 | 34.840395 | 140 | 0.593537 | false |
jhallock7/SparseBayes-Python | SB2_ParameterSettings.py | 1 | 3373 |
# The following is a Python translation of a MATLAB file originally written principally by Mike Tipping
# as part of his SparseBayes software library. Initially published on GitHub on July 21st, 2015.
# SB2_PARAMETERSETTINGS User parameter initialisation for SPARSEBAYES
#
# SETTINGS = SB2_PARAMETERSETTINGS(parameter1, value1, parameter2, value2,...)
#
# OUTPUT ARGUMENTS:
#
# SETTINGS An initialisation structure to pass to SPARSEBAYES
#
# INPUT ARGUMENTS:
#
# Optional number of parameter-value pairs to specify some, all, or
# none of the following:
#
# BETA (Gaussian) noise precision (inverse variance)
# NOISESTD (Gaussian) noise standard deviation
# RELEVANT Indices of columns of basis matrix to use at start-up
# MU (WEIGHTS) Corresponding vector of weights to RELEVANT
# ALPHA Corresponding vector of hyperparameter values to RELEVANT
#
# EXAMPLE:
#
# SETTINGS = SB2_ParameterSettings('NoiseStd',0.1)
#
# NOTES:
#
# 1. If no input arguments are supplied, defaults (effectively an
# empty structure) will be returned.
#
# 2. If both BETA and NOISESTD are specified, BETA will take
# precedence.
#
# 3. RELEVANT may be specified without WEIGHTS or ALPHA (these will be
# sensibly initialised later).
#
# 4. If RELEVANT is specified, WEIGHTS may be specified also without ALPHA.
#
#
# Copyright 2009, Vector Anomaly Ltd
#
# This file is part of the SPARSEBAYES library for Matlab (V2.0).
#
# SPARSEBAYES is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# SPARSEBAYES is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with SPARSEBAYES in the accompanying file "licence.txt"; if not, write to
# the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
# MA 02110-1301 USA
#
# Contact the author: m a i l [at] m i k e t i p p i n g . c o m
#
def SB2_ParameterSettings(*args):
# Ensure arguments are supplied in pairs
if len(args) % 2 != 0:
raise Exception('Arguments to SB2_ParameterSettings should be (property, value) pairs')
# Any settings specified?
    numSettings = len(args) // 2
## Defaults - over-ridden later if requested
# Two options for setting noise level (purely for convenience)
# - if 'beta' set, 'noiseStdDev' will be over-ridden
SETTINGS = {
'BETA' : [],
'NOISESTD' : [],
'RELEVANT' : [],
'MU' : [],
'ALPHA' : []
}
## Requested overrides
# Parse string/variable pairs
for n in range(numSettings):
        property_ = str(args[n*2]).upper()  # tolerate mixed case, e.g. 'NoiseStd' as in the example above
value = args[n*2 + 1]
if property_ not in SETTINGS:
raise Exception('Unrecognised initialisation property: {0}'.format(property_))
else:
SETTINGS[property_] = value
return SETTINGS
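# Example (illustrative sketch): initialise only the noise level and leave the
# remaining fields at their empty defaults.
#
#     SETTINGS = SB2_ParameterSettings('NOISESTD', 0.1)
#     SETTINGS['NOISESTD']   # -> 0.1
#     SETTINGS['BETA']       # -> []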
| gpl-2.0 | -5,058,282,655,556,856,000 | 31.432692 | 103 | 0.660243 | false |
xfce-mirror/xfce4-rss-plugin | panel-plugin/props.py | 1 | 3863 | # xfce4-rss-plugin - an RSS aggregator for the Xfce 4 Panel
# Copyright (c) 2006 Adriano Winter Bess <adriano@xfce.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License ONLY.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
import pygtk
pygtk.require("2.0")
import gtk
import gobject
from gettext import gettext as _
class PropertiesDialog (gtk.Dialog):
def __init__ (self, config):
gtk.Dialog.__init__ (self, _("RSS Aggregator"),
None, gtk.DIALOG_MODAL|gtk.DIALOG_NO_SEPARATOR,
(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
self.config = config
self.set_border_width (8)
self.vbox.set_homogeneous (False)
self.set_default_size (600, 400)
model = gtk.ListStore (gobject.TYPE_STRING, gobject.TYPE_STRING)
for feed in config.traverse_feeds ():
model.append ((feed['name'], feed['url']))
self.build_view (model)
vb = gtk.VBox (spacing=8)
but = gtk.Button (stock=gtk.STOCK_ADD)
but.connect ("clicked", add_cb, self.feeds_view)
vb.pack_start (but, False)
but = gtk.Button (stock=gtk.STOCK_REMOVE)
but.connect ("clicked", remove_cb, self.feeds_view)
vb.pack_start (but, False)
lab = gtk.Label (_("RSS Feeds:"))
lab.set_alignment (0.0, 0.5)
self.vbox.pack_start (lab, False)
hb = gtk.HBox (spacing=8)
hb.pack_start (self.feeds_view)
align = gtk.Alignment (0.5, 0.5)
align.add (vb)
hb.pack_start (align, False)
self.vbox.pack_start (hb)
self.connect ("response", response_cb, None)
def build_view (self, model):
tv = gtk.TreeView (model)
tv.set_headers_visible (True)
tv.get_selection().set_mode(gtk.SELECTION_MULTIPLE)
col = gtk.TreeViewColumn (_("Name"))
cell = gtk.CellRendererText ()
cell.set_property ("editable", True)
cell.connect ("edited", edited_cb, (model, 0))
col.pack_start (cell)
tv.append_column (col)
col.set_attributes (cell, text=0)
col.set_sort_column_id (0)
tv.set_search_column (0)
col = gtk.TreeViewColumn (_("URL"))
cell = gtk.CellRendererText ()
cell.set_property ("editable", True)
cell.connect ("edited", edited_cb, (model, 1))
col.pack_start (cell)
tv.append_column (col)
col.set_attributes (cell, text=1)
self.feeds_view = tv
def response_cb (dlg, rid, dummy):
model = dlg.feeds_view.get_model ()
dlg.config.clear_feeds ()
for feed in model:
dlg.config.add_feed (feed[0], feed[1])
dlg.config.write_config ()
dlg.destroy ()
def edited_cb (cell, path, text, data):
model, column = data
model[path][column] = text
def remove_cb (but, feeds_view):
selection = feeds_view.get_selection ()
if selection.count_selected_rows () > 0:
(model, rows) = selection.get_selected_rows ()
refs = list ()
for path in rows:
refs.append (gtk.TreeRowReference (model, path))
for ref in refs:
model.remove (model.get_iter (ref.get_path ()))
def add_cb (but, feeds_view):
model = feeds_view.get_model ()
model.append ((_("Name"), "http://"))
| gpl-2.0 | -1,449,831,748,015,399,000 | 35.102804 | 76 | 0.61869 | false |
nitely/Spirit | spirit/category/migrations/0001_initial.py | 1 | 1333 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import spirit.core.utils.models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(primary_key=True, verbose_name='ID', serialize=False, auto_created=True)),
('title', models.CharField(verbose_name='title', max_length=75)),
('slug', spirit.core.utils.models.AutoSlugField(db_index=False, populate_from='title', blank=True)),
('description', models.CharField(verbose_name='description', max_length=255, blank=True)),
('is_closed', models.BooleanField(verbose_name='closed', default=False)),
('is_removed', models.BooleanField(verbose_name='removed', default=False)),
('is_private', models.BooleanField(verbose_name='private', default=False)),
('parent', models.ForeignKey(null=True, verbose_name='category parent', to='spirit_category.Category', blank=True, on_delete=models.CASCADE)),
],
options={
'ordering': ['title', 'pk'],
'verbose_name': 'category',
'verbose_name_plural': 'categories',
},
),
]
| mit | -5,217,428,606,251,673,000 | 43.433333 | 158 | 0.582146 | false |
SweetPalma/Perver | perver.py | 1 | 18147 | #!/usr/bin/python
# coding: utf-8
# Perver - tiny Python 3 server for perverts.
# Check README and LICENSE for details.
from sys import platform as os_platform
from hashlib import sha1 as hash_id
from urllib.parse import unquote
from mimetypes import guess_type
from traceback import format_exc
from functools import wraps
import threading as thread
import concurrent.futures
import logging as log
import asyncio
import base64
import time
import sys
import os
import re
# Version control:
__author__ = 'SweetPalma'
__version__ = '0.25'
# Custom internal exceptions:
class PerverException(Exception):
def __init__(self, message):
self.message = str(message)
# Handling HTTP requests:
class PerverHandler:
# Path substitution pattern:
path_pattern = re.compile(r'(\{.+?\})')
# Making server link:
def __init__(self, server):
self.server = server
# Handling requests:
@asyncio.coroutine
def handle_request(self, reader, writer):
# Preparing basic values:
peername = writer.get_extra_info('peername')
ip, port = peername[0], peername[1]
# Client basic values:
self.ip = ip
self.port = port
self.reader = reader
self.writer = writer
self.time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# Client info, used in logging:
client_info = ' '.join([
self.time,
self.ip,
])
# Terminator shortcut:
killer = PerverException
# Handling:
try:
# Reading header until EOF:
header, length = b'', 0
while True:
try:
# Reading:
line = yield from reader.readline()
# Setting request type and maximal request size at start:
if len(header) == 0:
if line.startswith(b'POST'):
request_type = b'POST'
request_max = self.server.post_max
else:
request_type = b'GET'
request_max = self.server.get_max
# Setting break:
if line == b'\r\n' or not line:
break
# Reading content length:
if line.startswith(b'Content-Length'):
length = int(line.split(b':')[1])
# Reading header:
header = header + line
# Some spooky errors during reading:
except:
break
# Reading content:
content = b''
if 0 < length < request_max:
content = yield from reader.readexactly(length)
# Close connection in case of big file:
elif length > request_max:
self.writer.close()
raise killer('REQUEST IS TOO BIG')
# Parsing data:
self.client = yield from self.build_client(header, content)
client = self.client
# In case of disconnection:
if not client:
self.writer.close()
raise killer('CLIENT CLOSED CONNECTION')
# Logging full information:
client_info = client_info + ' ' + ' '.join([
client.type,
client.path,
])
# Checking routing:
route_post = self.check_route(client.path, self.server.route_post)
route_get = self.check_route(client.path, self.server.route_get)
if client.type == 'POST' and route_post:
raise killer((yield from self.respond_script(*route_post)))
if client.type == 'GET' and route_get:
raise killer((yield from self.respond_script(*route_get)))
# Checking static files:
for dir, real in self.server.route_static.items():
if client.path.startswith(dir):
filepath = client.path.replace(dir, real, 1)
raise killer((yield from self.respond_file(filepath[1:])))
# Routing 404 error:
raise killer((yield from self.respond_error(404)))
# Timeout/Cancelled:
except concurrent.futures._base.CancelledError:
yield from self.respond_error(500)
log.info(client_info + ' TIMED OUT')
# Terminator:
except killer as exception:
log.info(client_info + ' ' + exception.message)
# Sending file:
@asyncio.coroutine
def respond_file(self, path):
try:
with open(path, "rb") as file:
size = os.path.getsize(path)
return (yield from self.respond(
status = 200,
content = file.read(),
type = self.get_mime(path),
length = size
))
# No file found:
except IOError:
return (yield from self.respond_error(404))
# Sending error message:
@asyncio.coroutine
def respond_error(self, number, custom=None):
error = {
400: 'Bad Request',
404: 'Not Found',
500: 'Internal Error',
}
error_text = number in error and error[number] or 'Unknown Error'
error_cont = str(number) + ' ' + error_text
return (yield from self.respond(number, error_cont))
# Executing client script and sending it response:
@asyncio.coroutine
def respond_script(self, script, keys={}):
script_result = (yield from script(self.client, **keys)) or b''
return (yield from self.respond(
status = self.client.status,
content = script_result,
header = self.client.header,
type = self.client.mime
))
# Pure data response:
@asyncio.coroutine
def respond(self, status, content=b'', type='text/html', length=None, header={}):
# Forming header:
encoding = self.server.encoding
self.header = 'HTTP/1.1 ' + str(status) + '\r\n'
self.form_header('Accept-Charset', encoding)
self.form_header('Server', 'Perver/' + __version__)
# Setting mime type (and encoding for text):
if type.startswith('text/'):
ctype = type + ';charset=' + encoding
else:
ctype = type
self.form_header('Content-Type', ctype)
# Working with custom headers:
for key, value in header.items():
self.form_header(key, value)
# Encoding unicode content:
if not isinstance(content, bytes):
content = content.encode(encoding)
# Forming content length:
length = length or len(content)
self.form_header('Content-Length', str(length))
# Forming response:
header = self.header.encode(encoding)
response = header + b'\r\n' + content + b'\r\n'
# Go:
self.writer.write(response)
self.writer.write_eof()
# Done:
return status
# Making client ID using cut SHA hash on client IP and User-Agent:
def get_id(self, clnt):
ident = str(clnt.ip) + str(clnt.agent)
ident_encoded = ident.encode(self.server.encoding)
hashed = hash_id(ident_encoded).digest()[:self.server.length_id]
cooked = base64.urlsafe_b64encode(hashed).decode(self.server.encoding)
		return cooked[:-2]  # Strip the two trailing '=' padding characters for readability.
# Power of regexp!
def check_route(self, path, map):
# Pure path:
if path in map:
return (map[path], {})
# Path with substitutions:
right_path, groups = None, sys.maxsize
for route in map:
# Removing retarded slash in the end of path:
path = path.endswith('/') and path[:-1] or path
# Patterns:
path_pattern = '^' + self.path_pattern.sub('([^/]+)', route) + '$'
matched = re.match(path_pattern, path)
# Testing route:
if matched:
keys = [key[1:-1] for key in self.path_pattern.findall(route)]
values = list(matched.groups())
if len(values) < groups:
groups = len(values)
right_path = (map[route], dict(zip(keys, values)))
# In case of fail:
return right_path
# Appending certain header lines:
def form_header(self, arg, var):
self.header = self.header + arg + ': ' + var + '\r\n'
# Retrieving type:
def get_mime(self, path):
fname, extension = os.path.splitext(path)
if extension == '':
return guess_type(path)[0] or 'text/html'
else:
return guess_type(path)[0] or 'application'
# Parsing GET and COOKIES:
@asyncio.coroutine
def parse(self, path):
# Preparing %key%=%value% regex:
get_word = '[^=;&?]'
pattern = '(%s+)=(%s+)' % (get_word, get_word)
# Unquoting map:
unq = lambda x: map(unquote, x)
# Replacing retarded pluses to spaces in path:
path = path.replace('+', ' ')
# Working:
matched = [unq(x) for x in re.findall(pattern, path)]
return dict(matched)
# Parsing POST multipart:
@asyncio.coroutine
def parse_post(self, content, type, boundary):
# Establishing default encoding:
encoding = self.server.encoding
# Parsing multipart:
if type == 'multipart/form-data':
# Splitting request to fields:
fields = content.split(boundary)
fields_dict = {}
# Turning `em to dictionary:
for field in fields:
# Checking:
field_rows = field.split(b'\r\n\r\n')
if len(field_rows) == 2:
header, value = field_rows
value = value[:-2]
# Decoding key:
key = re.findall(b';[ ]*name="([^;]+)"', header)[0]
key = key.decode(encoding)
# Checking content-type:
ctype = re.search(b'Content-Type: ([^;]+)$', header)
# File upload field:
if ctype:
if value == b'' or value == b'\r\n':
continue
ctype = ctype.group()
fname = re.findall(b';[ ]*filename="([^;]+)"', header)
fname = len(fname) == 1 and fname[0] or b'unknown'
fields_dict[key] = {
'filename': fname.decode(encoding),
'mime': ctype.decode(encoding),
'file': value,
}
# Text field:
else:
fields_dict[key] = value.decode(encoding)
return fields_dict
# Parsing average urlencoded:
else:
if isinstance(content, bytes):
content = content.decode(encoding)
return self.parse(content)
# Parsing client data:
@asyncio.coroutine
def build_client(self, header_raw, content_raw=b''):
# Safe dict values:
def safe_dict(dictionary, value, default):
if value in dictionary:
return dictionary[value]
else:
return default
# Decoding:
try:
# Decoding header:
header_decoded = header_raw.decode(self.server.encoding)
# Three basic values: request type, path and version:
pattern = r'^(GET|POST) ([A-Za-z0-9_.~?&%/\-]+) (HTTP/1.1|HTTP/1.0)'
unpacked = re.findall(pattern, header_decoded)
if len(unpacked) > 0:
type, path, version = re.findall(pattern, header_decoded)[0]
else:
raise PerverException('WRONG CLIENT HEAD')
# Splitting GET and PATH:
if '?' in path:
path, GET = path.split('?')
else:
GET = ''
# Raw header to header dictionary:
pattern = '([^:]+):[ ]*(.+)\r\n'
header = dict(re.findall(pattern, header_decoded))
# Basic client variables:
client = PerverClient()
client.version = version
client.type, client.path = type, unquote(path)
client.path_dir = '/'.join(unquote(path).split('/')[:-1])
# Client header:
client.header_raw, client.content_raw = header_raw, content_raw
client.content_type = safe_dict(header, 'Content-Type', '')
client.content_length = safe_dict(header, 'Content-Length', 0)
client.agent = safe_dict(header, 'User-Agent', 'Unknown')
client.mime = self.get_mime(client.path)
client.form_type = client.content_type.split(';')[0]
# Server client values:
client.ip, client.port, client.time = self.ip, self.port, self.time
client.id = self.get_id(client)
# POST boundary:
boundary = re.findall('boundary=(-*[0-9]*)', client.content_type)
if len(boundary) > 0:
boundary = boundary[0].encode(self.server.encoding)
else:
boundary = b''
# POST/GET/COOKIES:
client.get = yield from self.parse(GET)
client.post = yield from self.parse_post(content_raw, client.form_type, boundary)
client.cookie = yield from self.parse(safe_dict(header, 'Cookie', ''))
# Client ID cookie, can be overrided later:
client.header['Set-Cookie'] = 'id=' + client.id
# Client server-side container:
if not client.id in self.server.client:
self.server.client[client.id] = {}
client.container = self.server.client[client.id]
# Fixing client path dir:
if client.path_dir == '':
client.path_dir = '/'
# Done!
return client
# In case of fail:
except BaseException as exc:
log.warning('Error parsing user request.')
yield from self.respond_error(400)
raise exc
# Script client:
class PerverClient:
# GET/POST arguments:
get = {}
post = {}
# Client headers:
status = 200
header = {}
cookie = {}
mime = 'text/html'
# Redirection:
def redirect(self, page):
""" Redirects client to a certain page using 302 status code. """
self.header['Location'] = page
self.status = 302
return 'Redirecting...'
# Templating:
def template(self, text, **replace):
""" Used in templating - works same as str.format. """
return text.format(**replace)
# Rendering page:
def render(self, filename, **replace):
""" Same as template, but used in files. Returns templated file. """
file = open(filename, 'r')
return self.template(file.read(), **replace)
# Retrieving file:
def file(self, filename):
""" Simply returns file contents, binary. """
self.mime = guess_type(filename)[0]
file = open(filename, 'rb')
return file.read()
# Own header:
def set_header(self, key, value):
""" Sets custom client HTTP header. """
self.header[key] = value
# Cookies:
def set_cookie(self, name, value):
""" Sets custom client cookie, overriding default Perver ID Cookie. """
self.header['Set-Cookie'] = name + '=' + value +';'
# Status:
def set_status(self, status):
""" Sets custom response status, overriding default 200. """
self.status = status
# Mime:
def set_mime(self, mime):
""" Sets custom mime response. """
self.mime = mime
# Making HTML template:
def html(self, body, head='', doctype='html'):
""" HTML-correct template for nice pages. """
doctype = '<!DOCTYPE %s>' % doctype
head = '\r\n'.join(['<head>', head, '</head>'])
body = '\r\n'.join(['<body>', body, '</body>'])
return '\r\n'.join([doctype, head, body])
# Making forms:
def form(self, action, method, *inputs, id='', multipart=False):
""" Used for building forms. """
if multipart:
enctype='multipart/form-data'
else:
enctype='application/x-www-form-urlencoded'
form_desc = (action, method, id, enctype)
html = '<form action="%s" method="%s" id="%s" enctype="%s">' % form_desc
inputs = [list(inp.items()) for inp in inputs]
for input in inputs:
args = ' '.join('%s="%s"' % arg for arg in input)
html = '\r\n'.join([html, '<input %s><br>' % args])
return ''.join([html, '</form>'])
# Multipart form:
def form_multipart(self, *args, **kargs):
""" Works same as previous, but with multipart argument set to True."""
kargs['multipart'] = True
return self.form(*args, **kargs)
# Part of the previous function:
def input(self, name, **kargs):
""" Single form input. """
return dict(name=name, **kargs)
# Input submit:
def input_submit(self, value='Submit', **kargs):
""" Form submit button. """
return dict(type='submit', value=value, **kargs)
# Perver Server itself:
class Perver:
# PARAMETERS:
# Main server values:
encoding = 'utf-8'
backlog = 5
timeout = 30
# Maximal requests length:
get_max = 1024 * 8
post_max = 1024 * 1024 * 100
# Client ID length:
length_id = 10
# I highly recommend not to change this value.
# Routing paths:
route_get = {}
route_post = {}
route_static = {}
# Active clients list:
client = {}
# METHODS:
# Routing GET:
# DECORATOR:
def get(self, path):
""" Binds all GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_get[path] = wrapper
return wrapper
return decorator
# Routing POST:
# DECORATOR:
def post(self, path):
""" Binds all POST requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
return wrapper
return decorator
# Global routing:
# DECORATOR:
def route(self, path):
""" Binds all POST/GET requests from path to certain function. """
def decorator(func):
@wraps(func)
def wrapper(*args, **kwds):
return asyncio.coroutine(func)(*args, **kwds)
self.route_post[path] = wrapper
self.route_get[path] = wrapper
return wrapper
return decorator
# Adding static route:
def static(self, web, local):
""" Uses local path for serving static files for web requests. """
local = local.replace('\\', '/')
if not (local.startswith('/') and os.path.isabs(local)):
local = '/' + local
if not local.endswith('/'):
local = local + '/'
self.route_static[web] = local
# Starting:
def start(self, host='', port=80):
""" Starts the (mostly) infinite loop of server. """
# Configuring output:
self.host, self.port = host, port
log.basicConfig(level=log.INFO, format='%(levelname)s: %(message)s')
# Nice header for Windows:
if os_platform == 'win32':
os.system('title Perver v' + __version__)
# Trying running:
try:
self._loop = asyncio.get_event_loop()
self._server = asyncio.start_server(
self.handler,
host=host,
port=port,
backlog=self.backlog,
reuse_address=True,
)
self._server = self._loop.run_until_complete(self._server)
start_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
log.info('Perver has started at ' + start_time + '.')
self._loop.run_forever()
# In case of Skype on 80 port, access denials and other socket errors:
except OSError:
log.error('OS error, probably server is already running at that port \
or user is not sudoer.')
# Stop?
def stop(self):
""" Stops the Perver. """
self._server.close()
self._loop.stop()
# HTTP request handler:
@asyncio.coroutine
def handler(self, reader, writer):
try:
handler = PerverHandler(self)
yield from asyncio.wait_for(
handler.handle_request(reader, writer),
timeout=self.timeout
)
except KeyboardInterrupt:
log.warning('Interrupted by user.')
self.stop()
except SystemExit:
self.stop()
except asyncio.TimeoutError:
pass
except:
log.warning('Exception caught! \r\n' + format_exc())
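# Example usage (illustrative sketch; the routes, static path and port below are
# made up and not part of this module):
#
#     app = Perver()
#
#     @app.get('/')
#     def index(client):
#         return client.html('<h1>Hello!</h1>')
#
#     @app.get('/user/{name}')
#     def greet(client, name):
#         return 'Hi, ' + name
#
#     app.static('/static/', 'static/')
#     app.start(port=8080)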
# Pythonic async database
class PerverDB:
# Initialization:
def __init__(self, filename):
pass
# Not standalone:
if __name__ == '__main__':
print('Perver is not a standalone application. Use it as framework.')
print('Check "github.com/SweetPalma/Perver" for details.')
| mit | 1,527,089,193,175,442,400 | 25.686765 | 87 | 0.639885 | false |
joefutrelle/pyifcb | ifcb/data/bins.py | 1 | 3657 | """
Bin API. Provides consistent access to IFCB raw data stored
in various formats.
"""
from functools import lru_cache
from .adc import SCHEMA
from .hdr import TEMPERATURE, HUMIDITY
from .utils import BaseDictlike
from ..metrics.ml_analyzed import compute_ml_analyzed
class BaseBin(BaseDictlike):
"""
    Base class for Bin implementations, providing common features.
The bin PID is available as a Pid object via the "pid" property.
Subclasses must implement this.
Bins are dict-like. Keys are target numbers, values are ADC records.
ADC records are tuples.
Also supports an "adc" property that is a Pandas DataFrame containing
ADC data. Subclasses are required to provide this. The default dictlike
implementation uses that property.
Context manager support is provided for implementations
that must open files or other data streams.
"""
@property
def lid(self):
"""
:returns str: the bin's LID.
"""
return self.pid.bin_lid
@property
@lru_cache()
def images_adc(self):
"""
:returns pandas.DataFrame: the ADC data, minus targets that
are not associated with images
"""
return self.adc[self.adc[self.schema.ROI_WIDTH] > 0]
@property
def timestamp(self):
"""
:returns datetime: the bin's timestamp.
"""
return self.pid.timestamp
@property
def schema(self):
return SCHEMA[self.pid.schema_version]
# context manager default implementation
def __enter__(self):
return self
def __exit__(self, *args):
pass
# dictlike interface
def keys(self):
yield from self.adc.index
def has_key(self, k):
return k in self.adc.index
def __len__(self):
return len(self.adc.index)
def get_target(self, target_number):
"""
Retrieve a target record by target number
:param target_number: the target number
"""
d = tuple(self.adc[c][target_number] for c in self.adc.columns)
return d
def __getitem__(self, target_number):
return self.get_target(target_number)
# metrics
@lru_cache()
def _get_ml_analyzed(self):
return compute_ml_analyzed(self)
@property
def ml_analyzed(self):
ma, _, _ = self._get_ml_analyzed()
return ma
@property
def look_time(self):
_, lt, _ = self._get_ml_analyzed()
return lt
@property
def run_time(self):
_, _, rt = self._get_ml_analyzed()
return rt
@property
def inhibit_time(self):
return self.run_time - self.look_time
@property
def trigger_rate(self):
"""return trigger rate in triggers / s"""
return 1.0 * len(self) / self.run_time
@property
def temperature(self):
return self.header(TEMPERATURE)
@property
def humidity(self):
return self.header(HUMIDITY)
# convenience APIs for writing in different formats
def read(self):
with self:
new_bin = BaseBin()
new_bin.pid = self.pid.copy()
new_bin.headers = self.headers.copy()
new_bin.adc = self.adc
new_bin.images = { k:v for k,v in self.images.items() }
return new_bin
def to_hdf(self, hdf_file, group=None, replace=True):
from .hdf import bin2hdf
bin2hdf(self, hdf_file, group=group, replace=replace)
def to_zip(self, zip_path):
from .zip import bin2zip
bin2zip(self, zip_path)
def to_mat(self, mat_path):
from .matlab import bin2mat
bin2mat(self, mat_path)
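# Example (illustrative sketch; assumes a concrete Bin implementation that fills
# in pid, adc and headers, since BaseBin itself does not load data):
#
#     with some_bin as b:              # context-manager support
#         print(b.lid, b.timestamp)    # PID-derived metadata
#         print(b.ml_analyzed)         # computed volume metric
#         for target_number in b:      # dict-like: keys are target numbers
#             adc_record = b[target_number]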
| mit | 4,120,801,862,766,967,300 | 28.97541 | 75 | 0.615532 | false |
telefonicaid/murano-agent | muranoagent/app.py | 1 | 8000 | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import types
import bunch
import semver
from muranoagent.common import config
from muranoagent.common import messaging
from muranoagent import exceptions as exc
from muranoagent import execution_plan_queue
from muranoagent import execution_plan_runner
from muranoagent import execution_result as ex_result
from muranoagent.openstack.common import log as logging
from muranoagent.openstack.common import service
CONF = config.CONF
LOG = logging.getLogger(__name__)
format_version = '2.0.0'
class MuranoAgent(service.Service):
def __init__(self):
self._queue = execution_plan_queue.ExecutionPlanQueue()
super(MuranoAgent, self).__init__()
@staticmethod
def _load_package(name):
try:
LOG.debug('Loading plugin %s', name)
__import__(name)
except Exception:
LOG.warn('Cannot load package %s', name, exc_info=True)
pass
def _load(self):
path = os.path.join(os.path.dirname(__file__), 'executors')
sys.path.insert(1, path)
for entry in os.listdir(path):
package_path = os.path.join(path, entry)
if os.path.isdir(package_path):
MuranoAgent._load_package(entry)
def start(self):
self._load()
msg_iterator = self._wait_plan()
while True:
try:
self._loop_func(msg_iterator)
except Exception as ex:
LOG.exception(ex)
time.sleep(5)
def _loop_func(self, msg_iterator):
result, timestamp = self._queue.get_execution_plan_result()
if result is not None:
if self._send_result(result):
self._queue.remove(timestamp)
return
plan = self._queue.get_execution_plan()
if plan is not None:
LOG.debug("Got an execution plan '{0}':".format(str(plan)))
self._run(plan)
return
msg_iterator.next()
def _run(self, plan):
with execution_plan_runner.ExecutionPlanRunner(plan) as runner:
try:
result = runner.run()
execution_result = ex_result.ExecutionResult.from_result(
result, plan)
self._queue.put_execution_result(execution_result, plan)
except Exception as ex:
LOG.exception('Error running execution plan')
execution_result = ex_result.ExecutionResult.from_error(ex,
plan)
self._queue.put_execution_result(execution_result, plan)
def _send_result(self, result):
with self._create_rmq_client() as mq:
msg = messaging.Message()
msg.body = result
msg.id = result.get('SourceID')
mq.send(message=msg,
key=CONF.rabbitmq.result_routing_key,
exchange=CONF.rabbitmq.result_exchange)
return True
def _create_rmq_client(self):
rabbitmq = CONF.rabbitmq
connection_params = {
'login': rabbitmq.login,
'password': rabbitmq.password,
'host': rabbitmq.host,
'port': rabbitmq.port,
'virtual_host': rabbitmq.virtual_host,
'ssl': rabbitmq.ssl,
'ca_certs': rabbitmq.ca_certs.strip() or None
}
return messaging.MqClient(**connection_params)
def _wait_plan(self):
delay = 5
while True:
try:
with self._create_rmq_client() as mq:
with mq.open(CONF.rabbitmq.input_queue,
prefetch_count=1) as subscription:
while True:
msg = subscription.get_message(timeout=5)
if msg is not None and isinstance(msg.body, dict):
self._handle_message(msg)
if msg is not None:
msg.ack()
yield
delay = 5
except KeyboardInterrupt:
break
except Exception:
LOG.warn('Communication error', exc_info=True)
time.sleep(delay)
delay = min(delay * 1.2, 60)
def _handle_message(self, msg):
print(msg.body)
if 'ID' not in msg.body and msg.id:
msg.body['ID'] = msg.id
err = self._verify_plan(msg.body)
if err is None:
self._queue.put_execution_plan(msg.body)
else:
try:
execution_result = ex_result.ExecutionResult.from_error(
err, bunch.Bunch(msg.body))
self._send_result(execution_result)
except ValueError:
LOG.warn('Execution result is not produced')
def _verify_plan(self, plan):
plan_format_version = plan.get('FormatVersion', '1.0.0')
if semver.compare(plan_format_version, '2.0.0') > 0 or \
semver.compare(plan_format_version, format_version) < 0:
            range_str = 'in range 2.0.0-{0}'.format(format_version) \
if format_version != '2.0.0' \
else 'equal to {0}'.format(format_version)
return exc.AgentException(
3,
'Unsupported format version {0} (must be {1})'.format(
plan_format_version, range_str))
for attr in ('Scripts', 'Files', 'Options'):
if attr in plan and not isinstance(
plan[attr], types.DictionaryType):
return exc.AgentException(
2, '{0} is not a dictionary'.format(attr))
for name, script in plan.get('Scripts', {}).items():
for attr in ('Type', 'EntryPoint'):
if attr not in script or not isinstance(
script[attr], types.StringTypes):
return exc.AgentException(
2, 'Incorrect {0} entry in script {1}'.format(
attr, name))
if not isinstance(script.get('Options', {}), types.DictionaryType):
return exc.AgentException(
2, 'Incorrect Options entry in script {0}'.format(name))
if script['EntryPoint'] not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses entry point {1}'.format(
name, script['EntryPoint']))
for additional_file in script.get('Files', []):
if additional_file not in plan.get('Files', {}):
return exc.AgentException(
2, 'Script {0} misses file {1}'.format(
name, additional_file))
for key, plan_file in plan.get('Files', {}).items():
for attr in ('BodyType', 'Body', 'Name'):
if attr not in plan_file:
return exc.AgentException(
2, 'Incorrect {0} entry in file {1}'.format(
attr, key))
if plan_file['BodyType'] not in ('Text', 'Base64'):
return exc.AgentException(
                    2, 'Incorrect BodyType in file {0}'.format(key))
return None
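# Example of a minimal execution plan accepted by _verify_plan above
# (illustrative sketch; the script name, Type value and body are made up):
#
#     {
#         'FormatVersion': '2.0.0',
#         'ID': 'plan-id',
#         'Scripts': {
#             'deploy': {
#                 'Type': 'Application',
#                 'EntryPoint': 'deploy.sh',
#                 'Options': {},
#                 'Files': []
#             }
#         },
#         'Files': {
#             'deploy.sh': {
#                 'Name': 'deploy.sh',
#                 'BodyType': 'Text',
#                 'Body': '#!/bin/bash\necho done\n'
#             }
#         }
#     }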
| apache-2.0 | -4,816,543,227,355,885,000 | 36.735849 | 79 | 0.541375 | false |
garaud/ezhc | ezhc/scripts.py | 1 | 1565 |
FORMATTER_PERCENT = "function() { return (this.value > 0 ? ' + ' : '') + this.value + '%'; }"
FORMATTER_OTHER = "function() { return (this.value > 0 ? ' + ' : '') + this.value; }"
TOOLTIP_HEADER_FORMAT = '<b>{series.name}</b><br>'
TOOLTIP_POINT_FORMAT_PERCENT = '<span style="color:{series.color}">{series.name}</span>: <b>{point.y}</b> ({point.change}%)<br/>'
TOOLTIP_POINT_FORMAT_OTHER = '<span style="color:{series.color}">{series.name}</span>: <b>{point.y}</b><br/>'
TOOLTIP_POSITIONER = "function() { return { x: 400, y: 1 }; }"
JS_JSON_PARSE = """
options = JSON.stringify(options);
options = JSON.parse(options, function(key, value) {
if (value && (typeof value==="string")) {
if (value.substr(0,8) == "function") {
var startBody = value.indexOf('{') + 1;
var endBody = value.lastIndexOf('}');
var startArgs = value.indexOf('(') + 1;
var endArgs = value.indexOf(')');
return new Function(value.substring(startArgs, endArgs),
value.substring(startBody, endBody));
}
if (value.substr(0,9)=="(function") {
var startBody = value.indexOf('{') + 1;
var endBody = value.lastIndexOf('}');
var startArgs = value.indexOf('(', 1) + 1;
var endArgs = value.indexOf(')');
var func = new Function(value.substring(startArgs, endArgs),
value.substring(startBody, endBody));
return func();
}
}
return value;
});
"""
| mit | 8,815,823,130,265,636,000 | 33.021739 | 129 | 0.538658 | false |
larrylindsey/reconstructmergetool | build/lib.linux-x86_64-2.7/pyrecon/toolsgui/excelTool.py | 1 | 4104 | #!/usr/bin/env python
import sys, os
from pyrecon.tools import excelTool
from PySide import QtGui, QtCore
class excelToolWindow(QtGui.QWidget):
def __init__(self, parent = None):
QtGui.QWidget.__init__(self, parent)
self.parent = parent
self.setGeometry(0,0,500,200)
self.seriesPathLine = None
self.seriesPathBrowse = None
self.seriesPath = 'Enter or browse path to series'
self.savePathLine = None
self.savePathBrowse = None
self.savePath = 'Enter or browse path to save excel workbook'
self.goButton = None
# GUI Start Functions
self.functionalItems()
self.layout()
self.show()
def functionalItems(self):
self.seriesPathLine = QtGui.QLineEdit(self)
self.seriesPathLine.setText( self.seriesPath )
self.seriesPathLine.setAlignment( QtCore.Qt.AlignCenter )
self.seriesPathBrowse = QtGui.QPushButton(self)
self.seriesPathBrowse.clicked.connect( self.browse )
self.seriesPathBrowse.setIconSize(QtCore.QSize(25,25))
self.seriesPathBrowse.setText('Browse')
self.savePathLine = QtGui.QLineEdit(self)
self.savePathLine.setText( self.savePath )
self.savePathLine.setAlignment( QtCore.Qt.AlignCenter ) #===
self.savePathBrowse = QtGui.QPushButton(self)
self.savePathBrowse.clicked.connect( self.browse )
self.savePathBrowse.setIconSize(QtCore.QSize(25,25))
self.savePathBrowse.setText('Browse')
self.goButton = QtGui.QPushButton(self)
self.goButton.setText('Create Excel Workbook (.xlsx)')
self.goButton.clicked.connect( self.checkAndFinish )
def layout(self):
vbox = QtGui.QVBoxLayout()
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget( self.seriesPathLine )
hbox1.addWidget( self.seriesPathBrowse )
hbox1.insertSpacing(0,25)
hbox1.insertSpacing(-1,25)
hbox2 = QtGui.QHBoxLayout()
hbox2.addWidget( self.savePathLine )
hbox2.addWidget( self.savePathBrowse )
hbox2.insertSpacing(0,25)
hbox2.insertSpacing(-1,25)
hbox3 = QtGui.QHBoxLayout()
hbox3.insertSpacing(0,225)
hbox3.addWidget( self.goButton )
hbox3.insertSpacing(-1,225)
vbox.addLayout(hbox1)
vbox.addLayout(hbox2)
vbox.addLayout(hbox3)
self.setLayout(vbox)
def browse(self):
if self.sender() == self.seriesPathBrowse:
path = QtGui.QFileDialog.getOpenFileName(self,
'Load Series',
'/home/',
'Series File (*.ser)')
path = str(path[0])
if path != '':
self.seriesPathLine.setText(path)
elif self.sender() == self.savePathBrowse:
path = str( QtGui.QFileDialog.getExistingDirectory(self) )
if path != '':
self.savePathLine.setText(path)
def checkAndFinish(self):
self.seriesPath = self.seriesPathLine.text()
self.savePath = self.savePathLine.text()
if '.ser' not in self.seriesPath:
msg = QtGui.QMessageBox(self)
msg.setText('Invalid series file -- Please try again.')
msg.show()
if self.savePath == 'Enter or browse path to save excel workbook' or '/' not in self.savePath:
msg = QtGui.QMessageBox(self)
msg.setText('Invalid save path!')
msg.show()
else:
print('Continuing...')
print(self.seriesPath)
print(self.savePath)
excelTool.main(self.seriesPath, self.savePath)
self.close()
def main():
app = QtGui.QApplication(sys.argv)
t = excelToolWindow()
sys.exit( app.exec_() )
main()
| gpl-3.0 | 3,869,149,202,501,233,000 | 35.327434 | 102 | 0.570906 | false |
DTOcean/dtocean-core | test_data/inputs_wp2_wave.py | 1 | 3952 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 09 10:39:38 2015
@author: 108630
"""
import os
from datetime import datetime, timedelta
import numpy as np
dir_path = os.path.dirname(__file__)
# Setup
x = np.linspace(0.,1000.,20.)
y = np.linspace(0.,300.,20.)
nx = len(x)
ny = len(y)
# Bathymetry?
X, Y = np.meshgrid(x,y)
Z = -X * 0.1 - 1
depths = Z.T[:, :, np.newaxis]
sediments = np.chararray((nx,ny,1), itemsize=20)
sediments[:] = "rock"
strata = {"values": {'depth': depths,
'sediment': sediments},
"coords": [x, y, ["layer 1"]]}
# Mannings
#geoxyz = np.vstack((X.ravel(),Y.ravel(),G.ravel())).T
G = np.zeros((nx, ny)) + 0.3
geo_raw = {"values": G,
"coords": [x, y]}
sample_size = 1000
dates = []
dt = datetime(2010, 12, 01)
step = timedelta(seconds=3600)
for _ in xrange(sample_size):
dates.append(dt)
dt += step
Hm0 = 9. * np.random.random_sample(sample_size)
direction = 360. * np.random.random_sample(sample_size)
Te = 15. * np.random.random_sample(sample_size)
wave_series = {"DateTime": dates,
"Te": Te,
"Hm0": Hm0,
"Dir": direction}
# Fixed array layout
pos = [(450., 100.),
(550., 100.),
(450., 150.),
(550., 150.)]
FixedArrayLayout = np.array(pos)
#wave_xgrid = None
#B= np.array([0.,270.])/180*np.pi
#H= np.array([1.])
#T= np.array([6.])
#p= 1.0/len(B)/len(H)/len(T)* np.ones((len(T),len(H),len(B)))
#
#occurrence_matrix_coords = [T,H,B]
#wave_xgrid = {"values": p,
# "coords": occurrence_matrix_coords}
lease_area = np.array([[50., 50.],[950., 50.],[950., 250.],[50., 250.]],dtype=float)
power_law_exponent = np.array([7.])
nogo_areas = {"a": np.array([[0, 0],[.1, 0],[.1, .1],[0, .1]])}
rated_array_power = 5
main_direction = None
blockage_ratio = 1.
spectrum_type_farm = 'JONSWAP'
spectrum_gamma_farm = 3.3
spectrum_dir_spreading_farm = 0.
point_SSH = 0.
#user_array_option = 'rectangular'
#user_array_layout = None
user_array_option = 'User Defined Fixed'
user_array_layout = FixedArrayLayout
wave_data_directory = os.path.abspath(os.path.join(dir_path, "nemoh"))
float_flag = False
min_install = -np.inf
max_install = 0.
min_dist_x = 40.
min_dist_y = 40.
yaw_angle = 0.
rated_power_device = 1
op_threshold = 0
landing_point = (0.,0.)
test_data = {'bathymetry.layers': strata,
'corridor.landing_point': landing_point,
'device.installation_depth_max': max_install,
'device.installation_depth_min': min_install,
'device.minimum_distance_x': min_dist_x,
'device.minimum_distance_y': min_dist_y,
'options.optimisation_threshold': op_threshold,
'device.power_rating': rated_power_device,
'device.wave_data_directory': wave_data_directory,
'device.yaw': yaw_angle,
'farm.blockage_ratio': blockage_ratio,
'bathymetry.mannings': geo_raw,
'site.lease_boundary': lease_area,
'project.main_direction': main_direction,
'farm.nogo_areas': nogo_areas,
# 'farm.point_sea_surface_height': point_SSH,
# 'farm.power_law_exponent': power_law_exponent,
'project.rated_power': rated_array_power,
'farm.spec_gamma': spectrum_gamma_farm,
'farm.spec_spread': spectrum_dir_spreading_farm,
'farm.spectrum_name': spectrum_type_farm,
# 'farm.wave_occurrence': wave_xgrid,
'farm.wave_series': wave_series,
'options.user_array_layout': user_array_layout,
'options.user_array_option': user_array_option}
if __name__ == "__main__":
from dtocean_core.utils.files import pickle_test_data
file_path = os.path.abspath(__file__)
pkl_path = pickle_test_data(file_path, test_data)
print "generate test data: {}".format(pkl_path)
| gpl-3.0 | 6,940,826,449,139,902,000 | 27.846715 | 84 | 0.584008 | false |
q14035/pimouse_ros | scripts/motors2.py | 1 | 2178 | #!/usr/bin/env python
#encoding: utf8
import sys, rospy, math
from pimouse_ros.msg import MotorFreqs
from geometry_msgs.msg import Twist
from std_srvs.srv import Trigger, TriggerResponse
class Motor():
def __init__(self):
if not self.set_power(False): sys.exit(1)
rospy.on_shutdown(self.set_power)
self.sub_raw = rospy.Subscriber('motor_raw', MotorFreqs, self.callback_raw_freq)
self.sub_cmd_vel = rospy.Subscriber('cmd_vel', Twist, self.callback_cmd_vel)
self.srv_on = rospy.Service('motor_on', Trigger, self.callback_on)
self.srv_off = rospy.Service('motor_off', Trigger, self.callback_off)
self.last_time = rospy.Time.now()
self.using_cmd_vel = False
def set_power(self, onoff = False):
en = "/dev/rtmotoren0"
try:
with open(en, 'w') as f:
f.write("1\n" if onoff else "0\n")
self.is_on = onoff
return True
except:
rospy.logerr("cannot write to " + en)
return False
def set_raw_freq(self, left_hz, right_hz):
if not self.is_on:
rospy.logerr("not enpowered")
return
try:
with open("/dev/rtmotor_raw_l0", 'w') as lf, open("/dev/rtmotor_raw_r0", 'w') as rf:
lf.write(str(int(round(left_hz))) + "\n")
rf.write(str(int(round(right_hz))) + "\n")
except:
rospy.logerr("cannot write to rtmotor_raw_*")
def callback_raw_freq(self, message):
self.set_raw_freq(message.left_hz, message.right_hz)
def callback_cmd_vel(self, message):
forward_hz = 80000.0*message.linear.x/(9*math.pi)
rot_hz = 400.0*message.angular.z/math.pi
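		# The Twist command is converted to left/right motor pulse frequencies:
		# forward_hz comes from linear.x (m/s) and rot_hz from angular.z (rad/s).
		# The numeric factors are hardware-specific (they depend on the robot's
		# wheel geometry and the motor driver's steps per revolution), so the
		# values above are taken as given rather than derived here.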
self.set_raw_freq(forward_hz-rot_hz, forward_hz+rot_hz)
self.using_cmd_vel = True
self.last_time = rospy.Time.now()
def onoff_response(self, onoff):
d = TriggerResponse()
d.success = self.set_power(onoff)
d.message = "ON" if self.is_on else "OFF"
return d
def callback_on(self, message): return self.onoff_response(True)
def callback_off(self, message): return self.onoff_response(False)
if __name__ == '__main__':
rospy.init_node('motors')
m = Motor()
rate = rospy.Rate(10)
while not rospy.is_shutdown():
if m.using_cmd_vel and rospy.Time.now().to_sec() - m.last_time.to_sec() >= 1.0:
m.set_raw_freq(0, 0)
m.using_cmd_vel = False
rate.sleep()
| gpl-3.0 | 1,233,239,839,957,850,400 | 29.676056 | 87 | 0.674472 | false |
bouthors/ZenPacks.MatthieuBouthors.pfSense | setup.py | 1 | 2450 | ################################
# These variables are overwritten by Zenoss when the ZenPack is exported
# or saved. Do not modify them directly here.
# NB: PACKAGES is deprecated
NAME = "ZenPacks.MatthieuBouthors.pfSense"
VERSION = "0.7.0"
AUTHOR = "Matthieu Bouthors"
LICENSE = "GPL v2"
NAMESPACE_PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors']
PACKAGES = ['ZenPacks', 'ZenPacks.MatthieuBouthors', 'ZenPacks.MatthieuBouthors.pfSense']
INSTALL_REQUIRES = []
COMPAT_ZENOSS_VERS = ""
PREV_ZENPACK_NAME = ""
# STOP_REPLACEMENTS
################################
# Zenoss will not overwrite any changes you make below here.
from setuptools import setup, find_packages
setup(
# This ZenPack metadata should usually be edited with the Zenoss
# ZenPack edit page. Whenever the edit page is submitted it will
# overwrite the values below (the ones it knows about) with new values.
name=NAME,
version=VERSION,
author=AUTHOR,
license=LICENSE,
# This is the version spec which indicates what versions of Zenoss
# this ZenPack is compatible with
compatZenossVers=COMPAT_ZENOSS_VERS,
# previousZenPackName is a facility for telling Zenoss that the name
# of this ZenPack has changed. If no ZenPack with the current name is
# installed then a zenpack of this name if installed will be upgraded.
prevZenPackName=PREV_ZENPACK_NAME,
# Indicate to setuptools which namespace packages the zenpack
# participates in
namespace_packages=NAMESPACE_PACKAGES,
# Tell setuptools what packages this zenpack provides.
packages=find_packages(),
# Tell setuptools to figure out for itself which files to include
# in the binary egg when it is built.
include_package_data=True,
# Indicate dependencies on other python modules or ZenPacks. This line
# is modified by zenoss when the ZenPack edit page is submitted. Zenoss
# tries to put add/delete the names it manages at the beginning of this
    # list, so any manual additions should be added to the end. Things will
    # go poorly if this line is broken into multiple lines or modified too
    # dramatically.
install_requires=INSTALL_REQUIRES,
# Every ZenPack egg must define exactly one zenoss.zenpacks entry point
# of this form.
entry_points={
'zenoss.zenpacks': '%s = %s' % (NAME, NAME),
},
# All ZenPack eggs must be installed in unzipped form.
zip_safe=False,
)
| gpl-2.0 | 913,829,869,681,725,400 | 36.121212 | 89 | 0.707347 | false |
huiyiqun/check_mk | tests/pylint/test_pylint_check_plugins.py | 1 | 1148 | #!/usr/bin/python
# encoding: utf-8
import os
import sys
from testlib import repo_path, cmc_path
import testlib.pylint_cmk as pylint_cmk
def test_pylint_checks(pylint_test_dir):
f = file(pylint_test_dir + "/cmk-checks.py", "w")
# Fake data structures where checks register (See cmk_base/checks.py)
f.write("""
check_info = {}
check_includes = {}
precompile_params = {}
check_default_levels = {}
factory_settings = {}
check_config_variables = []
snmp_info = {}
snmp_scan_functions = {}
active_check_info = {}
special_agent_info = {}
""")
# add the modules
pylint_cmk.add_file(f, repo_path() + "/cmk_base/check_api.py")
# Now add the checks
for path in pylint_cmk.check_files(repo_path() + "/checks"):
pylint_cmk.add_file(f, path)
f.close()
exit_code = pylint_cmk.run_pylint(pylint_test_dir)
assert exit_code == 0, "PyLint found an error in checks, inventory " \
"or agent bakery plugins"
| gpl-2.0 | -3,343,809,612,867,096,000 | 29.210526 | 74 | 0.543554 | false |
ISRyuu/ISNNTF | MYYOLO.py | 1 | 16338 | import tensorflow as tf
import numpy as np
import time
import os
from ISNNTF_DS import ConvolutionalLayer
from ISNNTF_DS import FullyConnectedLayer
from ISNNTF_DS import ISTFNN
from TFRConverter import VOC_TFRecords
def leaky_relu(x):
return tf.nn.leaky_relu(x, alpha=0.1)
def YOLO_layers(mbs):
keep_prob = tf.placeholder_with_default(1.0, shape=(), name='dropout_keep_prob')
layers = []
layer_no = 1
# layer 1
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 448, 448, 3],
[7, 7, 3, 64],
strides=[1, 2, 2, 1],
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 2
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 112, 112, 64],
[3, 3, 64, 192],
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 3
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 56, 56, 192],
[1, 1, 192, 128],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 4
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 56, 56, 128],
[3, 3, 128, 256],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 5
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 56, 56, 256],
[1, 1, 256, 256],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 6
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 56, 56, 256],
[3, 3, 256, 512],
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 7..14
for _ in range(4):
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 28, 28, 512],
[1, 1, 512, 256],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 28, 28, 256],
[3, 3, 256, 512],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 15
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 28, 28, 512],
[1, 1, 512, 512],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 16
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 28, 28, 512],
[3, 3, 512, 1024],
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 17..20
for _ in range(2):
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 14, 14, 1024],
[1, 1, 1024, 512],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 14, 14, 512],
[3, 3, 512, 1024],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 21
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 14, 14, 1024],
[3, 3, 1024, 1024],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 22
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 14, 14, 1024],
[3, 3, 1024, 1024],
strides=[1, 2, 2, 1],
pool_size=None,
activation_fn=leaky_relu,
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 23..24
for _ in range(2):
with tf.variable_scope("conv%d" % layer_no):
layers.append([
ConvolutionalLayer(
[mbs, 7, 7, 1024],
[3, 3, 1024, 1024],
pool_size=None,
activation_fn=leaky_relu
),
"conv%d/" % layer_no
])
layer_no += 1
# layer 25
with tf.variable_scope("conn%d" % layer_no):
layers.append([
FullyConnectedLayer(
7*7*1024, 4096, activation_fn=leaky_relu, keep_prob=keep_prob
),
"conn%d/" % layer_no
])
layer_no += 1
# layer 26
with tf.variable_scope("conn%d" % layer_no):
layers.append([
FullyConnectedLayer(
4096, 7*7*30, activation_fn=None
),
"conn%d/" % layer_no
])
return layers, keep_prob
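# The stack above follows the YOLOv1-style layout: a series of convolutional
# layers downsampling a 448x448x3 input to a 7x7 grid, followed by two fully
# connected layers whose final 7*7*30 output encodes, per cell, 20 class scores,
# 2 box confidences and 2 boxes of 4 coordinates each.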
class MYYOLO(object):
def __init__(self, img_size, mbs, classes, cell_size, pred_boxes):
self.img_size = img_size
self.classes = classes
self.cell_size = cell_size
self.predict_boxes = pred_boxes
self.mbs = mbs
self.lambda_coord = 5
self.lambda_noobj = 0.5
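        # offset_y is a (cell_size, cell_size, predict_boxes) grid whose entry
        # [i, j, b] equals i, i.e. each cell's index along the first grid axis,
        # replicated for every predicted box; its transpose supplies the other
        # axis. It is used in the loss layer to convert box centres between
        # per-cell offsets and whole-image coordinates.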
self.offset_y = np.reshape(
np.asarray([np.arange(self.cell_size)]*self.cell_size*self.predict_boxes).T,
(self.cell_size, self.cell_size, self.predict_boxes))
def loss_layer(self, predictions, gbox):
with tf.variable_scope("loss"):
predictions = tf.reshape(predictions, [self.mbs, self.cell_size, self.cell_size, -1])
gbox = tf.reshape(gbox, [self.mbs, self.cell_size, self.cell_size, -1])
label = gbox[..., :self.classes]
# contain object or not
confidence = tf.reshape(
gbox[..., self.classes],
[self.mbs, self.cell_size, self.cell_size, 1])
# groud true boxes
gtb = tf.reshape(
gbox[..., self.classes+1:],
[self.mbs, self.cell_size, self.cell_size, 1, 4]) / self.img_size
p_labels = predictions[..., :self.classes]
p_confidences = tf.reshape(
predictions[..., self.classes:self.classes+self.predict_boxes],
[self.mbs, self.cell_size, self.cell_size, self.predict_boxes])
p_boxes = tf.reshape(
predictions[..., self.classes+self.predict_boxes:],
[self.mbs, self.cell_size, self.cell_size, self.predict_boxes, 4])
# repeat gtb to fit predictions
size_fitted_gtb = tf.tile(gtb, [1, 1, 1, self.predict_boxes, 1])
offset_y = tf.expand_dims(
tf.constant(self.offset_y, dtype=tf.float32), 0)
offset_y = tf.tile(offset_y, [self.mbs, 1, 1, 1])
offset_x = tf.transpose(offset_y, (0, 2, 1, 3))
# convert x, y to values relative to the whole image
# and square back w, h, predict sqrted w, h according
# to original darknet implementation, for convenience.
p_boxes_squared_offset = tf.stack(
[(p_boxes[..., 0] + offset_x) / self.cell_size,
(p_boxes[..., 1] + offset_y) / self.cell_size,
tf.square(p_boxes[..., 2]),
tf.square(p_boxes[..., 3])],
axis=-1)
iou = self.calculate_IOU(p_boxes_squared_offset, size_fitted_gtb)
responsible_iou = tf.reduce_max(iou, axis=-1, keepdims=True)
responsible_mask = tf.cast(iou >= responsible_iou, tf.float32)
object_responsible_mask = responsible_mask * confidence
noobj_mask = tf.ones_like(object_responsible_mask) - \
object_responsible_mask
# convert x, y to values relative to bounds of the grid cell
boxes_offset_sqrted = tf.stack(
[size_fitted_gtb[..., 0] * self.cell_size - offset_x,
size_fitted_gtb[..., 1] * self.cell_size - offset_y,
tf.sqrt(size_fitted_gtb[..., 2]),
tf.sqrt(size_fitted_gtb[..., 3])],
axis=-1)
loss_boxes = tf.reduce_mean(
tf.reduce_sum(
tf.square(
tf.multiply(
p_boxes - boxes_offset_sqrted,
tf.expand_dims(object_responsible_mask, axis=-1)
)
),
axis=[1, 2, 3, 4]
)
) * self.lambda_coord
loss_classes = tf.reduce_mean(
tf.reduce_sum(
tf.square((p_labels - label) * confidence),
axis=[1, 2, 3]
)
)
# https://github.com/pjreddie/darknet/blob/master/src/detection_layer.c
# line 166
# It seems this is inconsistent with the loss function in paper.
loss_obj_confidence = tf.reduce_mean(
tf.reduce_sum(
tf.square(
tf.multiply(
iou - p_confidences,
object_responsible_mask
)
),
axis=[1, 2, 3]
)
)
loss_noobj_confidence = tf.reduce_mean(
tf.reduce_sum(
tf.square(p_confidences * noobj_mask),
axis=[1, 2, 3]
)
) * self.lambda_noobj
loss = loss_boxes + loss_classes + \
loss_obj_confidence + loss_noobj_confidence
tf.summary.scalar('loss', loss)
tf.summary.scalar('loss_boxes', loss_boxes)
tf.summary.scalar('loss_classes', loss_classes)
tf.summary.scalar('loss_obj_confidence', loss_obj_confidence)
tf.summary.scalar('loss_noobj_confidence', loss_noobj_confidence)
return loss
def calculate_IOU(self, predictions, gtb):
# convert boxes from [centerx, centery, w, h] to
# [x_upper_left, y_upper_left, x_lower_right, y_lower_right]
gtb_boxes_x_ul = gtb[..., 0] - gtb[..., 2] / 2
gtb_boxes_y_ul = gtb[..., 1] - gtb[..., 3] / 2
gtb_boxes_x_lr = gtb[..., 0] + gtb[..., 2] / 2
gtb_boxes_y_lr = gtb[..., 1] + gtb[..., 3] / 2
pred_boxes_x_ul = predictions[..., 0] - predictions[..., 2] / 2
pred_boxes_y_ul = predictions[..., 1] - predictions[..., 3] / 2
pred_boxes_x_lr = predictions[..., 0] + predictions[..., 2] / 2
pred_boxes_y_lr = predictions[..., 1] + predictions[..., 3] / 2
# stack points back to shape [mbs, cell, cell, boxes, *4]
# *4 == [x_ul, y_ul, x_lr, y_lr]
gtb_boxes = tf.stack([gtb_boxes_x_ul, gtb_boxes_y_ul,
gtb_boxes_x_lr, gtb_boxes_y_lr],
axis=-1)
pred_boxes = tf.stack([pred_boxes_x_ul, pred_boxes_y_ul,
pred_boxes_x_lr, pred_boxes_y_lr],
axis=-1)
# find upper left and lower right points of overlap
# shape overlap_ul/lr == [mbs, cell, cell, boxes, 2]
overlap_ul = tf.maximum(gtb_boxes[..., :2], pred_boxes[..., :2])
overlap_lr = tf.minimum(gtb_boxes[..., 2:], pred_boxes[..., 2:])
# area of overlap
overlap_area = tf.reduce_prod(
tf.maximum(0.0, tf.subtract(overlap_lr, overlap_ul)),
axis=-1)
# area of union
union_area = tf.subtract(
tf.add(
tf.multiply(predictions[..., 2], predictions[..., 3]),
tf.multiply(gtb[..., 2], gtb[..., 3])
),
overlap_area)
# avoid zero division error
union_area = tf.maximum(union_area, 1e-10)
# iou
iou = tf.div(overlap_area, union_area)
iou = tf.minimum(tf.maximum(iou, 0), 1)
return iou
if __name__ == '__main__':
mbs = 1
layers, keep_prob = YOLO_layers(mbs)
parser = VOC_TFRecords.parse_function_maker([448, 448, 3], [7, 7, 25])
net = ISTFNN(layers, mbs, parser, buffer_mbs=10)
training_file = "voc2007_dummy.gz"
training_file = "voc2007.tfrecords.gz"
test_file = "voc2007test.tfrecords.gz"
global_steps = tf.Variable(0, tf.int32, name='steps')
yolo = MYYOLO(448, mbs, 20, 7, 2)
optimizer = tf.train.AdamOptimizer(0.00001)
cost = yolo.loss_layer(net.output, net.y)
trainer = optimizer.minimize(cost, global_step=global_steps)
saver = tf.train.Saver()
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
config = tf.ConfigProto(log_device_placement=True)
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
# sess = tf_debug.TensorBoardDebugWrapperSession(sess, 'localhost:6064')
merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter('training_log', graph=sess.graph)
test_writer = tf.summary.FileWriter('test_log')
sess.run(init)
steps = 0
test_steps = 0
with open("output", "w") as outputfile:
for epoch in range(100):
sess.run(net.iterator.initializer,
feed_dict={net.input_file_placeholder: training_file})
try:
while True:
last = time.time()
loss, summary, _ = sess.run([cost, merged, trainer], feed_dict={keep_prob: 0.5})
train_writer.add_summary(summary, steps)
steps += 1
print("cost: %f time: %f" % (loss, time.time() - last))#, file=outputfile)
except tf.errors.OutOfRangeError:
continue
if epoch != 0 and epoch % 50 == 0:
saver.save(sess, global_step=global_steps,
save_path=os.path.join('model', 'checkpoint'))
losses = []
sess.run(net.iterator.initializer,
feed_dict={net.input_file_placeholder: test_file})
try:
start_time = time.time()
while True:
loss, summary = sess.run([cost, merged])
test_writer.add_summary(summary, test_steps)
test_steps += 1
print("test batch loss: %f" % loss, file=outputfile)
losses += [loss]
except tf.errors.OutOfRangeError:
print("test loss: %f" % np.mean(losses), file=outputfile)
print("test evaluation time: %f" % (time.time() - start_time))
| bsd-3-clause | -4,331,951,014,391,955,000 | 33.395789 | 104 | 0.468417 | false |
Pathel/deuterium | src/server/serversession.py | 1 | 12159 | """
This defines the Server's generic session object. This object represents
a connection to the outside world but doesn't know any details about how
the connection actually happens (so it's the same for telnet, web, ssh etc).
It is stored on the Server side (as opposed to protocol-specific sessions,
which are stored on the Portal side).
"""
import time
from datetime import datetime
from django.conf import settings
#from src.scripts.models import ScriptDB
from src.comms.models import ChannelDB
from src.utils import logger, utils
from src.utils.inlinefunc import parse_inlinefunc
from src.utils.utils import make_iter
from src.commands.cmdhandler import cmdhandler
from src.commands.cmdsethandler import CmdSetHandler
from src.server.session import Session
IDLE_COMMAND = settings.IDLE_COMMAND
_GA = object.__getattribute__
_ObjectDB = None
_OOB_HANDLER = None
# load optional out-of-band function module (this acts as a verification)
OOB_PLUGIN_MODULES = [utils.mod_import(mod)
for mod in make_iter(settings.OOB_PLUGIN_MODULES) if mod]
INLINEFUNC_ENABLED = settings.INLINEFUNC_ENABLED
# i18n
from django.utils.translation import ugettext as _
#------------------------------------------------------------
# Server Session
#------------------------------------------------------------
class ServerSession(Session):
"""
This class represents a player's session and is a template for
individual protocols to communicate with Evennia.
Each player gets a session assigned to them whenever they connect
to the game server. All communication between game and player goes
through their session.
"""
def __init__(self):
"Initiate to avoid AttributeErrors down the line"
self.puppet = None
self.player = None
self.cmdset_storage_string = ""
self.cmdset = CmdSetHandler(self, True)
def __cmdset_storage_get(self):
return [path.strip() for path in self.cmdset_storage_string.split(',')]
def __cmdset_storage_set(self, value):
self.cmdset_storage_string = ",".join(str(val).strip() for val in make_iter(value))
cmdset_storage = property(__cmdset_storage_get, __cmdset_storage_set)
def at_sync(self):
"""
This is called whenever a session has been resynced with the portal.
At this point all relevant attributes have already been set and
self.player been assigned (if applicable).
Since this is often called after a server restart we need to set up
the session as it was.
"""
global _ObjectDB
if not _ObjectDB:
from src.objects.models import ObjectDB as _ObjectDB
if not self.logged_in:
# assign the unloggedin-command set.
self.cmdset_storage = settings.CMDSET_UNLOGGEDIN
self.cmdset.update(init_mode=True)
if self.puid:
# reconnect puppet (puid is only set if we are coming
# back from a server reload)
obj = _ObjectDB.objects.get(id=self.puid)
self.player.puppet_object(self.sessid, obj, normal_mode=False)
def at_login(self, player):
"""
Hook called by sessionhandler when the session becomes authenticated.
player - the player associated with the session
"""
self.player = player
self.uid = self.player.id
self.uname = self.player.username
self.logged_in = True
self.conn_time = time.time()
self.puid = None
self.puppet = None
self.cmdset_storage = settings.CMDSET_SESSION
# Update account's last login time.
self.player.last_login = datetime.now()
self.player.save()
# add the session-level cmdset
self.cmdset = CmdSetHandler(self, True)
def at_disconnect(self):
"""
Hook called by sessionhandler when disconnecting this session.
"""
if self.logged_in:
sessid = self.sessid
player = self.player
_GA(player.dbobj, "unpuppet_object")(sessid)
uaccount = player.dbobj
uaccount.last_login = datetime.now()
uaccount.save()
# calling player hook
_GA(player.typeclass, "at_disconnect")()
self.logged_in = False
if not self.sessionhandler.sessions_from_player(player):
# no more sessions connected to this player
player.is_connected = False
# this may be used to e.g. delete player after disconnection etc
_GA(player.typeclass, "at_post_disconnect")()
def get_player(self):
"""
Get the player associated with this session
"""
return self.logged_in and self.player
def get_puppet(self):
"""
Returns the in-game character associated with this session.
This returns the typeclass of the object.
"""
return self.logged_in and self.puppet
get_character = get_puppet
def get_puppet_or_player(self):
"""
Returns session if not logged in; puppet if one exists,
otherwise return the player.
"""
if self.logged_in:
return self.puppet if self.puppet else self.player
return None
def log(self, message, channel=True):
"""
Emits session info to the appropriate outputs and info channels.
"""
if channel:
try:
cchan = settings.CHANNEL_CONNECTINFO
cchan = ChannelDB.objects.get_channel(cchan[0])
cchan.msg("[%s]: %s" % (cchan.key, message))
except Exception:
pass
logger.log_infomsg(message)
def get_client_size(self):
"""
        Return the width and height reported by the client, if any.
        Note that this currently only deals with a single client
        window (windowID==0), as in a traditional telnet session.
"""
flags = self.protocol_flags
width = flags.get('SCREENWIDTH', {}).get(0, settings.CLIENT_DEFAULT_WIDTH)
height = flags.get('SCREENHEIGHT', {}).get(0, settings.CLIENT_DEFAULT_HEIGHT)
return width, height
def update_session_counters(self, idle=False):
"""
Hit this when the user enters a command in order to update idle timers
and command counters.
"""
# Store the timestamp of the user's last command.
self.cmd_last = time.time()
if not idle:
# Increment the user's command counter.
self.cmd_total += 1
# Player-visible idle time, not used in idle timeout calcs.
self.cmd_last_visible = time.time()
def data_in(self, text=None, **kwargs):
"""
Send User->Evennia. This will in effect
execute a command string on the server.
Especially handled keywords:
oob - this should hold a dictionary of oob command calls from
the oob-supporting protocol.
"""
#explicitly check for None since text can be an empty string, which is
#also valid
if text is not None:
# this is treated as a command input
#text = to_unicode(escape_control_sequences(text), encoding=self.encoding)
# handle the 'idle' command
if text.strip() == IDLE_COMMAND:
self.update_session_counters(idle=True)
return
if self.player:
# nick replacement
puppet = self.player.get_puppet(self.sessid)
if puppet:
text = puppet.nicks.nickreplace(text,
categories=("inputline", "channel"), include_player=True)
else:
text = self.player.nicks.nickreplace(text,
categories=("inputline", "channels"), include_player=False)
cmdhandler(self, text, callertype="session", sessid=self.sessid)
self.update_session_counters()
if "oob" in kwargs:
# handle oob instructions
global _OOB_HANDLER
if not _OOB_HANDLER:
from src.server.oobhandler import OOB_HANDLER as _OOB_HANDLER
oobstruct = self.sessionhandler.oobstruct_parser(kwargs.pop("oob", None))
#print "session.data_in: oobstruct:",oobstruct
for (funcname, args, kwargs) in oobstruct:
if funcname:
_OOB_HANDLER.execute_cmd(self, funcname, *args, **kwargs)
execute_cmd = data_in # alias
def data_out(self, text=None, **kwargs):
"""
Send Evennia -> User
"""
text = text if text else ""
if INLINEFUNC_ENABLED and not "raw" in kwargs:
text = parse_inlinefunc(text, strip="strip_inlinefunc" in kwargs, session=self)
self.sessionhandler.data_out(self, text=text, **kwargs)
def __eq__(self, other):
return self.address == other.address
def __str__(self):
"""
String representation of the user session class. We use
this a lot in the server logs.
"""
symbol = ""
if self.logged_in and hasattr(self, "player") and self.player:
symbol = "(#%s)" % self.player.id
try:
if hasattr(self.address, '__iter__'):
address = ":".join([str(part) for part in self.address])
else:
address = self.address
except Exception:
address = self.address
return "%s%s@%s" % (self.uname, symbol, address)
def __unicode__(self):
"""
Unicode representation
"""
return u"%s" % str(self)
# easy-access functions
#def login(self, player):
# "alias for at_login"
# self.session_login(player)
#def disconnect(self):
# "alias for session_disconnect"
# self.session_disconnect()
def msg(self, text='', **kwargs):
"alias for at_data_out"
self.data_out(text=text, **kwargs)
# Dummy API hooks for use during non-loggedin operation
def at_cmdset_get(self, **kwargs):
"dummy hook all objects with cmdsets need to have"
pass
# Mock db/ndb properties for allowing easy storage on the session
# (note that no databse is involved at all here. session.db.attr =
# value just saves a normal property in memory, just like ndb).
#@property
def ndb_get(self):
"""
A non-persistent store (ndb: NonDataBase). Everything stored
to this is guaranteed to be cleared when a server is shutdown.
Syntax is same as for the _get_db_holder() method and
property, e.g. obj.ndb.attr = value etc.
"""
try:
return self._ndb_holder
except AttributeError:
class NdbHolder(object):
"Holder for storing non-persistent attributes."
def all(self):
return [val for val in self.__dict__.keys()
                            if not val.startswith('_')]
def __getattribute__(self, key):
# return None if no matching attribute was found.
try:
return object.__getattribute__(self, key)
except AttributeError:
return None
self._ndb_holder = NdbHolder()
return self._ndb_holder
#@ndb.setter
def ndb_set(self, value):
"Stop accidentally replacing the db object"
string = "Cannot assign directly to ndb object! "
string = "Use ndb.attr=value instead."
raise Exception(string)
#@ndb.deleter
def ndb_del(self):
"Stop accidental deletion."
raise Exception("Cannot delete the ndb object!")
ndb = property(ndb_get, ndb_set, ndb_del)
db = property(ndb_get, ndb_set, ndb_del)
# Mock access method for the session (there is no lock info
# at this stage, so we just present a uniform API)
def access(self, *args, **kwargs):
"Dummy method."
return True
| bsd-3-clause | 1,319,670,371,899,397,600 | 35.295522 | 91 | 0.595115 | false |
BigPeet/pr0tagger | src/data_collection/data_collector.py | 1 | 6376 | from datetime import timedelta
import json
from os import listdir
from os.path import isfile, join
import pr0gramm
import logging
__author__ = "Peter Wolf"
__mail__ = "pwolf2310@gmail.com"
__date__ = "2016-12-26"
LOG = logging.getLogger(__name__)
class DataSources:
IMAGE, THUMBNAIL, FULL_SIZE = range(3)
class DataCollector:
""" The DataCollector retrieves relevant data from
pr0gramm and saves it locally.
"""
def __init__(self, api, last_id=None):
self.api = api
self.last_id = last_id
self.age_threshold = timedelta(hours=5)
self.min_num_of_tags = 5
self.search_forwards = True
self.media_directory = "/tmp"
self.data_source = DataSources.IMAGE
self.annotation_file = "/tmp/annotation.txt"
self.json_dir = "/tmp"
self.download_media = True
self.save_json = False
self.use_local_storage = False
self.last_batch_size = None
def setAgeThreshold(self, days=0, hours=5, minutes=0, seconds=0):
self.age_threshold = timedelta(
days=days, hours=hours, minutes=minutes, seconds=seconds)
def setMinimumNumberOfTags(self, threshold):
self.min_num_of_tags = threshold
def setLastId(self, last_id):
self.last_id = last_id
def getLastId(self):
return self.last_id
def useBackwardsSearch(self):
self.search_forwards = False
def useForwardsSearch(self):
self.search_forwards = True
def setMediaDirectory(self, directory):
self.media_directory = directory
def setDataSource(self, source):
self.data_source = source
def setAnnotationFile(self, annotation_file):
self.annotation_file = annotation_file
def setJsonDir(self, directory):
self.json_dir = directory
def setDownloadMedia(self, download_media):
self.download_media = download_media
def setSaveJSON(self, save_json):
self.save_json = save_json
def setUseLocalStorage(self, use_local_storage):
self.use_local_storage = use_local_storage
def getSizeOfLastBatch(self):
return self.last_batch_size
def download(self, item):
if self.data_source == DataSources.IMAGE:
return self.api.downloadMedia(
item, save_dir=self.media_directory, file_name=item.id)
elif self.data_source == DataSources.THUMBNAIL:
return self.api.downloadThumbnail(
item, save_dir=self.media_directory, file_name=item.id)
elif self.data_source == DataSources.FULL_SIZE:
return self.api.downloadFullsize(
item, save_dir=self.media_directory, file_name=item.id)
else:
print "No valid data source chosen:", str(self.data_source)
return None
def writeAnnotation(self, item, media_path):
# Read the current annotation file
content = []
if isfile(self.annotation_file):
with open(self.annotation_file, "r") as f:
content = f.readlines()
# write every item as a line with the following structure:
# ID;IMAGE_PATH;AMOUNT_OF_TAGS;...TAG_TEXT;TAG_CONFIDENCE;...
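        # Hypothetical example of a resulting line (values invented for
        # illustration only):
        #   1234567;/tmp/1234567.jpg;2;funny;310.0;cat;120.5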
new_line = str(item.id) + ";"
new_line += str(media_path) + ";"
new_line += str(len(item.tags)) + ";"
new_line += ";".join([str(tag.getText()) + ";" +
str(tag.getConfidence()) for tag in item.tags])
# Check if the item already has an entry in the annotation file
# and replace it.
contained = False
for i in range(len(content)):
if content[i].strip().startswith(str(item.id)):
content[i] = new_line
contained = True
break
# If no entry already exists, add a new line for the item
if not contained:
content.append(new_line)
# Write the new content to the file.
with open(self.annotation_file, "w") as f:
for line in content:
f.write(line.strip() + "\n")
def getItemsFromAPI(self):
if self.search_forwards:
return self.api.getItemsNewer(self.last_id)
else:
return self.api.getItemsOlder(self.last_id)
def getItemsFromLocalStorage(self):
json_files = [join(self.json_dir, f) for f in listdir(self.json_dir)
if isfile(join(self.json_dir, f)) and f.endswith(".json")]
data = []
for json_file in json_files:
with open(json_file, "r") as f:
json_item = json.load(f)
item = pr0gramm.Item.Item.parseFromJSON(json_item)
if not self.last_id \
or (self.search_forwards and item.getSortId() > self.last_id) \
or (not self.search_forwards and item.getSortId() < self.last_id):
data.append(item)
data.sort(reverse=True)
return data
def collectDataBatch(self, data=[]):
# retrieve data if none has been given
if not data:
if self.use_local_storage:
data = self.getItemsFromLocalStorage()
else:
data = self.getItemsFromAPI()
if not data:
return
# filter data based on age and tags
valid_data = []
for item in data:
if item.getAge() >= self.age_threshold and len(item.tags) > 0:
valid_data.append(item)
# save size of collected data batch
self.last_batch_size = len(valid_data)
if not valid_data:
return
# save id of last item to fit age criteria in search direction
if self.search_forwards:
self.last_id = valid_data[0].getSortId()
else:
self.last_id = valid_data[-1].getSortId()
for item in valid_data:
            if self.download_media:
# download media
target_path = self.download(item)
if target_path:
# write id(s), link to media and tags to file
self.writeAnnotation(item, target_path)
if self.save_json:
with open(self.json_dir + "/" + str(item.id) + ".json", "w") as f:
json.dump(item.asDict(), f)
return self.last_id
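# Rough usage sketch (illustrative; `api` must be a pr0gramm API object with the
# methods used above, and all paths are placeholders):
#
#   collector = DataCollector(api)
#   collector.setMediaDirectory("/data/pr0/media")
#   collector.setAnnotationFile("/data/pr0/annotation.txt")
#   last_id = collector.collectDataBatch()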
| mit | 7,316,717,142,867,548,000 | 32.382199 | 90 | 0.579831 | false |
borisd13/GridCompute | source/admin/database_management.py | 1 | 2862 | '''This module contains administrator functions for database management.'''
# Copyright 2014 Boris Dayma
#
# This file is part of GridCompute.
#
# GridCompute is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# GridCompute is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCompute. If not, see <http://www.gnu.org/licenses/>.
#
# For any question, please contact Boris Dayma at boris.dayma@gmail.com
import pymongo
def set_up_mongodb_server(mongodb_server, login, password, versions):
'''Sets up a mongodb server for GridCompute.
Mongo database "gridcompute" is initialized and the "versions" collection is created to specify
the program versions that are authorized by the database.
The "gridcompute" database must be present on the server. Any collection in it will be removed.
Args:
mongodb_server: Address of the mongo instance including connection port containing
*gridcompute* database like ``mongodbserver.com:888`` or ``10.0.0.1:888``
or ``Machine123:888``
login: Login used to connect on mongo database.
password: Password used to connect on mongo database.
versions: List of versions of gridcompute that the mongo database recognizes defined by:
- _id: version number (ex: '0.1').
- status: either "allowed", "warning" or "refused".
- message: message to be displayed when status is not "allowed" like::
[{'_id':'0.1', status:"warning", message:"Beta version},
{'_id':'1.0', status:"allowed"}]
'''
# create new connection
mongodb = pymongo.MongoClient('{}'.format(mongodb_server)).gridcompute
mongodb.authenticate(login, password)
# drop all previous collections
for collection in mongodb.collection_names(False):
mongodb.drop_collection(collection)
# create "versions" collection and populate it
mongodb['versions'].insert(versions)
if __name__ == "__main__":
# Define variables of mongodb server
mongodb_server = 'localhost:27017'
login, password = 'default_grid', 'gridcompute'
versions = [{'_id':'0.2', 'status':'warning', 'message':'This is a beta version used for test purposes only'}]
# Set up MongoDB server
set_up_mongodb_server(mongodb_server, login, password, versions)
| gpl-3.0 | -2,417,606,905,046,536,000 | 39.478261 | 114 | 0.662823 | false |
GrognardsFromHell/TemplePlus | tpdatasrc/co8infra/scr/Spell741 - Ice Breath Weapon.py | 1 | 1381 | from toee import *
def OnBeginSpellCast( spell ):
print "Frozen Breath OnBeginSpellCast"
print "spell.target_list=", spell.target_list
print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level
game.particles( "sp-evocation-conjure", spell.caster )
def OnSpellEffect ( spell ):
print "Frozen Breath OnSpellEffect"
remove_list = []
dam = dice_new( '1d6' )
dam.number = spell.spell_level
if dam.number > 6:
dam.number = 6
game.particles( 'sp-Cone of Cold', spell.caster )
npc = spell.caster
spell.dc = spell.dc + 5
if npc.name == 14999: ## Old White Dragon
dam.number = 8
spell.dc = 27
# range = 25 + 5 * int(spell.caster_level/2)
range = 60
target_list = list(game.obj_list_cone( spell.caster, OLC_CRITTERS, range, -30, 60 ))
target_list.remove(spell.caster)
for obj in target_list:
if obj.reflex_save_and_damage( spell.caster, spell.dc,
D20_Save_Reduction_Half, D20STD_F_NONE, dam, D20DT_COLD, D20DAP_UNSPECIFIED,
D20A_CAST_SPELL, spell.id ) > 0:
# saving throw successful
obj.float_mesfile_line( 'mes\\spell.mes', 30001 )
else:
# saving throw unsuccessful
obj.float_mesfile_line( 'mes\\spell.mes', 30002 )
spell.target_list.remove_list( remove_list )
spell.spell_end(spell.id)
def OnBeginRound( spell ):
print "Frozen Breath OnBeginRound"
def OnEndSpellCast( spell ):
print "Frozen Breath OnEndSpellCast"
| mit | 3,719,872,777,330,492,000 | 25.557692 | 85 | 0.703838 | false |
zibraproject/zika-pipeline | scripts/extract.py | 1 | 1704 | from poretools.Fast5File import Fast5FileSet
import sys
# extract with constraints:
# -- only one group ever
# -- only one flowcell ID ever
# -- always unique read ID
def run(parser, args):
flowcells = set()
reads = set()
i = 0
basecaller_version = None
for fast5 in Fast5FileSet(args.directory, None, args.basecaller):
# if not basecaller_version:
# basecaller_version = fast5.get_basecaller_version()
# elif fast5.get_basecaller_version() != basecaller_version:
# print >>sys.stderr, "ABORTED: More than one basecaller version found: %s, %s" % (basecaller_version, fast5.get_basecaller_version())
# raise SystemExit
if not fast5.is_open:
print >>sys.stderr, "Skipping read: %s" % (fast5.filename)
continue
read_flowcell_id = fast5.get_flowcell_id()
flowcells.add(read_flowcell_id)
if len(flowcells) != 1:
print >>sys.stderr, "ABORTED: More than one flowcell found in dataset: %s" % (flowcells,)
raise SystemExit
#if flowcell_id != read_flowcell_id:
# print >>sys.stderr, "Skipping read from flowcell: %s" % (read_flowcell_id)
# continue
read_id = fast5.get_read_id()
if read_id in reads:
print >>sys.stderr, "Skipping duplicate read: %s" % (read_id)
continue
reads.add(read_id)
fas = fast5.get_fastas('fwd')
for read in fas:
if read:
print read
fast5.close()
i += 1
if i % 1000 == 0:
print >>sys.stderr, "Extracted %s reads" % (i,)
# zibra.py
# run
# --flowcell
# --type 1d / 2d
# --check-sample-name
# --check-flowcell-name
# --min-support-value
# --min-depth
# --min-log-likelihood
# --normalised-depth
# --use-indels
# --trim-reads
# <scheme> <sample> <directory>
# list-schemes
| mit | -3,093,050,626,163,535,000 | 23.695652 | 136 | 0.658451 | false |
s910324/Sloth | bokehPlotter/bokehLine.py | 1 | 1301 |
class bokehLine(object):
def __init__(self, line, symbol = None, viewNum = None, parent = None):
self.line = line
self.symbol = symbol
self.viewNum = viewNum
self.style = None
self.val = {'name' : self.line.name,
'color' : self.line.line_color,
'width' : self.line.line_width,
'style' : None,
'symbol' : self.symbol,
'visible' : self.line.visible,
'viewNum' : self.viewNum}
def line_val(self, name = None, color = None, width = None,
style = None, symbol = None, visible = None, viewNum = None):
if name is not None:
self.line.name = name
if color:
self.line.line_color = color
if width is not None:
self.line.line_width = width
if style:
self.style = style
if symbol:
self.symbol = symbol
if visible is not None:
self.line.visible = visible
if viewNum is not None:
self.viewNum = viewNum
self.val.update({'name' : self.line.name})
self.val.update({'color' : self.line.line_color})
self.val.update({'width' : self.line.line_width})
self.val.update({'style' : self.style})
self.val.update({'symbol' : self.symbol})
self.val.update({'visible' : self.line.visible})
self.val.update({'viewNum' : self.viewNum})
return self.val
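# Hypothetical usage (names invented for illustration): given a Bokeh line glyph
# `r` exposing name/line_color/line_width/visible, wrap and restyle it with
#   bl = bokehLine(r, symbol='circle', viewNum=0)
#   settings = bl.line_val(color='red', width=2, visible=True)
# line_val returns the dict of the line's current settings after the update.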
| lgpl-3.0 | -8,097,567,311,734,556,000 | 30.731707 | 72 | 0.607994 | false |
explosion/spaCy | spacy/lang/bg/tokenizer_exceptions.py | 1 | 2189 | from ...symbols import ORTH, NORM
_exc = {}
_abbr_exc = [
{ORTH: "м", NORM: "метър"},
{ORTH: "мм", NORM: "милиметър"},
{ORTH: "см", NORM: "сантиметър"},
{ORTH: "дм", NORM: "дециметър"},
{ORTH: "км", NORM: "километър"},
{ORTH: "кг", NORM: "килограм"},
{ORTH: "мг", NORM: "милиграм"},
{ORTH: "г", NORM: "грам"},
{ORTH: "т", NORM: "тон"},
{ORTH: "хл", NORM: "хектолиър"},
{ORTH: "дкл", NORM: "декалитър"},
{ORTH: "л", NORM: "литър"},
]
for abbr in _abbr_exc:
_exc[abbr[ORTH]] = [abbr]
_abbr_line_exc = [
{ORTH: "г-жа", NORM: "госпожа"},
{ORTH: "г-н", NORM: "господин"},
{ORTH: "г-ца", NORM: "госпожица"},
{ORTH: "д-р", NORM: "доктор"},
{ORTH: "о-в", NORM: "остров"},
{ORTH: "п-в", NORM: "полуостров"},
]
for abbr in _abbr_line_exc:
_exc[abbr[ORTH]] = [abbr]
_abbr_dot_exc = [
{ORTH: "акад.", NORM: "академик"},
{ORTH: "ал.", NORM: "алинея"},
{ORTH: "арх.", NORM: "архитект"},
{ORTH: "бл.", NORM: "блок"},
{ORTH: "бр.", NORM: "брой"},
{ORTH: "бул.", NORM: "булевард"},
{ORTH: "в.", NORM: "век"},
{ORTH: "г.", NORM: "година"},
{ORTH: "гр.", NORM: "град"},
{ORTH: "ж.р.", NORM: "женски род"},
{ORTH: "инж.", NORM: "инженер"},
{ORTH: "лв.", NORM: "лев"},
{ORTH: "м.р.", NORM: "мъжки род"},
{ORTH: "мат.", NORM: "математика"},
{ORTH: "мед.", NORM: "медицина"},
{ORTH: "пл.", NORM: "площад"},
{ORTH: "проф.", NORM: "професор"},
{ORTH: "с.", NORM: "село"},
{ORTH: "с.р.", NORM: "среден род"},
{ORTH: "св.", NORM: "свети"},
{ORTH: "сп.", NORM: "списание"},
{ORTH: "стр.", NORM: "страница"},
{ORTH: "ул.", NORM: "улица"},
{ORTH: "чл.", NORM: "член"},
]
for abbr in _abbr_dot_exc:
_exc[abbr[ORTH]] = [abbr]
TOKENIZER_EXCEPTIONS = _exc
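# Illustrative effect of these exceptions: strings such as "г-жа" or "проф." are
# kept as single tokens whose NORM values are the expanded forms ("госпожа",
# "професор") instead of being split on the hyphen or the trailing period.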
| mit | -1,033,455,460,664,125,600 | 26.044776 | 39 | 0.491722 | false |
neuhofmo/RecBlast | RecBlastUtils.py | 1 | 10025 | #! /usr/bin/env python2
# A set of tools, functions, aliases and more used in RecBlast.
import os
import tarfile
import zipfile
from time import strftime, sleep
import re
import subprocess
from Bio import Entrez
import shutil
Entrez.email = "recblast@gmail.com"
Entrez.tool = "RecBlast"
TEMP_FILES_PATH = os.getcwd()
def prepare_files(items, file_name, user_id, files_path=TEMP_FILES_PATH):
"""Receives a list of items and a file to write them to, then writes them to file and returns the file path."""
full_path = join_folder(files_path, "_".join([user_id, file_name]))
# items = list(set(items)) # make the list unique # unnecessary
with open(full_path, 'w') as f:
for item in items:
f.write("{}\n".format(item)) # improved efficiency
return full_path
def file_to_string(file_name):
"""Reads a file (file_name) and returns the text in it as a string."""
with open(file_name, 'r') as f:
text = f.read()
# delete original file
os.remove(file_name)
return text
def remove_commas(file_name):
"""Replaces commas with newlines in a file."""
with open(file_name, 'r') as f:
text = f.read()
text = replace(text, ',', '\n')
with open(file_name, 'w') as f: # now writing
f.write(text)
return file_name
# def zip_results(fasta_output_path, csv_rbh_output_filename, csv_strict_output_filename, csv_ns_output_filename,
# output_path):
def zip_results(fasta_output_path, zip_list, output_path):
"""
    Receives a folder containing fasta sequences and a list of extra files (csv outputs), and adds them all to a zip archive.
    :param fasta_output_path: folder containing the fasta files to include
    :param zip_list: list of additional file paths (e.g. csv files) to include
    :param output_path: folder in which output.zip is created
    :return: path of the created zip file
"""
zip_file = join_folder(output_path, "output.zip")
fastas = [join_folder(fasta_output_path, x) for x in os.listdir(fasta_output_path)]
bname = os.path.basename # for efficiency
with zipfile.ZipFile(zip_file, mode='w') as zf:
# adding all fasta files
for fasta in fastas:
zf.write(fasta, bname(fasta))
# zf.write(csv_file_path, os.path.basename(csv_file_path)) # add csv file
# add csv files
for f_to_zip in zip_list:
zf.write(f_to_zip, bname(f_to_zip))
# zf.write(csv_rbh_output_filename, os.path.basename(csv_rbh_output_filename)) # add csv file
# zf.write(csv_strict_output_filename, os.path.basename(csv_strict_output_filename)) # add csv file
# zf.write(csv_ns_output_filename, os.path.basename(csv_ns_output_filename)) # add csv file
return zip_file
# debugging function
def debug_s(debug_string, to_debug):
"""
Receives a string and prints it, with a timestamp.
:param debug_string: a string to print
:param to_debug: boolean flag: True means print, False - ignore.
:return:
"""
if to_debug:
print "DEBUG {0}: {1}".format(strftime('%H:%M:%S'), debug_string)
def create_folder_if_needed(path):
"""
Receives a path and creates a folder when needed (if it doesn't already exist).
"""
if os.path.exists(path):
print "{} dir exists".format(path)
else:
print "{} dir does not exist. Creating dir.".format(path)
os.mkdir(path)
def file_len(fname):
"""Return the file length in lines."""
    i = -1  # so that an empty file returns 0 instead of raising UnboundLocalError
    with open(fname) as f:
        for i, l in enumerate(f):
            pass
    return i + 1
def targz_folder(archive_name, folder):
"""
    Compresses the given folder into a .tar.gz archive and returns True.
    :param archive_name: path of the .tar.gz archive to create
    :param folder: folder to compress
    :return: True once the archive has been written
"""
with tarfile.open(archive_name, "w:gz") as tar:
tar.add(folder, arcname=os.path.basename(folder))
return True
def cleanup(path, storage_folder, run_id):
"""
Performs tar and gzip on sets of files produced by the program.
Then deletes the files and folders.
:param path: # the run_folder
:param storage_folder: # the main folder, in which the entire run_folder will be stored
:param run_id: # the folder containing the first blast results
:return:
"""
# compress all files in path:
# fasta_path
path_archive = join_folder(storage_folder, "{}.all.tar.gz".format(run_id))
if targz_folder(path_archive, path): # compress run_folder
shutil.rmtree(path) # delete run folder
return True
def write_blast_run_script(command_line, write_folder):
"""Writing a blast run script, and giving it run permissions."""
# script_path = "/tmp/blastp_run.sh" # default script location
script_path = join_folder(write_folder, "blastp_run.sh") # script location
with open(script_path, 'w') as script:
# script.write("#! /bin/tcsh\n")
script.write("#! /bin/bash\n")
script.write("# The script is designed to run the following blastp command from RecBlast\n")
script.write(command_line)
# run permissions for the script:
os.chmod(script_path, 0751)
return script_path
def write_sort_command_script(filename_to_sort, sorted_filename, write_folder):
"""Writing a sort uniq script to edit the gene csv file."""
# script_path = "/tmp/sort_script.sh" # default script location
script_path = join_folder(write_folder, "sort_script.sh") # script location
with open(script_path, 'w') as script:
# script.write("#! /bin/tcsh\n")
script.write("#! /bin/bash\n")
script.write("# The script is designed to run sort, uniq command from RecBlast\n")
command_line = "cat {0} | sort | uniq > {1}.temp; " \
"echo 'gene_id,gene_name,uniprot_id' > {1}; cat {1}.temp >> {1}; " \
"rm {1}.temp\n".format(filename_to_sort, sorted_filename)
# changed to make sure the title only comes after the genes
script.write(command_line)
# run permissions for the script:
os.chmod(script_path, 0751)
return script_path
def merge_two_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
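# Illustrative example: merge_two_dicts({'a': 1}, {'b': 2}) -> {'a': 1, 'b': 2};
# on key collisions the value from the second dict wins.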
def is_number(s):
"""The function determines if a string is a number or a text. Returns True if it's a number. """
try:
int(s)
return True
except ValueError:
return False
def blastdb_exit():
"""Exiting if we can't find the $BLASTDB on the local machine"""
print("$BLASTDB was not found! Please set the blast DB path to the right location.")
print("Make sure blast+ is installed correctly.")
exit(1)
def exists_not_empty(path):
"""Receives a file path and checks if it exists and not empty."""
if os.path.exists(path) and os.stat(path).st_size > 0:
return True
else:
return False
def subset_db(tax_id, gi_file_path, db_path, big_db, run_anyway, DEBUG, debug, attempt_no=0):
"""
Subsets a big blast database into a smaller one based on tax_id.
The function connects to entrez and retrieves gi identifiers of sequences with the same tax_id.
:param tax_id: The tax_id (string)
:param gi_file_path: file path of the gi_list file we are creating
:param db_path: the new db path
:param big_db: we are about to subset
:param run_anyway: run on NR if unable to subset
:param attempt_no: counter for the attempts in connecting to Entrez (attempts to connect up to 10 times).
:param DEBUG: A boolean flag: True for debug prints, False for quiet run.
:param debug: A function call to provide debug prints.
:return:
"""
# connecting to ENTREZ protein DB
try:
handle = Entrez.esearch(db="protein", term="txid{}[ORGN]".format(tax_id), retmode="xml", retmax=10000000)
record = Entrez.read(handle)
except Exception, e: # DB connection exception
print "Error connecting to server, trying again..."
print "Error: {}".format(e)
debug("Error connecting to server, trying again...\n")
# sleeping in case it's a temporary database problem
sleep_period = 180
print "restarting attempt in {} seconds...".format(sleep_period)
sleep(sleep_period)
# counting the number of attempts to connect.
attempt_no += 1
if attempt_no >= 10: # If too many:
print "Tried connecting to Entrez DB more than 10 times. Check your connection or try again later."
exit(1)
# try again (recursive until max)
return subset_db(tax_id, gi_file_path, db_path, big_db, run_anyway, DEBUG, debug, attempt_no)
assert int(record["Count"]) == len(record["IdList"]), "Did not fetch all sequences!" # make sure we got it all...
# writing a gi list file
with open(gi_file_path, 'w') as gi_file:
gi_file.write("\n".join(record["IdList"]) + "\n")
# the new target database path
create_folder_if_needed(os.path.join(db_path, tax_id))
target_db = os.path.join(db_path, tax_id, "db")
aliastool_command = ["blastdb_aliastool", "-gilist", gi_file_path, "-db", big_db, "-dbtype", "prot", "-out",
target_db] # TODO: test that blastdb_aliastool works for the user
try:
subprocess.check_call(aliastool_command)
print("Created DB subset from nr protein for {}".format(tax_id))
return target_db
except subprocess.CalledProcessError:
print("Problem with creating DB for tax_id {} from nr.".format(tax_id))
if run_anyway:
print("Running with the heavy nr option. Do some stretches, it might be a long run.")
return big_db
print("Aborting.\n"
"If you want to run the program anyway against the entire nr "
"(which is significantly slower than the default run, please use the --run_even_if_no_db_found flag.")
exit(1)
# for efficiency
strip = str.strip
split = str.split
replace = str.replace
re_search = re.search
re_sub = re.sub
re_match = re.match
upper = str.upper
lower = str.lower
join_folder = os.path.join
| mit | -6,327,078,877,724,079,000 | 35.587591 | 118 | 0.6399 | false |
mark-rushakoff/FlackOverstow | grabber.py | 1 | 1835 | #!/usr/bin/env python
__author__ = "Mark Rushakoff"
__license__ = "MIT"
import sys
import urllib2
import re
import StringIO
import gzip
try:
import simplejson as json
except ImportError:
try:
import json
except ImportError:
sys.stderr.write("simplejson or json required for operation. Aborting.\n")
sys.exit()
try:
from BeautifulSoup import BeautifulStoneSoup as bss
except ImportError:
sys.stderr.write("BeautifulSoup required to format data")
sys.stderr.write("Try `easy_install beautifulsoup`")
sys.exit()
stripHtmlTags = re.compile(r"<[^>]*>")
compressWhiteSpace = re.compile(r"\s+")
def format(text):
return bss(compressWhiteSpace.sub(' ', stripHtmlTags.sub('', text)), convertEntities=bss.ALL_ENTITIES)
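# Illustrative example (a rough sketch of what format() produces):
#     str(format('<p>Hello,   <b>world</b></p>'))  ->  'Hello, world'
# The HTML tags are stripped and runs of whitespace are collapsed to single
# spaces before BeautifulSoup converts any remaining entities.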
class Grabber(object):
""" Class to obtain JSON data from Stack API """
_api = '1.0'
def __init__(self, site, user_id, api_key=None):
self.site = site
self.user_id = user_id
self.api_key = api_key
def _grab(self, users_arg):
url = 'http://api.%s/%s/users/%s/%s?body=true&pagesize=100' % (self.site, self._api, self.user_id, users_arg)
if self.api_key is not None:
url += '&key=%s' % self.api_key
content = StringIO.StringIO(urllib2.urlopen(url).read())
return gzip.GzipFile(fileobj=content).read()
def minimal_text(self, users_arg):
""" return a list of just the simple text of the `body`s of the users_arg section of the pulled json """
json_data = self._grab(users_arg)
answers = [answer['body'] for answer in json.loads(json_data)[users_arg]]
return [str(format(answer)) for answer in answers]
if __name__ == "__main__":
grabber = Grabber('stackoverflow.com', 126042)
for g in grabber.minimal_text('answers'):
print g
| mit | -9,221,221,283,494,337,000 | 29.583333 | 117 | 0.643052 | false |
astrobin/astrobin | astrobin_apps_payments/api/views/pricing_view.py | 1 | 1570 | import logging
from braces.views import JSONResponseMixin
from django.conf import settings
from django.http import HttpResponseBadRequest
from django.views import View
from rest_framework.authtoken.models import Token
from astrobin_apps_payments.services.pricing_service import PricingService
log = logging.getLogger('apps')
class PricingView(JSONResponseMixin, View):
def get(self, request, *args, **kwargs):
product = kwargs.pop('product', None) # type: str
currency = kwargs.pop('currency', None) # type: str
if product is None or product.lower() not in ('lite', 'premium', 'ultimate'):
log.error('pricing_view: invalid product: %s' % product)
return HttpResponseBadRequest("Invalid product")
if currency is None or currency.upper() not in settings.SUPPORTED_CURRENCIES:
log.error('pricing_view: invalid currency: %s' % currency)
return HttpResponseBadRequest("Unsupported currency")
user = None
if 'HTTP_AUTHORIZATION' in request.META:
token_in_header = request.META['HTTP_AUTHORIZATION'].replace('Token ', '')
token = Token.objects.get(key=token_in_header)
user = token.user
return self.render_json_response({
'fullPrice': PricingService.get_full_price(product.lower(), currency.upper()),
'discount': PricingService.get_discount_amount(product.lower(), currency.upper(), user=user),
'price': PricingService.get_price(product.lower(), currency.upper(), user=user)
})
| agpl-3.0 | -8,189,497,786,092,724,000 | 41.432432 | 105 | 0.67707 | false |
snopt/snopt-python | examples/snmainb.py | 1 | 7041 | """
An example SNOPTB problem.
"""
import numpy as np
from snopt import snoptb, SNOPT_options
def hexCon(mode,x,fCon,gCon,nState):
two = 2.0
fCon[ 0] = x[0]**2 + x[5]**2
fCon[ 1] = (x[1] - x[0])**2 + (x[6] - x[5])**2
fCon[ 2] = (x[2] - x[0])**2 + x[5]**2
fCon[ 3] = (x[0] - x[3])**2 + (x[5] - x[7])**2
fCon[ 4] = (x[0] - x[4])**2 + (x[5] - x[8])**2
fCon[ 5] = x[1]**2 + x[6]**2
fCon[ 6] = (x[2] - x[1])**2 + x[6]**2
fCon[ 7] = (x[3] - x[1])**2 + (x[7] - x[6])**2
fCon[ 8] = (x[1] - x[4])**2 + (x[6] - x[8])**2
fCon[ 9] = (x[3] - x[2])**2 + x[7]**2
fCon[10] = (x[4] - x[2])**2 + x[8]**2
fCon[11] = x[3]**2 + x[7]**2
fCon[12] = (x[3] - x[4])**2 + (x[8] - x[7])**2
fCon[13] = x[4]**2 + x[8]**2
# Nonlinear Jacobian elements for column 1.
# rows = [1,2,3,4,5].
gCon[ 0] = two*x[0]
gCon[ 1] = - two*(x[1] - x[0])
gCon[ 2] = - two*(x[2] - x[0])
gCon[ 3] = two*(x[0] - x[3])
gCon[ 4] = two*(x[0] - x[4])
# Nonlinear Jacobian elements for column 2.
# Rows = [2,6,7,8,9].
gCon[ 5] = two*(x[1] - x[0])
gCon[ 6] = two*x[1]
gCon[ 7] = - two*(x[2] - x[1])
gCon[ 8] = - two*(x[3] - x[1])
gCon[ 9] = two*(x[1] - x[4])
# Nonlinear Jacobian elements for column 3.
# Rows = [3,7,10,11].
gCon[10] = two*(x[2] - x[0])
gCon[11] = two*(x[2] - x[1])
gCon[12] = - two*(x[3] - x[2])
gCon[13] = - two*(x[4] - x[2])
# Nonlinear Jacobian elements for column 4.
# Rows = [4,8,10,12,13].
gCon[14] = - two*(x[0] - x[3])
gCon[15] = two*(x[3] - x[1])
gCon[16] = two*(x[3] - x[2])
gCon[17] = two*x[3]
gCon[18] = two*(x[3] - x[4])
# Nonlinear Jacobian elements for column 5.
# Rows = [5,9,11,13,14].
gCon[19] = - two*(x[0] - x[4])
gCon[20] = - two*(x[1] - x[4])
gCon[21] = two*(x[4] - x[2])
gCon[22] = - two*(x[3] - x[4])
gCon[23] = two*x[4]
# Nonlinear Jacobian elements for column 6.
# Rows = [1,2,3,4,5].
gCon[24] = two*x[5]
gCon[25] = - two*(x[6] - x[5])
gCon[26] = two*x[5]
gCon[27] = two*(x[5] - x[7])
gCon[28] = two*(x[5] - x[8])
# Nonlinear Jacobian elements for column 7.
# Rows = [2,6,7,8,9].
gCon[29] = two*(x[6] - x[5])
gCon[30] = two*x[6]
gCon[31] = two*x[6]
gCon[32] = - two*(x[7] - x[6])
gCon[33] = two*(x[6] - x[8])
# Nonlinear Jacobian elements for column 8.
# Rows = [4,8,10,12,13].
gCon[34] = - two*(x[5] - x[7])
gCon[35] = two*(x[7] - x[6])
gCon[36] = two*x[7]
gCon[37] = two*x[7]
gCon[38] = - two*(x[8] - x[7])
# Nonlinear Jacobian elements for column 9.
# Rows = [5,9,11,13,14].
gCon[39] = - two*(x[5] - x[8])
gCon[40] = - two*(x[6] - x[8])
gCon[41] = two*x[8]
gCon[42] = two*(x[8] - x[7])
gCon[43] = two*x[8]
return mode, fCon, gCon
def hexObj(mode,x,fObj,gObj,nState):
fObj = - x[1]*x[5] + x[0]*x[6] - x[2]*x[6] - x[4]*x[7] + x[3]*x[8] + x[2]*x[7]
gObj[0] = x[6]
gObj[1] = - x[5]
gObj[2] = - x[6] + x[7]
gObj[3] = x[8]
gObj[4] = - x[7]
gObj[5] = - x[1]
gObj[6] = - x[2] + x[0]
gObj[7] = - x[4] + x[2]
gObj[8] = x[3]
return mode, fObj, gObj
inf = 1.0e+20
options = SNOPT_options()
options.setOption('Infinite bound',inf)
options.setOption('Specs filename','snmainb.spc')
options.setOption('Print filename','snmainb.out')
m = 18
n = 9
nnCon = 14
ne = 52
nnJac = n
nnObj = n
bl = -inf*np.ones(n+m)
bu = inf*np.ones(n+m)
# Nonlinear constraints
bu[1+n:nnCon+n] = 1.0
# Linear constraints
bl[1+n+nnCon:m+n] = 0.0
# Variables
bl[0] = 0.0
bl[2] = -1.0
bl[4] = 0.0
bl[5] = 0.0
bl[6] = 0.0
bu[2] = 1.0
bu[7] = 0.0
bu[8] = 0.0
# Initial x
x = np.zeros(n+m)
x[0] = .1e+0
x[1] = .125e+0
x[2] = .666666e+0
x[3] = .142857e+0
x[4] = .111111e+0
x[5] = .2e+0
x[6] = .25e+0
x[7] = -.2e+0
x[8] = -.25e+0
# Jacobian
locJ = np.zeros(n+1,'i')
indJ = np.zeros(ne,'i')
valJ = np.zeros(ne,float)
locJ[0] = 0
indJ[ 0] = 0
indJ[ 1] = 1
indJ[ 2] = 2
indJ[ 3] = 3
indJ[ 4] = 4
valJ[ 0] = 0.0
valJ[ 1] = 0.0
valJ[ 2] = 0.0
valJ[ 3] = 0.0
valJ[ 4] = 0.0
# Column 1.
# Linear element in row 15 next.
indJ[ 5] = 14
valJ[ 5] = -1.0
# Column 2.
# Nonlinear elements in rows [2, 6, 7, 8, 9].
locJ[ 1] = 6
indJ[ 6] = 1
indJ[ 7] = 5
indJ[ 8] = 6
indJ[ 9] = 7
indJ[10] = 8
valJ[ 6] = 0.0
valJ[ 7] = 0.0
valJ[ 8] = 0.0
valJ[ 9] = 0.0
valJ[10] = 0.0
# Column 2.
# Linear elements in rows [15,16].
indJ[11] = 14
indJ[12] = 15
valJ[11] = 1.0
valJ[12] = -1.0
# Column 3.
# Nonlinear elements in rows [3, 7, 10, 11].
locJ[ 2] = 13
indJ[13] = 2
indJ[14] = 6
indJ[15] = 9
indJ[16] = 10
valJ[13] = 0.0
valJ[14] = 0.0
valJ[15] = 0.0
valJ[16] = 0.0
# Column 3.
# Linear elements in rows [16, 17].
indJ[17] = 15
indJ[18] = 16
valJ[17] = 1.0
valJ[18] = 1.0
# Column 4.
# Nonlinear elements in rows [4, 8, 10, 12, 13].
locJ[ 3] = 19
indJ[19] = 3
indJ[20] = 7
indJ[21] = 9
indJ[22] = 11
indJ[23] = 12
valJ[19] = 0.0
valJ[20] = 0.0
valJ[21] = 0.0
valJ[22] = 0.0
valJ[23] = 0.0
# Column 4.
# Linear elements in rows [17, 18].
indJ[24] = 16
indJ[25] = 17
valJ[24] = -1.0
valJ[25] = 1.0
# Column 5.
# Nonlinear elements in rows [5, 9, 11, 13, 14].
locJ[ 4] = 26
indJ[26] = 4
indJ[27] = 8
indJ[28] = 10
indJ[29] = 12
indJ[30] = 13
valJ[26] = 0.0
valJ[27] = 0.0
valJ[28] = 0.0
valJ[29] = 0.0
valJ[30] = 0.0
# Column 5.
# Linear element in row 18.
indJ[31] = 17
valJ[31] = -1.0
# Column 6.
# Nonlinear elements in rows [1, 2, 3, 4, 5].
locJ[5] = 32
indJ[32] = 0
indJ[33] = 1
indJ[34] = 2
indJ[35] = 3
indJ[36] = 4
valJ[32] = 0.0
valJ[33] = 0.0
valJ[34] = 0.0
valJ[35] = 0.0
valJ[36] = 0.0
# Column 7.
# Nonlinear elements in rows [2, 6, 7, 8, 9].
locJ[6] = 37
indJ[37] = 1
indJ[38] = 5
indJ[39] = 6
indJ[40] = 7
indJ[41] = 8
valJ[37] = 0.0
valJ[38] = 0.0
valJ[39] = 0.0
valJ[40] = 0.0
valJ[41] = 0.0
# Column 8.
# Nonlinear elements in rows [4, 8, 10, 12, 13].
locJ[7] = 42
indJ[42] = 3
indJ[43] = 7
indJ[44] = 9
indJ[45] = 11
indJ[46] = 12
valJ[42] = 0.0
valJ[43] = 0.0
valJ[44] = 0.0
valJ[45] = 0.0
valJ[46] = 0.0
# Column 9.
# Nonlinear elements in rows [5, 9, 11, 13, 14].
locJ[8] = 47
indJ[47] = 4
indJ[48] = 8
indJ[49] = 10
indJ[50] = 12
indJ[51] = 13
valJ[47] = 0.0
valJ[48] = 0.0
valJ[49] = 0.0
valJ[50] = 0.0
valJ[51] = 0.0
# Don't forget to finish off locJ.
# This is crucial.
locJ[ 9] = 51 + 1
# Put components of J into a tuple
J = (valJ,indJ,locJ)
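# A note on the sparse storage used above: the Jacobian is stored column-wise;
# locJ[j] gives the position in indJ/valJ where column j starts, indJ holds the
# 0-based row index of each entry, and valJ holds its value. The nonlinear
# entries are left at zero here and are computed by hexCon through gCon at
# run time.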
options.setOption('Verbose',True)
result = snoptb(hexObj,hexCon,nnObj=nnObj,nnCon=nnCon,nnJac=nnJac,
name=' snmainb',x0=x,bl=bl,bu=bu,J=J,m=m,n=n,options=options)
| mit | 1,068,860,622,659,655,700 | 17.876676 | 82 | 0.47337 | false |
moreati/pydot-ng | pydot_ng/__init__.py | 1 | 59602 | # -*- coding: Latin-1 -*-
# Graphviz's dot language Python interface.
# This module provides a full interface to create, handle, modify
# and process graphs in Graphviz's dot language.
# References:
# pydot Homepage: http://code.google.com/p/pydot/
# Graphviz: http://www.graphviz.org/
# DOT Language: http://www.graphviz.org/doc/info/lang.html
# Copyright (c) 2005-2011 Ero Carrera <ero.carrera@gmail.com>
# Distributed under MIT license
# [http://opensource.org/licenses/mit-license.html].
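#
# Illustrative usage (a minimal sketch; the file name and attributes are
# arbitrary, and rendering assumes a local Graphviz installation):
#
#     import pydot_ng as pydot
#
#     graph = pydot.Dot(graph_type='digraph')
#     graph.add_node(pydot.Node('a', shape='box'))
#     graph.add_node(pydot.Node('b', shape='ellipse'))
#     graph.add_edge(pydot.Edge('a', 'b', label='a to b'))
#     graph.write_png('example.png')
#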
from __future__ import division
from __future__ import print_function
import copy
import os
import re
import subprocess
import sys
import tempfile
from operator import itemgetter
try:
from pydot_ng import _dotparser as dot_parser
except Exception:
print("Couldn't import _dotparser, "
"loading of dot files will not be possible.")
__author__ = 'Ero Carrera'
__license__ = 'MIT'
PY3 = not sys.version_info < (3, 0, 0)
if PY3:
NULL_SEP = b''
basestring = str
long = int
unicode = str
else:
NULL_SEP = ''
GRAPH_ATTRIBUTES = set([
'Damping', 'K', 'URL', 'aspect', 'bb', 'bgcolor',
'center', 'charset', 'clusterrank', 'colorscheme', 'comment', 'compound',
'concentrate', 'defaultdist', 'dim', 'dimen', 'diredgeconstraints',
'dpi', 'epsilon', 'esep', 'fontcolor', 'fontname', 'fontnames',
'fontpath', 'fontsize', 'id', 'label', 'labeljust', 'labelloc',
'landscape', 'layers', 'layersep', 'layout', 'levels', 'levelsgap',
'lheight', 'lp', 'lwidth', 'margin', 'maxiter', 'mclimit', 'mindist',
'mode', 'model', 'mosek', 'nodesep', 'nojustify', 'normalize', 'nslimit',
'nslimit1', 'ordering', 'orientation', 'outputorder', 'overlap',
'overlap_scaling', 'pack', 'packmode', 'pad', 'page', 'pagedir',
'quadtree', 'quantum', 'rankdir', 'ranksep', 'ratio', 'remincross',
'repulsiveforce', 'resolution', 'root', 'rotate', 'searchsize', 'sep',
'showboxes', 'size', 'smoothing', 'sortv', 'splines', 'start',
'stylesheet', 'target', 'truecolor', 'viewport', 'voro_margin',
# for subgraphs
'rank'])
EDGE_ATTRIBUTES = set([
'URL', 'arrowhead', 'arrowsize', 'arrowtail',
'color', 'colorscheme', 'comment', 'constraint', 'decorate', 'dir',
'edgeURL', 'edgehref', 'edgetarget', 'edgetooltip', 'fontcolor',
'fontname', 'fontsize', 'headURL', 'headclip', 'headhref', 'headlabel',
'headport', 'headtarget', 'headtooltip', 'href', 'id', 'label',
'labelURL', 'labelangle', 'labeldistance', 'labelfloat', 'labelfontcolor',
'labelfontname', 'labelfontsize', 'labelhref', 'labeltarget',
'labeltooltip', 'layer', 'len', 'lhead', 'lp', 'ltail', 'minlen',
'nojustify', 'penwidth', 'pos', 'samehead', 'sametail', 'showboxes',
'style', 'tailURL', 'tailclip', 'tailhref', 'taillabel', 'tailport',
'tailtarget', 'tailtooltip', 'target', 'tooltip', 'weight',
'rank'])
NODE_ATTRIBUTES = set([
'URL', 'color', 'colorscheme', 'comment',
'distortion', 'fillcolor', 'fixedsize', 'fontcolor', 'fontname',
'fontsize', 'group', 'height', 'id', 'image', 'imagescale', 'label',
'labelloc', 'layer', 'margin', 'nojustify', 'orientation', 'penwidth',
'peripheries', 'pin', 'pos', 'rects', 'regular', 'root', 'samplepoints',
'shape', 'shapefile', 'showboxes', 'sides', 'skew', 'sortv', 'style',
'target', 'tooltip', 'vertices', 'width', 'z',
# The following are attributes dot2tex
'texlbl', 'texmode'])
CLUSTER_ATTRIBUTES = set([
'K', 'URL', 'bgcolor', 'color', 'colorscheme',
'fillcolor', 'fontcolor', 'fontname', 'fontsize', 'label', 'labeljust',
'labelloc', 'lheight', 'lp', 'lwidth', 'nojustify', 'pencolor',
'penwidth', 'peripheries', 'sortv', 'style', 'target', 'tooltip'])
def is_string_like(obj):
"""Check if obj is string. from John Hunter, types-free version"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def get_fobj(fname, mode='w+'):
"""Obtain a proper file object.
Parameters
----------
fname : string, file object, file descriptor
If a string or file descriptor, then we create a file object. If
*fname* is a file object, then we do nothing and ignore the specified
*mode* parameter.
mode : str
The mode of the file to be opened.
Returns
-------
fobj : file object
The file object.
close : bool
If *fname* was a string, then *close* will be *True* to signify that
the file object should be closed after writing to it. Otherwise,
*close* will be *False* signifying that the user, in essence,
created the file object already and that subsequent operations
should not close it.
"""
if is_string_like(fname):
fobj = open(fname, mode)
close = True
elif hasattr(fname, 'write'):
# fname is a file-like object, perhaps a StringIO (for example)
fobj = fname
close = False
else:
# assume it is a file descriptor
fobj = os.fdopen(fname, mode)
close = False
return fobj, close
#
# Extended version of ASPN's Python Cookbook Recipe:
# Frozen dictionaries.
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/414283
#
# This version freezes dictionaries used as values within dictionaries.
#
class frozendict(dict):
def _blocked_attribute(obj):
raise AttributeError("A frozendict cannot be modified.")
_blocked_attribute = property(_blocked_attribute)
__delitem__ = __setitem__ = clear = _blocked_attribute
pop = popitem = setdefault = update = _blocked_attribute
def __new__(cls, *args, **kw):
new = dict.__new__(cls)
args_ = []
for arg in args:
if isinstance(arg, dict):
arg = copy.copy(arg)
for k, v in arg.items():
if isinstance(v, frozendict):
arg[k] = v
elif isinstance(v, dict):
arg[k] = frozendict(v)
elif isinstance(v, list):
v_ = list()
for elm in v:
if isinstance(elm, dict):
v_.append(frozendict(elm))
else:
v_.append(elm)
arg[k] = tuple(v_)
args_.append(arg)
else:
args_.append(arg)
dict.__init__(new, *args_, **kw)
return new
def __init__(self, *args, **kw):
pass
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(tuple(sorted(self.items())))
return h
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
# cases when no quotes are needed, from http://www.graphviz.org/doc/info/lang.html
dot_keywords = ('graph', 'subgraph', 'digraph', 'node', 'edge', 'strict')
id_alpha_num = re.compile(r'^[_a-zA-Z\200-\377][_a-zA-Z0-9\200-\377]*$',
re.UNICODE)
id_num = re.compile(r'^[-]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)$', re.UNICODE)
id_html = re.compile(r'^<.*>$', re.DOTALL | re.UNICODE)
id_quoted = re.compile(r'^".*"$', re.DOTALL | re.UNICODE)
def needs_quotes(s):
"""Checks whether a string is a dot language ID.
    It will check whether the string is solely composed
    of the characters allowed in an ID or not.
If the string is one of the reserved keywords it will
need quotes too but the user will need to add them
manually.
"""
# If the name is a reserved keyword it will need quotes but pydot
# can't tell when it's being used as a keyword or when it's simply
# a name. Hence the user needs to supply the quotes when an element
# would use a reserved keyword as name. This function will return
# false indicating that a keyword string, if provided as-is, won't
# need quotes.
if s in dot_keywords:
return False
for test_re in [
id_alpha_num,
id_num,
id_html,
id_quoted,
]:
if test_re.match(s):
return False
chars = [ord(c) for c in s if ord(c) > 0x7f or ord(c) == 0]
if chars and not id_quoted.match(s) and not id_html.match(s):
return True
return True
def quote_if_necessary(s):
    # Older versions of graphviz throw a syntax error for empty values without
# quotes, e.g. [label=]
if s == '':
return '""'
if isinstance(s, bool):
if s is True:
return 'True'
return 'False'
if not isinstance(s, basestring):
return s
if not s:
return s
if needs_quotes(s):
replace = {'"': r'\"', "\n": r'\n', "\r": r'\r'}
for (a, b) in replace.items():
s = s.replace(a, b)
return '"' + s + '"'
return s
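# Illustrative behaviour of quote_if_necessary (a minimal sketch):
#     quote_if_necessary('node_1')   ->  'node_1'      # plain ID, no quotes
#     quote_if_necessary('my node')  ->  '"my node"'   # whitespace forces quoting
#     quote_if_necessary(3.14)       ->  3.14          # non-strings pass through
# Embedded double quotes, newlines and carriage returns are escaped before the
# surrounding quotes are added.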
def graph_from_dot_data(data):
"""Load graph as defined by data in DOT format.
The data is assumed to be in DOT format. It will
be parsed and a Dot class will be returned,
representing the graph.
"""
return dot_parser.parse_dot_data(data)
def graph_from_dot_file(path):
"""Load graph as defined by a DOT file.
The file is assumed to be in DOT format. It will
be loaded, parsed and a Dot class will be returned,
representing the graph.
"""
fd = open(path, 'rb')
data = fd.read()
fd.close()
return graph_from_dot_data(data)
def graph_from_edges(edge_list, node_prefix='', directed=False):
"""Creates a basic graph out of an edge list.
The edge list has to be a list of tuples representing
the nodes connected by the edge.
The values can be anything: bool, int, float, str.
If the graph is undirected by default, it is only
calculated from one of the symmetric halves of the matrix.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for edge in edge_list:
if isinstance(edge[0], str):
src = node_prefix + edge[0]
else:
src = node_prefix + str(edge[0])
if isinstance(edge[1], str):
dst = node_prefix + edge[1]
else:
dst = node_prefix + str(edge[1])
e = Edge(src, dst)
graph.add_edge(e)
return graph
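# Illustrative example (a minimal sketch; node names and prefix are arbitrary):
#     g = graph_from_edges([(1, 2), (2, 3)], node_prefix='n', directed=True)
#     g.to_string()  ->  roughly 'digraph G { n1 -> n2; n2 -> n3; }'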
def graph_from_adjacency_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an adjacency matrix.
The matrix has to be a list of rows of values
representing an adjacency matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
node_orig = 1
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
if not directed:
skip = matrix.index(row)
r = row[skip:]
else:
skip = 0
r = row
node_dest = skip + 1
for e in r:
if e:
graph.add_edge(
Edge(
                        node_prefix + str(node_orig),
                        node_prefix + str(node_dest)))
node_dest += 1
node_orig += 1
return graph
def graph_from_incidence_matrix(matrix, node_prefix='', directed=False):
"""Creates a basic graph out of an incidence matrix.
The matrix has to be a list of rows of values
representing an incidence matrix.
The values can be anything: bool, int, float, as long
as they can evaluate to True or False.
"""
if directed:
graph = Dot(graph_type='digraph')
else:
graph = Dot(graph_type='graph')
for row in matrix:
nodes = []
c = 1
for node in row:
if node:
nodes.append(c * node)
c += 1
nodes.sort()
if len(nodes) == 2:
graph.add_edge(
Edge(
                    node_prefix + str(abs(nodes[0])),
                    node_prefix + str(nodes[1])))
if not directed:
graph.set_simplify(True)
return graph
def __find_executables(path):
"""Used by find_graphviz
path - single directory as a string
If any of the executables are found, it will return a dictionary
containing the program names as keys and their paths as values.
Otherwise returns None
"""
success = False
progs = {'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': '',
'sfdp': ''}
was_quoted = False
path = path.strip()
if path.startswith('"') and path.endswith('"'):
path = path[1:-1]
was_quoted = True
if os.path.isdir(path):
for prg in progs.keys():
if progs[prg]:
continue
if os.path.exists(os.path.join(path, prg)):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg) + '"'
else:
progs[prg] = os.path.join(path, prg)
success = True
elif os.path.exists(os.path.join(path, prg + '.exe')):
if was_quoted:
progs[prg] = '"' + os.path.join(path, prg + '.exe') + '"'
else:
progs[prg] = os.path.join(path, prg + '.exe')
success = True
if success:
return progs
else:
return None
# The multi-platform version of this 'find_graphviz' function was
# contributed by Peter Cock
def find_graphviz():
"""Locate Graphviz's executables in the system.
Tries three methods:
First: Windows Registry (Windows only)
This requires Mark Hammond's pywin32 is installed.
Secondly: Search the path
It will look for 'dot', 'twopi' and 'neato' in all the directories
specified in the PATH environment variable.
Thirdly: Default install location (Windows only)
It will look for 'dot', 'twopi' and 'neato' in the default install
location under the "Program Files" directory.
It will return a dictionary containing the program names as keys
and their paths as values.
If this fails, it returns None.
"""
# Method 1 (Windows only)
if os.sys.platform == 'win32':
HKEY_LOCAL_MACHINE = 0x80000002
KEY_QUERY_VALUE = 0x0001
RegOpenKeyEx = None
RegQueryValueEx = None
RegCloseKey = None
try:
import win32api
RegOpenKeyEx = win32api.RegOpenKeyEx
RegQueryValueEx = win32api.RegQueryValueEx
RegCloseKey = win32api.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
pass
try:
import ctypes
def RegOpenKeyEx(key, subkey, opt, sam):
result = ctypes.c_uint(0)
ctypes.windll.advapi32.RegOpenKeyExA(key, subkey, opt, sam,
ctypes.byref(result))
return result.value
def RegQueryValueEx(hkey, valuename):
data_type = ctypes.c_uint(0)
data_len = ctypes.c_uint(1024)
data = ctypes.create_string_buffer(1024)
# this has a return value, which we should probably check
ctypes.windll.advapi32.RegQueryValueExA(
hkey, valuename, 0, ctypes.byref(data_type),
data, ctypes.byref(data_len))
return data.value
RegCloseKey = ctypes.windll.advapi32.RegCloseKey
except ImportError:
            # Print a message suggesting they install these?
pass
if RegOpenKeyEx is not None:
# Get the GraphViz install path from the registry
hkey = None
potentialKeys = [
"SOFTWARE\\ATT\\Graphviz",
"SOFTWARE\\AT&T Research Labs\\Graphviz"]
for potentialKey in potentialKeys:
try:
hkey = RegOpenKeyEx(
HKEY_LOCAL_MACHINE,
potentialKey, 0, KEY_QUERY_VALUE)
if hkey is not None:
path = RegQueryValueEx(hkey, "InstallPath")
RegCloseKey(hkey)
                        # The registry variable might exist, left by
# old installations but with no value, in those cases
# we keep searching...
if not path:
continue
# Now append the "bin" subdirectory:
path = os.path.join(path, "bin")
progs = __find_executables(path)
if progs is not None:
return progs
except Exception:
pass
else:
break
# Method 2 (Linux, Windows etc)
if 'PATH' in os.environ:
for path in os.environ['PATH'].split(os.pathsep):
progs = __find_executables(path)
if progs is not None:
return progs
# Method 3 (Windows only)
if os.sys.platform == 'win32':
# Try and work out the equivalent of "C:\Program Files" on this
# machine (might be on drive D:, or in a different language)
if 'PROGRAMFILES' in os.environ:
# Note, we could also use the win32api to get this
# information, but win32api may not be installed.
path = os.path.join(os.environ['PROGRAMFILES'], 'ATT',
'GraphViz', 'bin')
else:
# Just in case, try the default...
path = r"C:\Program Files\att\Graphviz\bin"
progs = __find_executables(path)
if progs is not None:
return progs
for path in (
'/usr/bin', '/usr/local/bin',
'/opt/local/bin',
'/opt/bin', '/sw/bin', '/usr/share',
'/Applications/Graphviz.app/Contents/MacOS/'):
progs = __find_executables(path)
if progs is not None:
return progs
# Failed to find GraphViz
return None
class Common(object):
"""Common information to several classes.
Should not be directly used, several classes are derived from
this one.
"""
def __getstate__(self):
dict = copy.copy(self.obj_dict)
return dict
def __setstate__(self, state):
self.obj_dict = state
def __get_attribute__(self, attr):
"""Look for default attributes for this node"""
attr_val = self.obj_dict['attributes'].get(attr, None)
if attr_val is None:
# get the defaults for nodes/edges
default_node_name = self.obj_dict['type']
# The defaults for graphs are set on a node named 'graph'
if default_node_name in ('subgraph', 'digraph', 'cluster'):
default_node_name = 'graph'
g = self.get_parent_graph()
if g is not None:
defaults = g.get_node(default_node_name)
else:
return None
# Multiple defaults could be set by having repeated 'graph [...]'
# 'node [...]', 'edge [...]' statements. In such case, if the
# same attribute is set in different statements, only the first
# will be returned. In order to get all, one would call the
# get_*_defaults() methods and handle those. Or go node by node
# (of the ones specifying defaults) and modify the attributes
# individually.
#
if not isinstance(defaults, (list, tuple)):
defaults = [defaults]
for default in defaults:
attr_val = default.obj_dict['attributes'].get(attr, None)
if attr_val:
return attr_val
else:
return attr_val
return None
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
def get_parent_graph(self):
return self.obj_dict.get('parent_graph', None)
def set(self, name, value):
"""Set an attribute value by name.
Given an attribute 'name' it will set its value to 'value'.
There's always the possibility of using the methods:
set_'name'(value)
which are defined for all the existing attributes.
"""
self.obj_dict['attributes'][name] = value
def get(self, name):
"""Get an attribute value by name.
Given an attribute 'name' it will get its value.
There's always the possibility of using the methods:
get_'name'()
which are defined for all the existing attributes.
"""
return self.obj_dict['attributes'].get(name, None)
def get_attributes(self):
return self.obj_dict['attributes']
def set_sequence(self, seq):
self.obj_dict['sequence'] = seq
def get_sequence(self):
return self.obj_dict['sequence']
def create_attribute_methods(self, obj_attributes):
for attr in obj_attributes:
# Generate all the Setter methods.
self.__setattr__(
'set_' + attr,
lambda x, a=attr: self.obj_dict['attributes'].
__setitem__(a, x))
# Generate all the Getter methods.
self.__setattr__('get_' + attr,
lambda a=attr: self.__get_attribute__(a))
class Error(Exception):
"""General error handling class."""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class InvocationException(Exception):
"""Indicate a ploblem occurred running any of the GraphViz executables."""
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
class Node(Common):
"""A graph node.
This class represents a graph's node with all its attributes.
node(name, attribute=value, ...)
name: node's name
All the attributes defined in the Graphviz dot language should
be supported.
"""
def __init__(self, name='', obj_dict=None, **attrs):
# Nodes will take attributes of all other types because the defaults
# for any GraphViz object are dealt with as if they were
# Node definitions
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
self.obj_dict['attributes'] = dict(attrs)
self.obj_dict['type'] = 'node'
self.obj_dict['parent_graph'] = None
self.obj_dict['parent_node_list'] = None
self.obj_dict['sequence'] = None
# Remove the compass point
port = None
if isinstance(name, basestring) and not name.startswith('"'):
idx = name.find(':')
if idx > 0 and idx + 1 < len(name):
name, port = name[:idx], name[idx:]
if isinstance(name, (long, int)):
name = str(name)
self.obj_dict['name'] = quote_if_necessary(name)
self.obj_dict['port'] = port
self.create_attribute_methods(NODE_ATTRIBUTES)
def set_name(self, node_name):
"""Set the node's name."""
self.obj_dict['name'] = node_name
def get_name(self):
"""Get the node's name."""
return self.obj_dict['name']
def get_port(self):
"""Get the node's port."""
return self.obj_dict['port']
def add_style(self, style):
styles = self.obj_dict['attributes'].get('style', None)
if not styles and style:
styles = [style]
else:
styles = styles.split(',')
styles.append(style)
self.obj_dict['attributes']['style'] = ','.join(styles)
def to_string(self):
"""Returns a string representation of the node in dot language."""
# RMF: special case defaults for node, edge and graph properties.
node = quote_if_necessary(self.obj_dict['name'])
node_attr = list()
for attr, value in sorted(self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
node_attr.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
node_attr.append(attr)
        # No point in having nodes setting any defaults if they don't set
# any attributes...
#
if node in ('graph', 'node', 'edge') and len(node_attr) == 0:
return ''
node_attr = ', '.join(node_attr)
if node_attr:
node += ' [' + node_attr + ']'
return node + ';'
class Edge(Common):
"""A graph edge.
This class represents a graph's edge with all its attributes.
edge(src, dst, attribute=value, ...)
src: source node's name
dst: destination node's name
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_label, set_fontname
or directly by using the instance's special dictionary:
Edge.obj_dict['attributes'][attribute name], i.e.
edge_instance.obj_dict['attributes']['label']
edge_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, src='', dst='', obj_dict=None, **attrs):
if isinstance(src, (list, tuple)) and dst == '':
src, dst = src
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
# Copy the attributes
self.obj_dict['attributes'] = dict(attrs)
self.obj_dict['type'] = 'edge'
self.obj_dict['parent_graph'] = None
self.obj_dict['parent_edge_list'] = None
self.obj_dict['sequence'] = None
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
points = (quote_if_necessary(src), quote_if_necessary(dst))
self.obj_dict['points'] = points
self.create_attribute_methods(EDGE_ATTRIBUTES)
def get_source(self):
"""Get the edges source node name."""
return self.obj_dict['points'][0]
def get_destination(self):
"""Get the edge's destination node name."""
return self.obj_dict['points'][1]
def __hash__(self):
return hash(hash(self.get_source()) + hash(self.get_destination()))
def __eq__(self, edge):
"""Compare two edges.
If the parent graph is directed, arcs linking
node A to B are considered equal and A->B != B->A
If the parent graph is undirected, any edge
connecting two nodes is equal to any other
edge connecting the same nodes, A->B == B->A
"""
if not isinstance(edge, Edge):
raise Error("Can't compare and edge to a non-edge object.")
if self.get_parent_graph().get_top_graph_type() == 'graph':
# If the graph is undirected, the edge has neither
# source nor destination.
if ((self.get_source() == edge.get_source() and
self.get_destination() == edge.get_destination()) or
(edge.get_source() == self.get_destination() and
edge.get_destination() == self.get_source())):
return True
else:
if (self.get_source() == edge.get_source() and
self.get_destination() == edge.get_destination()):
return True
return False
def parse_node_ref(self, node_str):
if not isinstance(node_str, str):
return node_str
if node_str.startswith('"') and node_str.endswith('"'):
return node_str
node_port_idx = node_str.rfind(':')
if (node_port_idx > 0 and node_str[0] == '"' and
node_str[node_port_idx - 1] == '"'):
return node_str
if node_port_idx > 0:
a = node_str[:node_port_idx]
b = node_str[node_port_idx + 1:]
node = quote_if_necessary(a)
node += ':' + quote_if_necessary(b)
return node
return node_str
def to_string(self):
"""Returns a string representation of the edge in dot language."""
src = self.parse_node_ref(self.get_source())
dst = self.parse_node_ref(self.get_destination())
if isinstance(src, frozendict):
edge = [Subgraph(obj_dict=src).to_string()]
elif isinstance(src, (int, long)):
edge = [str(src)]
else:
edge = [src]
if (self.get_parent_graph() and
self.get_parent_graph().get_top_graph_type() and
self.get_parent_graph().get_top_graph_type() == 'digraph'):
edge.append('->')
else:
edge.append('--')
if isinstance(dst, frozendict):
edge.append(Subgraph(obj_dict=dst).to_string())
elif isinstance(dst, (int, long)):
edge.append(str(dst))
else:
edge.append(dst)
edge_attr = list()
for attr, value in sorted(self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
edge_attr.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
edge_attr.append(attr)
edge_attr = ', '.join(edge_attr)
if edge_attr:
edge.append(' [' + edge_attr + ']')
return ' '.join(edge) + ';'
class Graph(Common):
"""Class representing a graph in Graphviz's dot language.
This class implements the methods to work on a representation
of a graph in Graphviz's dot language.
graph(graph_name='G', graph_type='digraph',
strict=False, suppress_disconnected=False, attribute=value, ...)
graph_name:
the graph's name
graph_type:
can be 'graph' or 'digraph'
suppress_disconnected:
        defaults to False; when set to True, disconnected
        nodes are removed from the graph.
simplify:
if True it will avoid displaying equal edges, i.e.
        only one edge between two nodes, removing the
        duplicated ones.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Graph.obj_dict['attributes'][attribute name], i.e.
graph_instance.obj_dict['attributes']['label']
graph_instance.obj_dict['attributes']['fontname']
"""
def __init__(
self, graph_name='G', obj_dict=None, graph_type='digraph',
strict=False, suppress_disconnected=False, simplify=False,
**attrs):
if obj_dict is not None:
self.obj_dict = obj_dict
else:
self.obj_dict = dict()
self.obj_dict['attributes'] = dict(attrs)
if graph_type not in ['graph', 'digraph']:
raise Error((
'Invalid type "%s". Accepted graph types are: '
'graph, digraph, subgraph' % graph_type))
self.obj_dict['name'] = quote_if_necessary(graph_name)
self.obj_dict['type'] = graph_type
self.obj_dict['strict'] = strict
self.obj_dict['suppress_disconnected'] = suppress_disconnected
self.obj_dict['simplify'] = simplify
self.obj_dict['current_child_sequence'] = 1
self.obj_dict['nodes'] = dict()
self.obj_dict['edges'] = dict()
self.obj_dict['subgraphs'] = dict()
self.set_parent_graph(self)
self.create_attribute_methods(GRAPH_ATTRIBUTES)
def get_graph_type(self):
return self.obj_dict['type']
def get_top_graph_type(self):
parent = self
while True:
parent_ = parent.get_parent_graph()
if parent_ == parent:
break
parent = parent_
return parent.obj_dict['type']
def set_graph_defaults(self, **attrs):
self.add_node(Node('graph', **attrs))
def get_graph_defaults(self, **attrs):
graph_nodes = self.get_node('graph')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_node_defaults(self, **attrs):
self.add_node(Node('node', **attrs))
def get_node_defaults(self, **attrs):
graph_nodes = self.get_node('node')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_edge_defaults(self, **attrs):
self.add_node(Node('edge', **attrs))
def get_edge_defaults(self, **attrs):
graph_nodes = self.get_node('edge')
if isinstance(graph_nodes, (list, tuple)):
return [node.get_attributes() for node in graph_nodes]
return graph_nodes.get_attributes()
def set_simplify(self, simplify):
"""Set whether to simplify or not.
If True it will avoid displaying equal edges, i.e.
only one edge between two nodes. removing the
duplicated ones.
"""
self.obj_dict['simplify'] = simplify
def get_simplify(self):
"""Get whether to simplify or not.
Refer to set_simplify for more information.
"""
return self.obj_dict['simplify']
def set_type(self, graph_type):
"""Set the graph's type, 'graph' or 'digraph'."""
self.obj_dict['type'] = graph_type
def get_type(self):
"""Get the graph's type, 'graph' or 'digraph'."""
return self.obj_dict['type']
def set_name(self, graph_name):
"""Set the graph's name."""
self.obj_dict['name'] = graph_name
def get_name(self):
"""Get the graph's name."""
return self.obj_dict['name']
def set_strict(self, val):
"""Set graph to 'strict' mode.
This option is only valid for top level graphs.
"""
self.obj_dict['strict'] = val
def get_strict(self, val):
"""Get graph's 'strict' mode (True, False).
This option is only valid for top level graphs.
"""
return self.obj_dict['strict']
def set_suppress_disconnected(self, val):
"""Suppress disconnected nodes in the output graph.
This option will skip nodes in the graph with no incoming or outgoing
        edges. This option also works for subgraphs and has effect only in the
current graph/subgraph.
"""
self.obj_dict['suppress_disconnected'] = val
def get_suppress_disconnected(self, val):
"""Get if suppress disconnected is set.
Refer to set_suppress_disconnected for more information.
"""
return self.obj_dict['suppress_disconnected']
def get_next_sequence_number(self):
seq = self.obj_dict['current_child_sequence']
self.obj_dict['current_child_sequence'] += 1
return seq
def add_node(self, graph_node):
"""Adds a node object to the graph.
It takes a node object as its only argument and returns
None.
"""
if not isinstance(graph_node, Node):
raise TypeError(''.join([
'add_node() received a non node class object: ',
str(graph_node)]))
node = self.get_node(graph_node.get_name())
if not node:
self.obj_dict['nodes'][graph_node.get_name()] =\
[graph_node.obj_dict]
graph_node.set_parent_graph(self.get_parent_graph())
else:
self.obj_dict['nodes'][graph_node.get_name()].\
append(graph_node.obj_dict)
graph_node.set_sequence(self.get_next_sequence_number())
def del_node(self, name, index=None):
"""Delete a node from the graph.
Given a node's name all node(s) with that same name
will be deleted if 'index' is not specified or set
to None.
If there are several nodes with that same name and
'index' is given, only the node in that position
will be deleted.
'index' should be an integer specifying the position
of the node to delete. If index is larger than the
number of nodes with that name, no action is taken.
If nodes are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(name, Node):
name = name.get_name()
if name in self.obj_dict['nodes']:
if index is not None and index < len(self.obj_dict['nodes'][name]):
del self.obj_dict['nodes'][name][index]
return True
else:
del self.obj_dict['nodes'][name]
return True
return False
def get_node(self, name):
"""Retrieve a node from the graph.
Given a node's name the corresponding Node
instance will be returned.
If one or more nodes exist with that name a list of
Node instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['nodes']:
match.extend([
Node(obj_dict=obj_dict)
for obj_dict
in self.obj_dict['nodes'][name]])
return match
def get_nodes(self):
"""Get the list of Node instances."""
return self.get_node_list()
def get_node_list(self):
"""Get the list of Node instances.
This method returns the list of Node instances
composing the graph.
"""
node_objs = list()
for node, obj_dict_list in self.obj_dict['nodes'].items():
node_objs.extend([
Node(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return node_objs
def add_edge(self, graph_edge):
"""Adds an edge object to the graph.
It takes a edge object as its only argument and returns None.
"""
if not isinstance(graph_edge, Edge):
raise TypeError(''.join(['add_edge() received a non edge class '
'object: ', str(graph_edge)]))
edge_points = (graph_edge.get_source(), graph_edge.get_destination())
if edge_points in self.obj_dict['edges']:
edge_list = self.obj_dict['edges'][edge_points]
edge_list.append(graph_edge.obj_dict)
else:
self.obj_dict['edges'][edge_points] = [graph_edge.obj_dict]
graph_edge.set_sequence(self.get_next_sequence_number())
graph_edge.set_parent_graph(self.get_parent_graph())
def del_edge(self, src_or_list, dst=None, index=None):
"""Delete an edge from the graph.
Given an edge's (source, destination) node names all
matching edges(s) will be deleted if 'index' is not
specified or set to None.
If there are several matching edges and 'index' is
given, only the edge in that position will be deleted.
'index' should be an integer specifying the position
of the edge to delete. If index is larger than the
number of matching edges, no action is taken.
If edges are deleted it returns True. If no action
is taken it returns False.
"""
if isinstance(src_or_list, (list, tuple)):
if dst is not None and isinstance(dst, (int, long)):
index = dst
src, dst = src_or_list
else:
src, dst = src_or_list, dst
if isinstance(src, Node):
src = src.get_name()
if isinstance(dst, Node):
dst = dst.get_name()
if (src, dst) in self.obj_dict['edges']:
if (index is not None and index <
len(self.obj_dict['edges'][(src, dst)])):
del self.obj_dict['edges'][(src, dst)][index]
return True
else:
del self.obj_dict['edges'][(src, dst)]
return True
return False
def get_edge(self, src_or_list, dst=None):
"""Retrieved an edge from the graph.
Given an edge's source and destination the corresponding
Edge instance(s) will be returned.
If one or more edges exist with that source and destination
a list of Edge instances is returned.
An empty list is returned otherwise.
"""
if isinstance(src_or_list, (list, tuple)) and dst is None:
edge_points = tuple(src_or_list)
edge_points_reverse = (edge_points[1], edge_points[0])
else:
edge_points = (src_or_list, dst)
edge_points_reverse = (dst, src_or_list)
match = list()
if edge_points in self.obj_dict['edges'] or (
self.get_top_graph_type() == 'graph' and
edge_points_reverse in self.obj_dict['edges']):
edges_obj_dict = self.obj_dict['edges'].get(
edge_points,
self.obj_dict['edges'].get(edge_points_reverse, None))
for edge_obj_dict in edges_obj_dict:
match.append(
Edge(edge_points[0], edge_points[1],
obj_dict=edge_obj_dict))
return match
def get_edges(self):
return self.get_edge_list()
def get_edge_list(self):
"""Get the list of Edge instances.
This method returns the list of Edge instances
composing the graph.
"""
edge_objs = list()
for edge, obj_dict_list in self.obj_dict['edges'].items():
edge_objs.extend([
Edge(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return edge_objs
def add_subgraph(self, sgraph):
"""Adds an subgraph object to the graph.
It takes a subgraph object as its only argument and returns
None.
"""
if (not isinstance(sgraph, Subgraph) and
not isinstance(sgraph, Cluster)):
raise TypeError(''.join([
'add_subgraph() received a non subgraph class object:',
str(sgraph)]))
if sgraph.get_name() in self.obj_dict['subgraphs']:
sgraph_list = self.obj_dict['subgraphs'][sgraph.get_name()]
sgraph_list.append(sgraph.obj_dict)
else:
self.obj_dict['subgraphs'][sgraph.get_name()] = [sgraph.obj_dict]
sgraph.set_sequence(self.get_next_sequence_number())
sgraph.set_parent_graph(self.get_parent_graph())
def get_subgraph(self, name):
"""Retrieved a subgraph from the graph.
Given a subgraph's name the corresponding
Subgraph instance will be returned.
If one or more subgraphs exist with the same name, a list of
Subgraph instances is returned.
An empty list is returned otherwise.
"""
match = list()
if name in self.obj_dict['subgraphs']:
sgraphs_obj_dict = self.obj_dict['subgraphs'].get(name)
for obj_dict_list in sgraphs_obj_dict:
match.append(Subgraph(obj_dict=obj_dict_list))
return match
def get_subgraphs(self):
return self.get_subgraph_list()
def get_subgraph_list(self):
"""Get the list of Subgraph instances.
This method returns the list of Subgraph instances
in the graph.
"""
sgraph_objs = list()
for sgraph, obj_dict_list in self.obj_dict['subgraphs'].items():
sgraph_objs.extend([
Subgraph(obj_dict=obj_d)
for obj_d
in obj_dict_list])
return sgraph_objs
def set_parent_graph(self, parent_graph):
self.obj_dict['parent_graph'] = parent_graph
for obj_list in self.obj_dict['nodes'].values():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['edges'].values():
for obj in obj_list:
obj['parent_graph'] = parent_graph
for obj_list in self.obj_dict['subgraphs'].values():
for obj in obj_list:
Graph(obj_dict=obj).set_parent_graph(parent_graph)
def to_string(self):
"""Returns a string representation of the graph in dot language.
It will return the graph and all its subelements in string from.
"""
graph = list()
if self.obj_dict.get('strict', None) is not None:
if self == self.get_parent_graph() and self.obj_dict['strict']:
graph.append('strict ')
if self.obj_dict['name'] == '':
if ('show_keyword' in self.obj_dict and
self.obj_dict['show_keyword']):
graph.append('subgraph {\n')
else:
graph.append('{\n')
else:
graph.append('%s %s {\n' % (self.obj_dict['type'],
self.obj_dict['name']))
for attr, value in sorted(self.obj_dict['attributes'].items(),
key=itemgetter(0)):
if value is not None:
graph.append('%s=%s' % (attr, quote_if_necessary(value)))
else:
graph.append(attr)
graph.append(';\n')
edges_done = set()
edge_obj_dicts = list()
for e in self.obj_dict['edges'].values():
edge_obj_dicts.extend(e)
if edge_obj_dicts:
edge_src_set, edge_dst_set = list(
zip(*[obj['points'] for obj in edge_obj_dicts]))
edge_src_set, edge_dst_set = set(edge_src_set), set(edge_dst_set)
else:
edge_src_set, edge_dst_set = set(), set()
node_obj_dicts = list()
for e in self.obj_dict['nodes'].values():
node_obj_dicts.extend(e)
sgraph_obj_dicts = list()
for sg in self.obj_dict['subgraphs'].values():
sgraph_obj_dicts.extend(sg)
obj_list = sorted([
(obj['sequence'], obj)
for obj
in (edge_obj_dicts + node_obj_dicts + sgraph_obj_dicts)])
for idx, obj in obj_list:
if obj['type'] == 'node':
node = Node(obj_dict=obj)
if self.obj_dict.get('suppress_disconnected', False):
if (node.get_name() not in edge_src_set and
node.get_name() not in edge_dst_set):
continue
graph.append(node.to_string() + '\n')
elif obj['type'] == 'edge':
edge = Edge(obj_dict=obj)
if self.obj_dict.get('simplify', False) and edge in edges_done:
continue
graph.append(edge.to_string() + '\n')
edges_done.add(edge)
else:
sgraph = Subgraph(obj_dict=obj)
graph.append(sgraph.to_string() + '\n')
graph.append('}\n')
return ''.join(graph)
class Subgraph(Graph):
"""Class representing a subgraph in Graphviz's dot language.
This class implements the methods to work on a representation
of a subgraph in Graphviz's dot language.
subgraph(graph_name='subG', suppress_disconnected=False, attribute=value,
...)
graph_name:
the subgraph's name
suppress_disconnected:
        defaults to False; when set to True, disconnected
        nodes are removed from the subgraph.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_size, set_fontname
or using the instance's attributes:
Subgraph.obj_dict['attributes'][attribute name], i.e.
subgraph_instance.obj_dict['attributes']['label']
subgraph_instance.obj_dict['attributes']['fontname']
"""
# RMF: subgraph should have all the attributes of graph so it can be passed
# as a graph to all methods
def __init__(
self, graph_name='', obj_dict=None, suppress_disconnected=False,
simplify=False, **attrs):
Graph.__init__(
self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected, simplify=simplify,
**attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
class Cluster(Graph):
"""Class representing a cluster in Graphviz's dot language.
This class implements the methods to work on a representation
of a cluster in Graphviz's dot language.
cluster(graph_name='subG', suppress_disconnected=False, attribute=value,
...)
graph_name:
        the cluster's name (the string 'cluster_' will always be prepended)
    suppress_disconnected:
        defaults to False; when set to True, disconnected
        nodes are removed from the cluster.
All the attributes defined in the Graphviz dot language should
be supported.
Attributes can be set through the dynamically generated methods:
set_[attribute name], i.e. set_color, set_fontname
or using the instance's attributes:
Cluster.obj_dict['attributes'][attribute name], i.e.
cluster_instance.obj_dict['attributes']['label']
cluster_instance.obj_dict['attributes']['fontname']
"""
def __init__(self, graph_name='subG', obj_dict=None,
suppress_disconnected=False, simplify=False, **attrs):
Graph.__init__(self, graph_name=graph_name, obj_dict=obj_dict,
suppress_disconnected=suppress_disconnected,
simplify=simplify, **attrs)
if obj_dict is None:
self.obj_dict['type'] = 'subgraph'
self.obj_dict['name'] = 'cluster_' + graph_name
self.create_attribute_methods(CLUSTER_ATTRIBUTES)
class Dot(Graph):
"""A container for handling a dot language file.
This class implements methods to write and process
a dot language file. It is a derived class of
the base class 'Graph'.
"""
def __init__(self, *argsl, **argsd):
Graph.__init__(self, *argsl, **argsd)
self.shape_files = list()
self.progs = None
self.formats = [
'canon', 'cmap', 'cmapx', 'cmapx_np', 'dia', 'dot',
'fig', 'gd', 'gd2', 'gif', 'hpgl', 'imap', 'imap_np', 'ismap',
'jpe', 'jpeg', 'jpg', 'mif', 'mp', 'pcl', 'pdf', 'pic', 'plain',
'plain-ext', 'png', 'ps', 'ps2', 'svg', 'svgz', 'vml', 'vmlz',
'vrml', 'vtx', 'wbmp', 'xdot', 'xlib']
self.prog = 'dot'
# Automatically creates all the methods enabling the creation
# of output in any of the supported formats.
for frmt in self.formats:
self.__setattr__(
'create_' + frmt,
lambda f=frmt, prog=self.prog:
self.create(format=f, prog=prog))
f = self.__dict__['create_' + frmt]
f.__doc__ = (
                '''Refer to the docstring accompanying the '''
''''create' method for more information.''')
for frmt in self.formats + ['raw']:
self.__setattr__(
'write_' + frmt,
lambda path, f=frmt, prog=self.prog: self.write(path, format=f,
prog=prog))
f = self.__dict__['write_' + frmt]
f.__doc__ = (
                '''Refer to the docstring accompanying the '''
''''write' method for more information.''')
def __getstate__(self):
return copy.copy(self.obj_dict)
def __setstate__(self, state):
self.obj_dict = state
def set_shape_files(self, file_paths):
"""Add the paths of the required image files.
        If the graph needs graphic objects to be used as shapes or otherwise,
        those need to be in the same folder that the graph is going to be
        rendered from. Alternatively, the absolute path to the files can be
specified when including the graphics in the graph.
The files in the location pointed to by the path(s) specified as
arguments to this method will be copied to the same temporary location
where the graph is going to be rendered.
"""
if isinstance(file_paths, basestring):
self.shape_files.append(file_paths)
if isinstance(file_paths, (list, tuple)):
self.shape_files.extend(file_paths)
def set_prog(self, prog):
"""Sets the default program.
Sets the default program in charge of processing
the dot file into a graph.
"""
self.prog = prog
def set_graphviz_executables(self, paths):
"""Manually specify the location of the GraphViz executables.
The argument to this method should be a dictionary where the keys
are as follows:
{'dot': '', 'twopi': '', 'neato': '', 'circo': '', 'fdp': ''}
and the values are the paths to the corresponding executable,
including the name of the executable itself
"""
self.progs = paths
def write(self, path, prog=None, format='raw'):
"""Write graph to file in selected format.
Given a filename 'path' it will open/create and truncate
such file and write on it a representation of the graph
defined by the dot object and in the format specified by
'format'. 'path' can also be an open file-like object, such as
a StringIO instance.
The format 'raw' is used to dump the string representation
of the Dot object, without further processing.
The output can be processed by any of graphviz tools, defined
in 'prog', which defaults to 'dot'
Returns True or False according to the success of the write
operation.
There's also the preferred possibility of using:
write_'format'(path, prog='program')
which are automatically defined for all the supported formats.
[write_ps(), write_gif(), write_dia(), ...]
"""
if prog is None:
prog = self.prog
fobj, close = get_fobj(path, 'w+b')
try:
if format == 'raw':
data = self.to_string()
if isinstance(data, basestring):
if not isinstance(data, unicode):
try:
data = unicode(data, 'utf-8')
except Exception:
pass
try:
charset = self.get_charset()
if not PY3 or not charset:
charset = 'utf-8'
data = data.encode(charset)
except Exception:
if PY3:
data = data.encode('utf-8')
pass
fobj.write(data)
else:
fobj.write(self.create(prog, format))
finally:
if close:
fobj.close()
return True
def create(self, prog=None, format='ps'):
"""Creates and returns a Postscript representation of the graph.
create will write the graph to a temporary dot file and process
it with the program given by 'prog' (which defaults to 'twopi'),
reading the Postscript output and returning it as a string is the
operation is successful.
On failure None is returned.
There's also the preferred possibility of using:
create_'format'(prog='program')
which are automatically defined for all the supported formats.
[create_ps(), create_gif(), create_dia(), ...]
        If 'prog' is a list instead of a string, the first item is expected
to be the program name, followed by any optional command-line
arguments for it:
['twopi', '-Tdot', '-s10']
"""
if prog is None:
prog = self.prog
if isinstance(prog, (list, tuple)):
prog, args = prog[0], prog[1:]
else:
args = []
if self.progs is None:
self.progs = find_graphviz()
if self.progs is None:
raise InvocationException(
'GraphViz\'s executables not found')
if prog not in self.progs:
raise InvocationException(
'GraphViz\'s executable "%s" not found' % prog)
if (not os.path.exists(self.progs[prog]) or
not os.path.isfile(self.progs[prog])):
raise InvocationException(
'GraphViz\'s executable "%s" is not a file or doesn\'t exist'
% self.progs[prog])
tmp_fd, tmp_name = tempfile.mkstemp()
os.close(tmp_fd)
self.write(tmp_name)
tmp_dir = os.path.dirname(tmp_name)
# For each of the image files...
for img in self.shape_files:
# Get its data
f = open(img, 'rb')
f_data = f.read()
f.close()
# And copy it under a file with the same name in the
# temporary directory
f = open(os.path.join(tmp_dir, os.path.basename(img)), 'wb')
f.write(f_data)
f.close()
cmdline = [self.progs[prog], '-T' + format, tmp_name] + args
p = subprocess.Popen(
cmdline,
cwd=tmp_dir,
stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stderr = p.stderr
stdout = p.stdout
stdout_output = list()
while True:
data = stdout.read()
if not data:
break
stdout_output.append(data)
stdout.close()
stdout_output = NULL_SEP.join(stdout_output)
if not stderr.closed:
stderr_output = list()
while True:
data = stderr.read()
if not data:
break
stderr_output.append(data)
stderr.close()
if stderr_output:
stderr_output = NULL_SEP.join(stderr_output)
if PY3:
stderr_output = stderr_output.decode(sys.stderr.encoding)
status = p.wait()
if status != 0:
raise InvocationException(
'Program terminated with status: %d. stderr follows: %s' % (
status, stderr_output))
elif stderr_output:
print(stderr_output)
# For each of the image files...
for img in self.shape_files:
# remove it
os.unlink(os.path.join(tmp_dir, os.path.basename(img)))
os.unlink(tmp_name)
return stdout_output
| mit | -1,778,603,842,485,955,300 | 30.468849 | 79 | 0.556089 | false |
saurabh6790/frappe | frappe/core/doctype/user/user.py | 1 | 39931 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
from bs4 import BeautifulSoup
import frappe
import frappe.share
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import (cint, flt, has_gravatar, escape_html, format_datetime,
now_datetime, get_formatted_email, today)
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password, check_password, get_password_reset_limit
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings, toggle_notifications
from frappe.utils.user import get_system_managers
from frappe.website.utils import is_signup_enabled
from frappe.rate_limiter import rate_limit
from frappe.utils.background_jobs import enqueue
from frappe.core.doctype.user_type.user_type import user_linked_with_permission_on_doctype
STANDARD_USERS = ("Guest", "Administrator")
class MaxUsersReachedError(frappe.ValidationError):
pass
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
frappe.cache().delete_key('users_for_mentions')
def validate(self):
self.check_demo()
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_allowed_modules()
self.validate_user_image()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_allowed_modules(self):
if self.module_profile:
module_profile = frappe.get_doc('Module Profile', self.module_profile)
self.set('block_modules', [])
for d in module_profile.get('block_modules'):
self.append('block_modules', {
'module': d.module
})
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
def on_update(self):
# clear new password
self.share_with_self()
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
now=frappe.flags.in_test or frappe.flags.in_install
self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True,
now=now
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name, now=now)
# Set user selected timezone
if self.time_zone:
frappe.defaults.set_default("time_zone", self.time_zone, self.name)
if self.has_value_changed('allow_in_mentions') or self.has_value_changed('user_type'):
frappe.cache().delete_key('users_for_mentions')
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def check_demo(self):
if frappe.session.user == 'demo@erpnext.com':
frappe.throw(_('Cannot change user details in demo. Please signup for a new account at https://erpnext.com'), title=_('Not Allowed'))
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# toggle notifications based on the user's status
toggle_notifications(self.name, enable=cint(self.enabled))
def add_system_manager_role(self):
		# do nothing if the user is disabled or already has the System Manager role
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''For the standard users like admin and guest, the user type is fixed.'''
user_type_mapper = {
'Administrator': 'System User',
'Guest': 'Website User'
}
if self.user_type and not frappe.get_cached_value('User Type', self.user_type, 'is_standard'):
if user_type_mapper.get(self.name):
self.user_type = user_type_mapper.get(self.name)
else:
self.set_roles_and_modules_based_on_user_type()
else:
'''Set as System User if any of the given roles has desk_access'''
self.user_type = 'System User' if self.has_desk_access() else 'Website User'
def set_roles_and_modules_based_on_user_type(self):
user_type_doc = frappe.get_cached_doc('User Type', self.user_type)
if user_type_doc.role:
self.roles = []
			# Check whether the user is linked with the 'Apply User Permission On' doctype
if user_linked_with_permission_on_doctype(user_type_doc, self.name):
self.append('roles', {
'role': user_type_doc.role
})
frappe.msgprint(_('Role has been set as per the user type {0}')
.format(self.user_type), alert=True)
user_type_doc.update_modules_in_user(self)
def has_desk_access(self):
'''Return true if any of the set roles has desk access'''
if not self.roles:
return False
return len(frappe.db.sql("""select name
from `tabRole` where desk_access=1
and name in ({0}) limit 1""".format(', '.join(['%s'] * len(self.roles))),
[d.role for d in self.roles]))
def share_with_self(self):
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
def validate_share(self, docshare):
pass
# if docshare.user == self.name:
# if self.user_type=="System User":
# if docshare.share != 1:
# frappe.throw(_("Sorry! User should have complete access to their own record."))
# else:
# frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and cint(self.send_welcome_email):
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
def get_other_system_managers(self):
return frappe.db.sql("""select distinct `user`.`name` from `tabHas Role` as `user_role`, `tabUser` as `user`
where user_role.role='System Manager'
and `user`.docstatus<2
and `user`.enabled=1
and `user_role`.parent = `user`.name
and `user_role`.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
created_by = get_user_fullname(frappe.session['user'])
if created_by == "Guest":
created_by = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'created_by': created_by
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now!=None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.sql("""DELETE FROM `tabToDo` WHERE `owner`=%s""", (self.name,))
frappe.db.sql("""UPDATE `tabToDo` SET `assigned_by`=NULL WHERE `assigned_by`=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
# delete shares
frappe.db.sql("""delete from `tabDocShare` where user=%s""", self.name)
# delete messages
frappe.db.sql("""delete from `tabCommunication`
where communication_type in ('Chat', 'Notification')
and reference_doctype='User'
and (reference_name=%s or owner=%s)""", (self.name, self.name))
# unlink contact
frappe.db.sql("""update `tabContact`
set `user`=null
where `user`=%s""", (self.name))
# delete notification settings
frappe.delete_doc("Notification Settings", self.name, ignore_permissions=True)
if self.get('allow_in_mentions'):
frappe.cache().delete_key('users_for_mentions')
def before_rename(self, old_name, new_name, merge=False):
self.check_demo()
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Chat Profile", old_name):
frappe.rename_doc("Chat Profile", old_name, new_name, force=True, show_alert=False)
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.sql("""UPDATE `tabUser`
SET email = %s
WHERE name = %s""", (new_name, new_name))
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
except:
return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@classmethod
def find_by_credentials(cls, user_name: str, password: str, validate_password: bool = True):
"""Find the user by credentials.
This is a login utility that needs to check login related system settings while finding the user.
1. Find user by email ID by default
2. If allow_login_using_mobile_number is set, you can use mobile number while finding the user.
3. If allow_login_using_user_name is set, you can use username while finding the user.
"""
login_with_mobile = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number"))
login_with_username = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name"))
or_filters = [{"name": user_name}]
if login_with_mobile:
or_filters.append({"mobile_no": user_name})
if login_with_username:
or_filters.append({"username": user_name})
users = frappe.db.get_all('User', fields=['name', 'enabled'], or_filters=or_filters, limit=1)
if not users:
return
user = users[0]
user['is_authenticated'] = True
if validate_password:
try:
check_password(user['name'], password, delete_tracker_cache=False)
except frappe.AuthenticationError:
user['is_authenticated'] = False
return user
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
#validate key to avoid key input like ['like', '%'], '', ['in', ['']]
if key and not isinstance(key, str):
frappe.throw(_('Invalid key type'))
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
frappe.local.response.http_status_code = 410
return res['message']
else:
user = res['user']
logout_all_sessions = cint(logout_all_sessions) or frappe.db.get_single_value("System Settings", "logout_on_password_reset")
_update_password(user, new_password, logout_all_sessions=cint(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/app"
else:
return redirect_url if redirect_url else "/"
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=None):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
		# score should be greater than 0 and at least minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.db.sql("""select email_account,email_id
from `tabUser Email`
where awaiting_password = 1
and parent = %(user)s""", {"user":user}, as_dict=1)
if waiting:
return waiting
else:
frappe.db.sql("""update `tabUser Email`
set awaiting_password =0
where parent = %(user)s""",{"user":user})
return False
@frappe.whitelist(allow_guest=False)
def set_email_password(email_account, user, password):
account = frappe.get_doc("Email Account", email_account)
if account.awaiting_password:
account.awaiting_password = 0
account.password = password
try:
account.save(ignore_permissions=True)
except Exception:
frappe.db.rollback()
return False
return True
def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing):
""" setup email inbox for user """
def add_user_email(user):
user = frappe.get_doc("User", user)
row = user.append("user_emails", {})
row.email_id = email_id
row.email_account = email_account
row.awaiting_password = awaiting_password or 0
row.enable_outgoing = enable_outgoing or 0
user.save(ignore_permissions=True)
	update_user_email_settings = False
if not all([email_account, email_id]):
return
user_names = frappe.db.get_values("User", { "email": email_id }, as_dict=True)
if not user_names:
return
for user in user_names:
user_name = user.get("name")
		# check if inbox is already configured
user_inbox = frappe.db.get_value("User Email", {
"email_account": email_account,
"parent": user_name
}, ["name"]) or None
if not user_inbox:
add_user_email(user_name)
else:
# update awaiting password for email account
			update_user_email_settings = True
	if update_user_email_settings:
frappe.db.sql("""UPDATE `tabUser Email` SET awaiting_password = %(awaiting_password)s,
enable_outgoing = %(enable_outgoing)s WHERE email_account = %(email_account)s""", {
"email_account": email_account,
"enable_outgoing": enable_outgoing,
"awaiting_password": awaiting_password or 0
})
else:
users = " and ".join([frappe.bold(user.get("name")) for user in user_names])
frappe.msgprint(_("Enabled email inbox for user {0}").format(users))
ask_pass_update()
def remove_user_email_inbox(email_account):
""" remove user email inbox settings if email account is deleted """
if not email_account:
return
users = frappe.get_all("User Email", filters={
"email_account": email_account
}, fields=["parent as name"])
for user in users:
doc = frappe.get_doc("User", user.get("name"))
to_remove = [ row for row in doc.user_emails if row.email_account == email_account ]
[ doc.remove(row) for row in to_remove ]
doc.save(ignore_permissions=True)
def ask_pass_update():
	# update the system defaults with the list of users awaiting a password
from frappe.utils import set_default
users = frappe.db.sql("""SELECT DISTINCT(parent) as user FROM `tabUser Email`
WHERE awaiting_password = 1""", as_dict=True)
password_list = [ user.get("user") for user in users ]
set_default("email_user_password", u','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
if key:
user = frappe.db.get_value("User", {"reset_password_key": key})
if not user:
return {
			'message': _("The link specified has either been used before or is invalid")
}
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
else:
return
return {
'user': user
}
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if not is_signup_enabled():
frappe.throw(_('Sign Up is disabled'), title='Not Allowed')
user = frappe.db.get("User", {"email": email})
if user:
if user.disabled:
return 0, _("Registered but disabled")
else:
return 0, _("Already Registered")
else:
if frappe.db.sql("""select count(*) from tabUser where
HOUR(TIMEDIFF(CURRENT_TIMESTAMP, TIMESTAMP(modified)))=1""")[0][0] > 300:
frappe.respond_as_web_page(_('Temporarily Disabled'),
_('Too many users signed up recently, so the registration is disabled. Please try back in an hour'),
http_status_code=429)
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": escape_html(full_name),
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
@rate_limit(key='user', limit=get_password_reset_limit, seconds = 24*60*60, methods=['POST'])
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(_("Password reset instructions have been sent to your email"))
except frappe.DoesNotExistError:
frappe.clear_messages()
return 'not found'
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond, get_filters_cond
conditions=[]
user_type_condition = "and user_type != 'Website User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
filters.pop('ignore_user_type')
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{fcond} {mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s
""".format(
user_type_condition = user_type_condition,
standard_users=", ".join([frappe.db.escape(u) for u in STANDARD_USERS]),
key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)
),
dict(start=start, page_len=page_len, txt=txt)
)
def get_total_users():
"""Returns total no. of system users"""
return flt(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0])
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
		# don't allow a non-Administrator user to view / edit the Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
if mention.get('data-is-group') == 'true':
try:
user_group = frappe.get_cached_doc('User Group', mention['data-id'])
emails += [d.user for d in user_group.user_group_members]
except frappe.DoesNotExistError:
pass
continue
email = mention['data-id']
emails.append(email)
return emails
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
@frappe.whitelist(allow_guest=True)
def send_token_via_sms(tmp_id,phone_no=None,user=None):
try:
from frappe.core.doctype.sms_settings.sms_settings import send_request
except:
return False
if not frappe.cache().ttl(tmp_id + '_token'):
return False
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
if not ss.sms_gateway_url:
return False
token = frappe.cache().get(tmp_id + '_token')
args = {ss.message_parameter: 'verification code is {}'.format(token)}
for d in ss.get("parameters"):
args[d.parameter] = d.value
if user:
user_phone = frappe.db.get_value('User', user, ['phone','mobile_no'], as_dict=1)
usr_phone = user_phone.mobile_no or user_phone.phone
if not usr_phone:
return False
else:
if phone_no:
usr_phone = phone_no
else:
return False
args[ss.receiver_parameter] = usr_phone
status = send_request(ss.sms_gateway_url, args, use_post=ss.use_post)
if 200 <= status < 300:
frappe.cache().delete(tmp_id + '_token')
return True
else:
return False
@frappe.whitelist(allow_guest=True)
def send_token_via_email(tmp_id,token=None):
import pyotp
user = frappe.cache().get(tmp_id + '_user')
count = token or frappe.cache().get(tmp_id + '_token')
if ((not user) or (user == 'None') or (not count)):
return False
user_email = frappe.db.get_value('User',user, 'email')
if not user_email:
return False
otpsecret = frappe.cache().get(tmp_id + '_otp_secret')
hotp = pyotp.HOTP(otpsecret)
frappe.sendmail(
recipients=user_email,
sender=None,
subject="Verification Code",
template="verification_code",
args=dict(code=hotp.at(int(count))),
delayed=False,
retry=3
)
return True
@frappe.whitelist(allow_guest=True)
def reset_otp_secret(user):
otp_issuer = frappe.db.get_value('System Settings', 'System Settings', 'otp_issuer_name')
user_email = frappe.db.get_value('User',user, 'email')
if frappe.session.user in ["Administrator", user] :
frappe.defaults.clear_default(user + '_otplogin')
frappe.defaults.clear_default(user + '_otpsecret')
email_args = {
'recipients':user_email, 'sender':None, 'subject':'OTP Secret Reset - {}'.format(otp_issuer or "Frappe Framework"),
'message':'<p>Your OTP secret on {} has been reset. If you did not perform this reset and did not request it, please contact your System Administrator immediately.</p>'.format(otp_issuer or "Frappe Framework"),
'delayed':False,
'retry':3
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, event=None, is_async=True, job_name=None, now=False, **email_args)
return frappe.msgprint(_("OTP Secret has been reset. Re-registration will be required on next login."))
else:
return frappe.throw(_("OTP secret can only be reset by the Administrator."))
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
@frappe.whitelist()
def get_module_profile(module_profile):
module_profile = frappe.get_doc('Module Profile', {'module_profile_name': module_profile})
return module_profile.get('block_modules')
def update_roles(role_profile):
users = frappe.get_all('User', filters={'role_profile_name': role_profile})
role_profile = frappe.get_doc('Role Profile', role_profile)
roles = [role.role for role in role_profile.roles]
for d in users:
user = frappe.get_doc('User', d)
user.set('roles', [])
user.add_roles(*roles)
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_name = get_contact_name(user.email)
if not contact_name:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
		# Add phone number if it does not already exist in the contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
		# Add mobile number if it does not already exist in the contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
if "System Manager" in frappe.get_roles():
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
# if api key is not set generate api key
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
frappe.throw(frappe._("Not Permitted"), frappe.PermissionError)
@frappe.whitelist()
def switch_theme(theme):
if theme in ["Dark", "Light"]:
frappe.db.set_value("User", frappe.session.user, "desk_theme", theme)
| mit | -1,635,436,983,036,877,300 | 31.306634 | 213 | 0.69565 | false |
Ayrx/cryptography | src/_cffi_src/openssl/crypto.py | 1 | 3371 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/crypto.h>
"""
TYPES = """
static const long Cryptography_HAS_LOCKING_CALLBACKS;
static const int SSLEAY_VERSION;
static const int SSLEAY_CFLAGS;
static const int SSLEAY_PLATFORM;
static const int SSLEAY_DIR;
static const int SSLEAY_BUILT_ON;
static const int OPENSSL_VERSION;
static const int OPENSSL_CFLAGS;
static const int OPENSSL_BUILT_ON;
static const int OPENSSL_PLATFORM;
static const int OPENSSL_DIR;
static const int CRYPTO_MEM_CHECK_ON;
static const int CRYPTO_MEM_CHECK_OFF;
static const int CRYPTO_MEM_CHECK_ENABLE;
static const int CRYPTO_MEM_CHECK_DISABLE;
static const int CRYPTO_LOCK;
static const int CRYPTO_UNLOCK;
static const int CRYPTO_READ;
static const int CRYPTO_LOCK_SSL;
"""
FUNCTIONS = """
int CRYPTO_mem_ctrl(int);
int CRYPTO_is_mem_check_on(void);
void CRYPTO_mem_leaks(struct bio_st *);
"""
MACROS = """
/* CRYPTO_cleanup_all_ex_data became a macro in 1.1.0 */
void CRYPTO_cleanup_all_ex_data(void);
/* as of 1.1.0 OpenSSL does its own locking *angelic chorus*. These functions
have become macros that are no ops */
int CRYPTO_num_locks(void);
void CRYPTO_set_locking_callback(void(*)(int, int, const char *, int));
void (*CRYPTO_get_locking_callback(void))(int, int, const char *, int);
/* SSLeay was removed in 1.1.0 */
unsigned long SSLeay(void);
const char *SSLeay_version(int);
/* these functions were added to replace the SSLeay functions in 1.1.0 */
unsigned long OpenSSL_version_num(void);
const char *OpenSSL_version(int);
/* this is a macro in 1.1.0 */
void OPENSSL_free(void *);
/* This was removed in 1.1.0 */
void CRYPTO_lock(int, int, const char *, int);
"""
CUSTOMIZATIONS = """
/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the
values so you can use either one. This is so we can use the new function
names no matter what OpenSSL we're running on, but users on older pyOpenSSL
releases won't see issues if they're running OpenSSL 1.1.0 */
#if !defined(SSLEAY_VERSION)
# define SSLeay OpenSSL_version_num
# define SSLeay_version OpenSSL_version
# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER
# define SSLEAY_VERSION OPENSSL_VERSION
# define SSLEAY_CFLAGS OPENSSL_CFLAGS
# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON
# define SSLEAY_PLATFORM OPENSSL_PLATFORM
# define SSLEAY_DIR OPENSSL_DIR
#endif
#if !defined(OPENSSL_VERSION)
# define OpenSSL_version_num SSLeay
# define OpenSSL_version SSLeay_version
# define OPENSSL_VERSION SSLEAY_VERSION
# define OPENSSL_CFLAGS SSLEAY_CFLAGS
# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON
# define OPENSSL_PLATFORM SSLEAY_PLATFORM
# define OPENSSL_DIR SSLEAY_DIR
#endif
#if !defined(CRYPTO_LOCK)
static const long Cryptography_HAS_LOCKING_CALLBACKS = 0;
static const long CRYPTO_LOCK = 0;
static const long CRYPTO_UNLOCK = 0;
static const long CRYPTO_READ = 0;
static const long CRYPTO_LOCK_SSL = 0;
void (*CRYPTO_lock)(int, int, const char *, int) = NULL;
#else
static const long Cryptography_HAS_LOCKING_CALLBACKS = 1;
#endif
"""
| bsd-3-clause | 7,430,546,441,362,209,000 | 33.397959 | 79 | 0.723821 | false |
linfanangel/Trality | cart/cart/settings.py | 1 | 3738 | """
Django settings for cart project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
from django.core.mail import send_mail
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*r4w4nt(x@zsro*@hf2#ossx*@=n+*4_sqv9-cr$w!h2t3(j(('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'cartapp',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cart.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'cart',
'USER': 'root',
'PASSWORD': '##',
}
}
ACCOUNT_ACTIVATION_DAYS = 7
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'linfanmary@gmail.com'
EMAIL_HOST_PASSWORD = ''
SERVER_EMAIL = 'linfanmary@gmail.com'
DEFAULT_FROM_EMAIL = '' | gpl-3.0 | 5,015,623,358,729,952,000 | 24.786207 | 91 | 0.684323 | false |
GajovskiMaxime/stuart | stuart/commands.py | 1 | 4976 | # -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
from stuart.extensions import salt_client
from stuart.models.salt_module import SaltModule
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, '--verbose'])
exit(rv)
@click.command()
@click.option('-t', '--target', default=None,
help='Salt minion target.')
@with_appcontext
def populate_database_from_minion(target):
"""Populate database."""
click.echo("__Ping minion with pattern '{}'__ :".format(target))
minion_is_returning = salt_client.cmd(target, 'test.ping')
if not minion_is_returning:
click.echo(err=True, message='Minion is not returning')
exit(1)
minion_modules = salt_client.cmd(target, 'sys.list_modules')
modules = minion_modules[target]
for module in modules:
created_module = SaltModule.create(name=module)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
help='Fix imports using isort, before linting')
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ['node_modules', 'requirements']
root_files = glob('*.py')
root_directories = [
name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
click.echo('Removing {}'.format(full_pathname))
os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = (
current_app.url_map
.bind('localhost')
.match(url, return_rule=True))
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(('<{}>'.format(e), None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '{:' + str(max_rule_length) + '}'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = (
max_endpoint_length if max_endpoint_length > 8 else 8)
str_template += ' {:' + str(max_endpoint_length) + '}'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = (
max_arguments_length if max_arguments_length > 9 else 9)
str_template += ' {:' + str(max_arguments_length) + '}'
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo('-' * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
| bsd-3-clause | 7,636,556,050,582,792,000 | 33.317241 | 78 | 0.60832 | false |
gizela/gizela | gizela/data/ObsClusterVector.py | 1 | 3090 | # gizela
#
# Copyright (C) 2010 Michal Seidl, Tomas Kubin
# Author: Tomas Kubin <tomas.kubin@fsv.cvut.cz>
# URL: <http://slon.fsv.cvut.cz/gizela>
#
# $Id: ObsClusterVector.py 107 2010-12-06 23:18:55Z tomaskubin $
from gizela.util.Error import Error
from gizela.data.ObsHeightDiff import ObsHeightDiff
from gizela.data.ObsClusterBase import ObsClusterBase
from gizela.data.obs_table import obs_vector_stdev_table
class ObsClusterVectorError(Error): pass
class ObsClusterVector(ObsClusterBase):
"""class for cluster of height differences
"""
__slots__ = []
def __init__(self, covmat=None, textTable=None):
if textTable==None:
textTable = obs_vector_stdev_table()
super(ObsClusterVector, self).__init__(covmat=covmat, textTable=textTable)
def make_gama_xml(self, corrected=False):
str = ["<vectors>"]
str.extend(["\t" + obs.make_gama_xml(corrected) for obs in self._obsList])
if self.is_cov_mat():
str.append(self.make_gama_xml_covmat())
str.append("</vectors>")
return "\n".join(str)
def make_table_row(self):
return "".join([obs.make_table_row() for obs in self._obsList])
def tran_3d(self, coordSystem):
for v in self._obsList:
v.tran_3d(coordSystem)
def tran_2d(self, coordSystem, pointList):
if len(self._obsList) != 1:
            raise ObsClusterVectorError, "method tran_2d() works only with one vector in cluster"
# working with covariance matrix for more than one vector
# is not implemented
for v in self._obsList:
v.tran_2d(coordSystem, pointList)
#self._lastIndex -= 1 # retuce from 3d to 2d - one dimension
def scale_cov_mat(self, factor):
self.covmat.scale_(factor)
if __name__ == "__main__":
from gizela.data.ObsVector import ObsVector
from gizela.data.CovMat import CovMat
v1 = ObsVector(fromid="A", toid="B", dx=100, dy=200, dz=300)
v2 = ObsVector(fromid="B", toid="C", dx=400, dy=300, dz=200)
cl = ObsClusterVector()
cl.append_obs(v1)
cl.append_obs(v2)
print cl.make_gama_xml()
print cl
from gizela.data.obs_table import obs_vector_table
cl.textTable = obs_vector_table()
print cl
cl.covmat = CovMat(6,5)
#cl.covmat.stdev = (0.01, 0.02, 0.03, 0.04, 0.05, 0.06)
#cl.covmat.var = (0.01, 0.02, 0.03, 0.04, 0.05)
cl.covmat.data = [[1,2,3,4,5,6], [2,3,4,5,6], [3,4,5,6], [4,5,6], [5,6], [6]]
print cl.make_gama_xml()
cl.textTable = obs_vector_stdev_table()
print cl
# iterator
print "\n".join(["from=%s to=%s" % (obs.fromid, obs.toid) for obs in cl])
#covmat of vectors
print v1.covmat.data
print v2.covmat.data
cm = CovMat(3,2)
cm.data = [[0.1, 0.2, 0.3],[0.2, 0.3],[0.3]]
v1.covmat = cm
print cl.covmat.data
# transformation
#from gizela.tran.Tran3D import Tran3D
#tr = Tran3D()
#from math import pi
#tr.rotation_xyz(pi/2, pi/2, pi/2)
#cl.tran_3d(tr)
#print cl
| gpl-3.0 | -2,229,662,025,519,756,300 | 27.348624 | 102 | 0.614563 | false |
Kronos3/HTML_PARSER | src/config.py | 1 | 6102 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# config.py
#
# Copyright 2016 Andrei Tumbar <atuser@Kronos>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import os, sys
import platform
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, GObject, GLib, GtkSource, Pango, Gdk
os.chdir ( os.path.dirname ( os.path.realpath ( __file__ ) ) )
import filetab, filemanager, builderset, project, configitem, configfile
class Config:
config_file_relative = ""
config_file_full = ""
__file_lines = None
__file = None
notebook = None
open_dialogue = None
var_dict = {}
var_list = []
list_vars = [ "output_files", "input_files" ]
conf_vars = [ "title", "css", "js" ]
variables_box = Gtk.Box ( )
configitems = []
rows = []
row_raw = []
current_file = {}
current = None
def __init__ ( self, curr_dir, config, notebook, open_dialogue ):
self.open_dialogue = open_dialogue
self.dir = curr_dir
self.notebook = notebook
self.new_config ( config )
def remove_config ( self ):
self.input.destroy ( )
self.output.destroy ( )
self.treeview.destroy ( )
self.var_store = None
self.var_rend = None
self.val_rend = None
self.treeview.destroy ( )
self.var_dict = {}
self.var_list = []
self.list_vars = [ "output_files", "input_files" ]
self.conf_vars = [ "title", "css", "js" ]
self.variables_box = Gtk.Box ( )
self.configitems = []
self.current_file = {}
self.current = None
def new_config ( self, config ):
self.config_file_relative = config
self.config_file_full = self.get_path ( config )
self.__file_lines = open ( self.config_file_relative, "r" ).readlines ( )
self.input = configitem.ConfigItem ( )
self.output = configitem.ConfigItem ( )
self.input.connect ( "new_config", self.get_new )
self.output.connect ( "new_config", self.get_new )
self.input.connect ( "remove_item", self.get_remove )
self.output.connect ( "remove_item", self.get_remove )
for l in self.__file_lines:
if l [ 0 ] == "#" or l == "" or l == "\n":
continue
var, val = l.split ( "=" )
# Remove the whitespace
var = var.strip ( )
val = val.strip ( )
self.var_dict [ var ] = val
self.var_list.append ( var )
if var in self.list_vars:
self.var_dict [ var ] = val.split ( "," )
for var in self.list_vars:
if not var:
continue
buff = self.var_dict [ var ]
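			# var is "input_files" or "output_files"; the exec calls below strip the
			# "_files" suffix so they dispatch to the matching ConfigItem widget
			# (self.input or self.output).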
exec ( "self.%s.set_notebook ( self.notebook )" % var.replace ( "_files", "" ) )
exec ( "self.%s.set_dialogue ( self.open_dialogue )" % var.replace ( "_files", "" ) )
exec ( "self.%s.add_items ( buff )" % var.replace ( "_files", "" ) )
self.__init_vars__ ( )
for var in self.var_list:
if ( not isinstance ( self.var_dict [ var ], list ) ):
self.add_var ( var )
def get_remove (self, buff_cfg, buff_item):
curr = "output"
if buff_cfg == self.input:
curr = "input"
self.var_dict [ curr + "_files" ].pop ( self.var_dict [ curr + "_files" ].index (buff_item.full_path))
def get_path ( self, _in ):
if self.dir [ -1 ] == "/":
return self.dir + _in
return self.dir + "/" + _in
def get_new ( self, a, confitem ):
if ( confitem == self.input ):
self.current = "input"
else:
self.current = "output"
def add ( self, __files ):
if platform.system () == "Windows":
__files[0] = __files [0][1:]
if ( self.current == "input" ):
self.input.add_items ( __files, remove=False )
self.var_dict ["input_files"].append (__files[0])
else:
self.output.add_items ( __files, remove=False )
self.var_dict ["output_files"].append (__files[0])
def update_file ( self, var, val ):
self.current_file [ var ] = val
def __init_vars__ ( self ):
self.var_store = Gtk.ListStore ( str, str )
self.treeview = Gtk.TreeView.new_with_model ( self.var_store )
self.var_rend = Gtk.CellRendererText ( )
self.val_rend = Gtk.CellRendererText ( )
self.val_rend.set_property('editable', True)
column_1 = Gtk.TreeViewColumn ( "Variables", self.var_rend, text=0 )
column_2 = Gtk.TreeViewColumn ( "Value", self.val_rend, text=1 )
self.treeview.append_column ( column_1 )
self.treeview.append_column ( column_2 )
self.val_rend.connect ( "edited", self.vars_changes )
def vars_changes ( self, renderer, path, new_text ):
self.var_store.set ( self.var_store.get_iter ( path ), 1, new_text )
self.var_dict [ self.var_store.get_value ( self.var_store.get_iter ( path ), 0 ) ] = new_text
def add_var ( self, var, add_to_list=False ):
if ( add_to_list ):
self.var_list.append ( var )
self.var_dict [ var ] = ""
self.var_store.append ( [ var, self.var_dict [ var ] ] )
def open_file ( self, path ):
self.__file_lines = open ( path, "r" ).readlines ( )
self.__file = open ( path, "w" ).readlines ( )
def remove_var ( self ):
model, treeiter = self.treeview.get_selection ( ).get_selected ( )
self.var_dict.pop ( model [ treeiter ] [ 0 ], None )
self.var_list.pop ( self.var_list.index ( model [ treeiter ] [ 0 ] ) )
print (self.var_list)
self.var_store.remove ( treeiter )
def get_conf_out ( self ):
out_buff = []
for x in self.var_list:
buff = self.var_dict [ x ]
if ( isinstance ( self.var_dict [ x ], list ) ):
buff = ",".join ( self.var_dict [ x ] )
buff += ","
out_buff.append ( x + " = " + buff )
return out_buff
| gpl-3.0 | -1,421,159,499,639,127,300 | 28.196172 | 104 | 0.621599 | false |
Joneyviana/todolist-django-angular | todolist/contrib/sites/migrations/0002_set_site_domain_and_name.py | 1 | 1131 | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'todoList.com',
'name': 'todoList'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0001_initial'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| mit | -2,249,146,283,327,169,300 | 23.586957 | 129 | 0.619805 | false |
osvx/vyatta-op | scripts/vyatta-show-interfaces-traffic.py | 1 | 3167 | #!/usr/bin/python
from sys import argv, exit
from time import sleep
def get_stats(interface):
stats = {}
with open('/proc/net/dev') as file:
for line in file:
line = line.split()
if line[0][:-1] == interface:
stats['bytes_in'] = float(line[1])
stats['packets_in'] = float(line[2])
stats['errors_in'] = int(line[3])
stats['drops_in'] = int(line[4])
stats['bytes_out'] = float(line[9])
stats['packets_out'] = float(line[10])
stats['errors_out'] = int(line[11])
stats['drops_out'] = int(line[12])
break
return stats
def compute_stats(start, end, interval=5):
stats = {}
computed = {}
for stat in ['bytes_in', 'packets_in', 'errors_in', 'drops_in', 'bytes_out', 'packets_out', 'errors_out', 'drops_out']:
stats[stat] = end[stat] - start[stat]
if stats['bytes_in'] == 0:
computed['mbps_in'] = 0.0
computed['pps_in'] = 0
computed['bpp_in'] = 0
computed['errors_in'] = 0
computed['drops_in'] = 0
else:
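        # bytes/s -> Mbit/s: 1 Mbit = 1,048,576 bits = 131,072 bytes, hence the divisor.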
computed['mbps_in'] = round(stats['bytes_in'] / interval / 131072, 1)
computed['pps_in'] = int(stats['packets_in'] / interval)
computed['bpp_in'] = int(stats['bytes_in'] / stats['packets_in'])
computed['errors_in'] = stats['errors_in']
computed['drops_in'] = stats['drops_in']
if stats['bytes_out'] == 0:
computed['mbps_out'] = 0.0
computed['pps_out'] = 0
computed['bpp_out'] = 0
computed['errors_out'] = 0
computed['drops_out'] = 0
else:
computed['mbps_out'] = round(stats['bytes_out'] / interval / 131072, 1)
computed['pps_out'] = int(stats['packets_out'] / interval)
computed['bpp_out'] = int(stats['bytes_out'] / stats['packets_out'])
computed['errors_out'] = stats['errors_out']
computed['drops_out'] = stats['drops_out']
return computed
def display_stats(stats):
print 'RX Mbps : %s' % stats['mbps_in']
print 'RX PPS : %s' % stats['pps_in']
print 'RX BPP : %s' % stats['bpp_in']
print 'RX errors: %s' % stats['errors_in']
print 'RX drops : %s' % stats['drops_in']
print ''
print 'TX Mbps : %s' % stats['mbps_out']
print 'TX PPS : %s' % stats['pps_out']
print 'TX BPP : %s' % stats['bpp_out']
print 'TX errors: %s' % stats['errors_out']
print 'TX drops : %s' % stats['drops_out']
return True
if __name__ == '__main__':
try:
name = argv[1]
except:
print 'no interface specified'
exit(1)
try:
interval = int(argv[2])
except:
interval = 5
try:
print 'Measuring traffic at interface %s for %i seconds...' % (name, interval)
start = get_stats(name)
if len(start) == 0:
print 'interface not found'
exit(2)
sleep(interval)
end = get_stats(name)
display_stats(compute_stats(start, end, interval))
except Exception as e:
print 'error: %s' % e
exit(3)
exit(0)
| gpl-2.0 | -7,811,449,596,861,824,000 | 28.598131 | 123 | 0.524471 | false |
alexrudy/Zeeko | tools/zcli.py | 1 | 10788 | #!/usr/bin/env python
"""
Command-line interface for various Zeeko components
"""
import click
import logging
import logging.handlers
import zmq
import time
import sys
import numpy as np
from zutils import zmain, ioloop, MessageLine
log = logging.getLogger()
main = zmain()
@main.command()
@click.option(
"--interval", type=int, help="Polling interval for client status.", default=1
)
@click.option(
"--guess",
"bind",
default=True,
help="Try to bind the connection.",
flag_value="guess",
)
@click.option("--bind", "bind", help="Try to bind the connection.", flag_value="bind")
@click.option(
"--connect",
"bind",
help="Try to use connect to attach the connection",
flag_value="connect",
)
@click.pass_context
def proxy(ctx, interval, bind):
"""A proxy object for monitoring traffic between two sockets"""
proxylog = log.getChild("proxy")
h = logging.handlers.RotatingFileHandler(
"zcli-proxy.log",
mode="w",
maxBytes=10 * (1024 ** 2),
backupCount=0,
encoding="utf-8",
)
h.setFormatter(logging.Formatter(fmt="%(message)s,%(created)f"))
proxylog.addHandler(h)
proxylog.propagate = False
xpub = ctx.obj.zcontext.socket(zmq.XPUB)
xsub = ctx.obj.zcontext.socket(zmq.XSUB)
if bind == "connect":
xpub.connect(ctx.obj.secondary.url)
xsub.connect(ctx.obj.primary.url)
click.echo("XPUB at {0}".format(ctx.obj.secondary.url))
click.echo("XSUB at {0}".format(ctx.obj.primary.url))
else:
xpub.bind(ctx.obj.secondary.bind)
xsub.bind(ctx.obj.primary.bind)
click.echo("XPUB at {0}".format(ctx.obj.secondary.bind))
click.echo("XSUB at {0}".format(ctx.obj.primary.bind))
poller = zmq.Poller()
poller.register(xpub, zmq.POLLIN)
poller.register(xsub, zmq.POLLIN)
rate = 0.0
last_message = time.time()
with MessageLine(sys.stdout) as printer:
while True:
start = time.time()
while last_message + interval > start:
data = 0
sockets = dict(poller.poll(timeout=10))
if sockets.get(xpub, 0) & zmq.POLLIN:
msg = xpub.recv_multipart()
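                    # XPUB hands subscription frames upstream: first byte 0x01 means
                    # subscribe, 0x00 means unsubscribe, the remainder is the topic.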
if len(msg) == 1 and msg[0][0] in b"\x00\x01":
if msg[0][0] == b"\x00":
printer.echo(
"[BROKER]: unsubscribe '{0}'".format(msg[0][1:])
)
proxylog.info("unsubscribe,'{0}'".format(msg[0][1:]))
elif msg[0][0] == b"\x01":
printer.echo("[BROKER]: subscribe '{0}'".format(msg[0][1:]))
proxylog.info("subscribe,'{0}'".format(msg[0][1:]))
ratemsg = "Rate = {:.2f} Mb/s".format(rate / (1024 ** 2))
proxylog.info("rate,{:.2f}".format(rate / 1024 ** 2))
printer(ratemsg)
xsub.send_multipart(msg)
data += sum(len(m) for m in msg)
if sockets.get(xsub, 0) & zmq.POLLIN:
msg = xsub.recv_multipart()
xpub.send_multipart(msg)
data += sum(len(m) for m in msg)
end = time.time()
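                # Exponentially weighted moving average of throughput in bytes/s (90% old, 10% new).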
rate = (rate * 0.9) + (data / (end - start)) * 0.1
start = time.time()
ratemsg = "Rate = {:.2f} Mb/s".format(rate / (1024 ** 2))
proxylog.info("rate,{:.2f}".format(rate / 1024 ** 2))
printer(ratemsg)
last_message = time.time()
@main.command()
@click.option(
"--interval", type=int, help="Polling interval for client status.", default=3
)
@click.option("--subscribe", type=str, default="", help="Subscription value.")
@click.pass_context
def client(ctx, interval, subscribe):
"""Make a client"""
from zeeko.handlers.client import Client
c = Client.at_address(
ctx.obj.primary.addr(), ctx.obj.zcontext, bind=ctx.obj.primary.did_bind()
)
if subscribe:
c.opt.subscribe(subscribe.encode("utf-8"))
with ioloop(ctx.obj.zcontext, c) as loop:
ctx.obj.mem.calibrate()
click.echo("Memory usage at start: {:d}MB".format(ctx.obj.mem.poll()))
with MessageLine(sys.stdout) as msg:
count = c.framecount
time.sleep(0.1)
msg.echo(
"Client connected to {0:s} (bind={1})".format(
ctx.obj.primary.addr(), ctx.obj.primary.did_bind()
)
)
while True:
time.sleep(interval)
msg(
"Receiving {:10.1f} msgs per second. Delay: {:4.3g} Mem: {:d}MB".format(
(c.framecount - count) / float(interval),
c.snail.delay,
ctx.obj.mem.usage(),
)
)
count = c.framecount
@main.command()
@click.option(
"--interval", type=int, help="Polling interval for client status.", default=3
)
@click.option("--subscribe", type=str, default="", help="Subscription value.")
@click.option("--chunksize", type=int, default=1024, help="Telemetry chunk size")
@click.pass_context
def telemetry(ctx, interval, subscribe, chunksize):
"""Run a telemetry pipeline."""
from zeeko.telemetry import PipelineIOLoop
memory_logger = log.getChild("telemetry")
h = logging.handlers.RotatingFileHandler(
"zcli-telemetry.log",
mode="w",
maxBytes=10 * (1024 ** 2),
backupCount=0,
encoding="utf-8",
)
h.setFormatter(logging.Formatter(fmt="%(message)s,%(created)f"))
memory_logger.addHandler(h)
memory_logger.propagate = False
p = PipelineIOLoop(ctx.obj.primary.addr(), ctx.obj.zcontext, chunksize=chunksize)
c = p.record
with p.running() as loop:
ctx.obj.mem.calibrate()
click.echo("Memory usage at start: {:d}MB".format(ctx.obj.mem.poll()))
with MessageLine(sys.stdout) as msg:
count = c.framecount
time.sleep(0.1)
msg.echo(
"Client connected to {0:s} (bind={1})".format(
ctx.obj.primary.addr(), ctx.obj.primary.did_bind()
)
)
while True:
time.sleep(interval)
msg(
"Receiving {:10.1f} msgs per second. Delay: {:4.3g} Mem: {:d}MB".format(
(c.framecount - count) / float(interval),
c.snail.delay,
ctx.obj.mem.usage(),
)
)
memory_logger.info(
"{0},{1},{2}".format(
c.chunkcount, p.write.counter, ctx.obj.mem.usage()
)
)
count = c.framecount
@main.command()
@click.option(
"--interval", type=int, help="Polling interval for server status.", default=3
)
@click.option(
"--frequency", type=float, help="Publish frequency for server.", default=100
)
@click.pass_context
def server(ctx, frequency, interval):
"""Serve some random data."""
from zeeko.handlers.server import Server
import numpy as np
s = Server.at_address(
ctx.obj.primary.addr(prefer_bind=True),
ctx.obj.zcontext,
bind=ctx.obj.primary.did_bind(prefer_bind=True),
)
s.throttle.frequency = frequency
s.throttle.active = True
s["image"] = np.random.randn(180, 180)
s["grid"] = np.random.randn(32, 32)
s["array"] = np.random.randn(52)
click.echo(
"Publishing {:d} array(s) to '{:s}' at {:.0f}Hz".format(
len(s), ctx.obj.primary.addr(prefer_bind=True), s.throttle.frequency
)
)
click.echo("^C to stop.")
with ioloop(ctx.obj.zcontext, s) as loop:
ctx.obj.mem.calibrate()
click.echo("Memory usage at start: {:d}MB".format(ctx.obj.mem.poll()))
count = s.framecount
with MessageLine(sys.stdout) as msg:
sys.stdout.write("\n")
sys.stdout.flush()
while loop.is_alive():
time.sleep(interval)
s["image"] = np.random.randn(180, 180)
s["grid"] = np.random.randn(32, 32)
s["array"] = np.random.randn(52)
ncount = s.framecount
msg(
"Sending {:5.1f} msgs per second. N={:6d}, to={:.4f}, mem={:d}MB".format(
(ncount - count) / float(interval) * len(s),
ncount,
max(s.throttle._delay, 0.0),
ctx.obj.mem.usage(),
)
)
count = ncount
@main.command()
@click.option(
"--frequency", type=float, help="Publish frequency for server.", default=100
)
@click.pass_context
def sprofile(ctx, frequency):
"""Profile the throttle/server."""
from zeeko.handlers.server import Server
import numpy as np
interval = 1.0
s = Server.at_address(
ctx.obj.primary.addr(prefer_bind=True),
ctx.obj.zcontext,
bind=ctx.obj.primary.did_bind(prefer_bind=True),
)
# s.throttle.frequency = frequency
# s.throttle.active = True
s["image"] = np.random.randn(180, 180)
s["grid"] = np.random.randn(32, 32)
s["array"] = np.random.randn(52)
click.echo(
"Publishing {:d} array(s) to '{:s}' at {:.0f}Hz".format(
len(s), ctx.obj.primary.addr(prefer_bind=True), s.throttle.frequency
)
)
click.echo("^C to stop.")
start = time.time()
with ioloop(ctx.obj.zcontext, s) as loop:
count = s.framecount
throttle = loop.worker.throttle
throttle.frequency = frequency
throttle.active = True
while loop.is_alive() and s.framecount < 1000:
time.sleep(interval)
s["image"] = np.random.randn(180, 180)
s["grid"] = np.random.randn(32, 32)
s["array"] = np.random.randn(52)
ncount = s.framecount
ctx.obj.log.info(
"Sending {:.1f} msgs per second. N={:d}, to={:.4f}".format(
(ncount - count) / float(interval) * len(s), ncount, throttle._delay
)
)
count = s.framecount
end = time.time()
click.echo("Effective Framerate = {0:.1f}Hz".format(s.framecount / (end - start)))
import matplotlib.pyplot as plt
plt.plot(np.clip(throttle._history, 0, None))
plt.xlabel("Timestep")
plt.ylabel("Timeout")
plt.savefig("history.png")
class Namespace(object):
pass
if __name__ == "__main__":
main(obj=Namespace())
| bsd-3-clause | -4,991,245,846,941,785,000 | 33.139241 | 93 | 0.538654 | false |
spacecowboy/pysurvival-ann | setup.py | 1 | 2269 | #!/usr/bin/env python
"""
General instructions:
python setup.py build
python setup.py install
To include parts that depend on R's survival module, do:
python setup.py build --with-R
Info: This package depends on numpy, and optionally R, RInside
"""
from distutils.core import setup, Extension
import subprocess
import numpy
import sys
sources = ['src/PythonModule.cpp',
'src/ErrorFunctions.cpp',
'src/ErrorFunctionsGeneral.cpp',
'src/ErrorFunctionsSurvival.cpp',
'src/Statistics.cpp',
'src/RPropNetworkWrapper.cpp',
'src/RPropNetwork.cpp',
'src/drand.cpp',
'src/activationfunctions.cpp',
'src/c_index.cpp', 'src/CIndexWrapper.cpp',
'src/MatrixNetwork.cpp',
'src/MatrixNetworkWrapper.cpp',
'src/GeneticNetwork.cpp',
'src/GeneticFitness.cpp',
'src/GeneticSelection.cpp',
'src/GeneticMutation.cpp',
'src/GeneticCrossover.cpp',
'src/GeneticNetworkWrapper.cpp',
'src/ErrorFunctionsWrapper.cpp',
'src/WrapperHelpers.cpp',
'src/Random.cpp']
# Numpy stuff
numpy_include = numpy.get_include()
compileargs = []
libs = []
libdirs = []
linkargs = []
#if ("--help" in sys.argv or
if ("-h" in sys.argv or
len(sys.argv) == 1):
sys.exit(__doc__)
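# Sketch (assumption, not part of this build script as written): the "--with-R"
# switch mentioned in the module docstring could be consumed here to extend the
# flag lists defined above before the Extension is declared, e.g.:
#
#     if "--with-R" in sys.argv:
#         sys.argv.remove("--with-R")
#         compileargs.append("-DWITH_R")      # hypothetical macro name
#         libs.extend(["R", "RInside"])       # assumed linker names for R / RInside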
# Python setup
_ann = Extension('ann._ann',
sources = sources,
include_dirs = [numpy_include],
extra_compile_args = ['-std=c++0x',
'-Wall',
'-O3',
'-fopenmp'] + compileargs,
extra_link_args = ['-fopenmp'] + linkargs,
libraries=libs, library_dirs=libdirs)
setup(name = 'pysurvival-ann',
version = '0.9',
description = 'A C++ neural network package for survival data',
author = 'Jonas Kalderstam',
author_email = 'jonas@kalderstam.se',
url = 'https://github.com/spacecowboy/pysurvival-ann',
packages = ['ann'],
package_dir = {'ann': 'ann'},
ext_modules = [_ann],
setup_requires = ['numpy'],
install_requires = ['numpy>=1.7.1']
)
| gpl-2.0 | 3,438,694,046,496,193,500 | 28.467532 | 69 | 0.557074 | false |
facebookresearch/fastText | python/benchmarks/get_word_vector.py | 1 | 1502 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from fasttext import load_model
from fasttext import tokenize
import sys
import time
import tempfile
import argparse
def get_word_vector(data, model):
t1 = time.time()
print("Reading")
with open(data, 'r') as f:
tokens = tokenize(f.read())
t2 = time.time()
print("Read TIME: " + str(t2 - t1))
print("Read NUM : " + str(len(tokens)))
f = load_model(model)
# This is not equivalent to piping the data into
# print-word-vector, because the data is tokenized
# first.
t3 = time.time()
i = 0
for t in tokens:
f.get_word_vector(t)
i += 1
if i % 10000 == 0:
sys.stderr.write("\ri: " + str(float(i / len(tokens))))
sys.stderr.flush()
t4 = time.time()
print("\nVectoring: " + str(t4 - t3))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Simple benchmark for get_word_vector.')
parser.add_argument('model', help='A model file to use for benchmarking.')
parser.add_argument('data', help='A data file to use for benchmarking.')
args = parser.parse_args()
get_word_vector(args.data, args.model)
| mit | 5,576,297,108,336,475,000 | 29.04 | 78 | 0.641145 | false |
tonybaloney/st2 | st2api/st2api/controllers/v1/aliasexecution.py | 1 | 7905 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from jinja2.exceptions import UndefinedError
from oslo_config import cfg
import six
from st2api.controllers.base import BaseRestControllerMixin
from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.api.action import ActionAliasAPI
from st2common.models.api.auth import get_system_username
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.auth import UserDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.notification import NotificationSchema, NotificationSubSchema
from st2common.models.utils import action_param_utils
from st2common.models.utils.action_alias_utils import extract_parameters_for_action_alias_db
from st2common.persistence.actionalias import ActionAlias
from st2common.services import action as action_service
from st2common.util import action_db as action_utils
from st2common.util import reference
from st2common.util.jinja import render_values as render
from st2common.rbac.types import PermissionType
from st2common.rbac.utils import assert_user_has_resource_db_permission
from st2common.router import abort
from st2common.router import Response
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
CAST_OVERRIDES = {
'array': (lambda cs_x: [v.strip() for v in cs_x.split(',')])
}
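# For example, an "array" parameter supplied as "a, b ,c" is cast to ['a', 'b', 'c']
# before the execution is scheduled.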
class ActionAliasExecutionController(BaseRestControllerMixin):
def post(self, payload, requester_user, show_secrets=False):
action_alias_name = payload.name if payload else None
if not action_alias_name:
abort(http_client.BAD_REQUEST, 'Alias execution "name" is required')
return
if not requester_user:
requester_user = UserDB(cfg.CONF.system_user.user)
format_str = payload.format or ''
command = payload.command or ''
try:
action_alias_db = ActionAlias.get_by_name(action_alias_name)
except ValueError:
action_alias_db = None
if not action_alias_db:
msg = 'Unable to identify action alias with name "%s".' % (action_alias_name)
abort(http_client.NOT_FOUND, msg)
return
if not action_alias_db.enabled:
msg = 'Action alias with name "%s" is disabled.' % (action_alias_name)
abort(http_client.BAD_REQUEST, msg)
return
execution_parameters = extract_parameters_for_action_alias_db(
action_alias_db=action_alias_db,
format_str=format_str,
param_stream=command)
notify = self._get_notify_field(payload)
context = {
'action_alias_ref': reference.get_ref_from_model(action_alias_db),
'api_user': payload.user,
'user': requester_user.name,
'source_channel': payload.source_channel
}
execution = self._schedule_execution(action_alias_db=action_alias_db,
params=execution_parameters,
notify=notify,
context=context,
show_secrets=show_secrets,
requester_user=requester_user)
result = {
'execution': execution,
'actionalias': ActionAliasAPI.from_model(action_alias_db)
}
if action_alias_db.ack:
try:
if 'format' in action_alias_db.ack:
result.update({
'message': render({'alias': action_alias_db.ack['format']}, result)['alias']
})
except UndefinedError as e:
result.update({
'message': 'Cannot render "format" in field "ack" for alias. ' + e.message
})
try:
if 'extra' in action_alias_db.ack:
result.update({
'extra': render(action_alias_db.ack['extra'], result)
})
except UndefinedError as e:
result.update({
'extra': 'Cannot render "extra" in field "ack" for alias. ' + e.message
})
return Response(json=result, status=http_client.CREATED)
def _tokenize_alias_execution(self, alias_execution):
tokens = alias_execution.strip().split(' ', 1)
return (tokens[0], tokens[1] if len(tokens) > 1 else None)
def _get_notify_field(self, payload):
on_complete = NotificationSubSchema()
route = (getattr(payload, 'notification_route', None) or
getattr(payload, 'notification_channel', None))
on_complete.routes = [route]
on_complete.data = {
'user': payload.user,
'source_channel': payload.source_channel
}
notify = NotificationSchema()
notify.on_complete = on_complete
return notify
def _schedule_execution(self, action_alias_db, params, notify, context, requester_user,
show_secrets):
action_ref = action_alias_db.action_ref
action_db = action_utils.get_action_by_ref(action_ref)
if not action_db:
raise StackStormDBObjectNotFoundError('Action with ref "%s" not found ' % (action_ref))
assert_user_has_resource_db_permission(user_db=requester_user, resource_db=action_db,
permission_type=PermissionType.ACTION_EXECUTE)
try:
# prior to shipping off the params cast them to the right type.
params = action_param_utils.cast_params(action_ref=action_alias_db.action_ref,
params=params,
cast_overrides=CAST_OVERRIDES)
if not context:
context = {
'action_alias_ref': reference.get_ref_from_model(action_alias_db),
'user': get_system_username()
}
liveaction = LiveActionDB(action=action_alias_db.action_ref, context=context,
parameters=params, notify=notify)
_, action_execution_db = action_service.request(liveaction)
mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
return ActionExecutionAPI.from_model(action_execution_db, mask_secrets=mask_secrets)
except ValueError as e:
LOG.exception('Unable to execute action.')
abort(http_client.BAD_REQUEST, str(e))
except jsonschema.ValidationError as e:
LOG.exception('Unable to execute action. Parameter validation failed.')
abort(http_client.BAD_REQUEST, str(e))
except Exception as e:
LOG.exception('Unable to execute action. Unexpected error encountered.')
abort(http_client.INTERNAL_SERVER_ERROR, str(e))
action_alias_execution_controller = ActionAliasExecutionController()
| apache-2.0 | 2,460,295,195,834,012,700 | 42.196721 | 100 | 0.624415 | false |
Inventrom/bolt-api-python | tests/config.py | 1 | 1907 | """This file contains all the configurations for unit testing."""
# Configurations for testing GPIO related functions.
GPIO_CONFIG = {
"VALID_PIN": '0',
"VALID_DIGITAL_WRITE_VALUE": "HIGH",
"INVALID_PIN": "16",
"INVALID_DIGITAL_WRITE_VALUE": "MEDIUM",
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"INVALID_PIN_RESPONSE": "Invalid pin value",
"INVALID_STATE_RESPONSE": "Invalid state",
"ANALOG_WRITE_VALUE": "100",
"ANALOG_READ_PIN": "A0",
"ANALOG_WRITE_PIN": '0',
"READ_VALUE": "0"
}
# Configurations for testing UART related functions.
UART_CONFIG = {
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"VALID_BAUD_RATE": "9600",
"INVALID_BAUD_RATE": "10",
"VALID_BAUD_RESPONSE": "Success",
"INVALID_BAUD_RESPONSE": "Invalid baud value",
"VALID_TILL": "10",
"INVALID_TILL": "1000",
"VALID_TILL_VALUE": "",
"INVALID_TILL_VALUE": "Invalid till value",
"VALID_WRITE_VALUE": "hello",
"INVALID_WRITE_VALUE": "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
"VALID_DATA_RESPONSE": "Serial write Successful",
"INVALID_DATA_RESPONSE": "Command timed out"
}
# Configurations for testing Utilities related functions.
UTILITY_CONFIG = {
"SUCCESS_RESPONSE": '1',
"FAILED_RESPONSE": '0',
"RESTART_RESPONSE": "Restarted",
"RESTART_ALTERNATIVE_RESPONSE": "Command timed out",
"ONLINE_VALUE": "online"
}
# User configurations.
CREDENTIALS = {
"API_KEY": "xxxx",
"DEVICE_ID": "xxxx"
}
| mit | -4,648,685,357,361,712,000 | 37.14 | 475 | 0.676455 | false |
iLampard/alphaware | alphaware/tests/utils/test_pandas_utils.py | 1 | 6717 | # -*- coding: utf-8 -*-
from unittest import TestCase
from parameterized import parameterized
import pandas as pd
import numpy as np
from numpy.testing.utils import assert_array_equal
from pandas import (MultiIndex,
Index)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from alphaware.enums import OutputDataFormat, FreqType
from alphaware.const import INDEX_FACTOR
from alphaware.utils import (convert_df_format,
top,
group_by_freq,
fwd_return,
weighted_rank)
from datetime import datetime as dt
class TestPandasUtils(TestCase):
@parameterized.expand([(pd.DataFrame({'001': [1, 2, 3], '002': [2, 3, 4]}, index=['2014', '2015', '2016']),
OutputDataFormat.MULTI_INDEX_DF,
'test_factor',
INDEX_FACTOR,
pd.DataFrame(index=MultiIndex(levels=[['2014', '2015', '2016'], ['001', '002']],
labels=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=['trade_date', 'ticker']),
data=[1, 2, 2, 3, 3, 4],
columns=['test_factor']))])
def test_convert_df_format_1(self, data, target_format, col_name, multi_index, expected):
calculated = convert_df_format(data, target_format, col_name, multi_index)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(
index=MultiIndex.from_product([['2014', '2015', '2016'], ['001', '002']], names=['trade_date', 'ticker']),
data=[1, 2, 3, 4, 5, 6],
columns=['factor']),
OutputDataFormat.PITVOT_TABLE_DF,
'factor',
INDEX_FACTOR,
pd.DataFrame({'001': [1, 3, 5], '002': [2, 4, 6]},
index=Index(['2014', '2015', '2016'], name='trade_date')))])
def test_convert_df_format_2(self, data, target_format, col_name, multi_index, expected):
calculated = convert_df_format(data, target_format, col_name, multi_index)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(data=[[1, 23, 4, 5], [4, 5, 7, 8], [10, 5, 11, 8], [34, 65, 27, 78]],
columns=['A', 'B', 'C', 'D']),
2,
['A'],
pd.DataFrame(data=[[34, 65, 27, 78], [10, 5, 11, 8]], index=[3, 2], columns=['A', 'B', 'C', 'D'])
)])
def test_top_1(self, data, n, column, expected):
calculated = top(data, column=column, n=n)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.Series(data=[35, 12, 45, 79, 123, 74, 35]),
3,
pd.Series(data=[123, 79, 74], index=[4, 3, 5])
)])
def test_top_2(self, data, n, expected):
calculated = top(data, n=n)
assert_series_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2017, 7, 1), dt(2017, 6, 1), dt(2017, 7, 2), dt(2017, 6, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
dt(2017, 7, 31),
FreqType.EOM,
pd.DataFrame(data=[1, 3], index=[dt(2017, 7, 1), dt(2017, 7, 2)])
),
(pd.Series(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2016, 7, 1), dt(2016, 6, 1), dt(2017, 7, 2), dt(2017, 7, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
dt(2016, 12, 31),
FreqType.EOY,
pd.DataFrame(data=[2, 1], index=[dt(2016, 6, 1), dt(2016, 7, 1)])
),
(pd.Series(data=[1, 2, 3, 4, 5, 6, 7, 9, 0, 12],
index=[dt(2016, 7, 1), dt(2016, 7, 1), dt(2017, 7, 2), dt(2017, 7, 1), dt(2017, 3, 1),
dt(2017, 3, 1), dt(2017, 1, 1), dt(2017, 2, 1), dt(2017, 1, 1), dt(2017, 2, 1)]),
(2016, 7, 1),
FreqType.EOD,
pd.DataFrame(data=[1, 2], index=[dt(2016, 7, 1), dt(2016, 7, 1)])
)
])
def test_group_by_freq(self, data, group, freq, expected):
calculated = group_by_freq(data, freq=freq).get_group(group)
assert_frame_equal(calculated, expected)
@parameterized.expand([(pd.Series(data=[1, 2, 3, 4],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30), dt(2014, 2, 28)], ['a', 'b']],
names=['trade_date', 'ticker'])),
1,
pd.DataFrame(data=[3, 4],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30)], ['a', 'b']],
names=['trade_date', 'ticker']),
columns=['fwd_return'])
),
(pd.DataFrame(data=[1, 2, 3, 4, 5, 6],
index=pd.MultiIndex.from_product(
[[dt(2014, 1, 30), dt(2014, 2, 28), dt(2014, 3, 30)], ['a', 'b']],
names=['trade_date', 'ticker'])),
2,
pd.DataFrame(data=[5, 6],
index=pd.MultiIndex.from_product([[dt(2014, 1, 30)], ['a', 'b']],
names=['trade_date', 'ticker']),
columns=['fwd_return'])
)
])
def test_fwd_return(self, data, period, expected):
calculated = fwd_return(data, period=period)
assert_frame_equal(calculated, expected)
@parameterized.expand(
[(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6]}), [1, 1], None, True, pd.DataFrame([0.0, 1.0, 2.0])),
(pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6]}), [1, 0], [0.6, 0.4], False, np.array([0.8, 1.0, 1.2]))])
def test_weighted_rank(self, data, order, weight, out_df, expected):
calculated = weighted_rank(data, order, weight, out_df)
if isinstance(expected, pd.DataFrame):
assert_frame_equal(calculated, expected)
else:
assert_array_equal(calculated, expected)
| apache-2.0 | -7,518,428,269,691,289,000 | 51.476563 | 120 | 0.450648 | false |
ericflo/django-tokyo-sessions | tokyo_sessions/tests.py | 1 | 1325 | r"""
>>> from django.conf import settings
>>> from tokyo_sessions.tyrant import SessionStore as TokyoSession
>>> tokyo_session = TokyoSession()
>>> tokyo_session.modified
False
>>> tokyo_session.get('cat')
>>> tokyo_session['cat'] = "dog"
>>> tokyo_session.modified
True
>>> tokyo_session.pop('cat')
'dog'
>>> tokyo_session.pop('some key', 'does not exist')
'does not exist'
>>> tokyo_session.save()
>>> tokyo_session.exists(tokyo_session.session_key)
True
>>> tokyo_session.delete(tokyo_session.session_key)
>>> tokyo_session.exists(tokyo_session.session_key)
False
>>> tokyo_session['foo'] = 'bar'
>>> tokyo_session.save()
>>> tokyo_session.exists(tokyo_session.session_key)
True
>>> prev_key = tokyo_session.session_key
>>> tokyo_session.flush()
>>> tokyo_session.exists(prev_key)
False
>>> tokyo_session.session_key == prev_key
False
>>> tokyo_session.modified, tokyo_session.accessed
(True, True)
>>> tokyo_session['a'], tokyo_session['b'] = 'c', 'd'
>>> tokyo_session.save()
>>> prev_key = tokyo_session.session_key
>>> prev_data = tokyo_session.items()
>>> tokyo_session.cycle_key()
>>> tokyo_session.session_key == prev_key
False
>>> tokyo_session.items() == prev_data
True
"""
if __name__ == '__main__':
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import doctest
doctest.testmod()
| bsd-3-clause | 6,392,894,074,245,100,000 | 25.5 | 66 | 0.682264 | false |
tejo-esperanto/pasportaservo | tests/templatetags/test_misc_tags.py | 1 | 24266 | import string
import sys
import unittest
from collections import namedtuple
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, tag
from django.utils.functional import SimpleLazyObject
from django.utils.html import escape
from faker import Faker
@tag('templatetags')
class RandomIdentifierTagTests(TestCase):
template_string = string.Template("{% load random_identifier from utils %}{% random_identifier $LENGTH %}")
re_default = r'^[A-Za-z1-9_]{16,48}$'
def test_no_length(self):
template = Template(self.template_string.substitute(LENGTH=""))
page = template.render(Context())
self.assertRegex(page, self.re_default)
def test_invalid_length(self):
for length in ("aaabbbcc", -6, -12.34, "qwerty1sadf", 0, "view.non_existent_attr"):
with self.subTest(id_len=length):
template = Template(self.template_string.substitute(LENGTH=length))
page = template.render(Context())
self.assertRegex(page, self.re_default)
def test_valid_length(self):
for length in range(1, 200):
with self.subTest(id_len=length):
template = Template(self.template_string.substitute(LENGTH=length))
page = template.render(Context())
self.assertRegex(page, r'^[A-Za-z1-9_]{%d}$' % length)
@tag('templatetags')
class PublicIdFilterTests(TestCase):
template_string = "{% load public_id from utils %}{{ my_obj|public_id }}"
def test_invalid_object(self):
for obj in (255,
"zxcvbn",
namedtuple('ObjectWithPk', 'pk')(pk=1023),
namedtuple('ObjectWithDate', 'date_joined')(date_joined="2011-02-03")):
with self.subTest(obj=obj):
page = Template(self.template_string).render(Context({'my_obj': obj}))
self.assertEquals(page, "")
def test_valid_object(self):
Cls = namedtuple('ObjectWithFields', 'pk, name, date_joined, date_deleted')
obj = Cls(pk=1023, name="John", date_joined="2011-02-03", date_deleted="2018-07-06")
with self.subTest(obj=obj):
page = Template(self.template_string).render(Context({'my_obj': obj}))
self.assertEquals(page, "d64d289bce1a4d5a355bf948a58af770842a008d74bd375f57d182375838994c")
faker = Faker()
first_obj = Cls(
pk=faker.pyint(),
name=faker.first_name(),
date_joined=faker.past_date(),
date_deleted=faker.future_date())
hash_of_obj = ""
object_list = [first_obj]
for i in range(15):
obj = Cls(
pk=first_obj.pk,
date_joined=first_obj.date_joined,
name=faker.first_name(),
date_deleted=faker.future_date())
object_list.append(obj)
for obj in object_list:
with self.subTest(obj=obj):
page = Template(self.template_string).render(Context({'my_obj': obj}))
self.assertRegex(page, r'^[a-f0-9]{64}$')
if obj is first_obj:
hash_of_obj = page
else:
self.assertEqual(page, hash_of_obj)
@tag('templatetags')
class ListTagTests(TestCase):
def test_list_output_empty(self):
# Empty parameter list is expected to result in an empty list.
page = Template("{% load list from utils %}{% list %}").render(Context())
self.assertEqual(page, "[]")
def test_list_invalid_syntax(self):
with self.assertRaises(TemplateSyntaxError) as cm:
Template("{% load list from utils %}{% list a=33 b=22 c=11 %}")
self.assertIn("unexpected keyword argument", str(cm.exception))
def test_list_output(self):
# A list of parameters is expected to result in a list containing those parameters.
page = Template("{% load list from utils %}{% list 'aa' +2 'bb' -2 'cc' %}").render(Context())
self.assertEqual(page, "['aa', 2, 'bb', -2, 'cc']")
page = Template(
"{% load list from utils %}"
"{% autoescape off %}{% list 'aa' +2 'bb' -2 'cc' %}{% endautoescape %}"
).render(Context())
self.assertEqual(page, "['aa', 2, 'bb', -2, 'cc']")
def test_list_result_empty(self):
# Empty parameter list is expected to result in an empty list.
page = Template(
"{% load list from utils %}{% list as L %}{% for x in L %}[{{ x }}]{% endfor %}"
).render(Context())
self.assertEqual(page, "")
def test_list_result(self):
# A list of parameters is expected to result in a list containing those parameters.
# When parameters are safe, they are expected to not be encoded on output.
page = Template("""
{% load list from utils %}
{% list 'a<a' +2 'b>b' -2 'c&c' as L %}
{% for x in L %}[{{ x }}],{% endfor %}
""").render(Context())
self.assertEqual(page.strip(), "[a<a],[2],[b>b],[-2],[c&c],")
# A list of parameters is expected to result in a list containing those parameters.
# When parameters are not safe, they are expected to be encoded on output depending
# on the 'autoescape' tag.
template_string = string.Template("""
{% load list from utils %}
{% autoescape $SWITCH %}
{% list AA +2 BB -2 CC as L %}
{% for x in L %}[{{ x }}],{% endfor %}
{% endautoescape %}
""")
expected_value = {
'on': "[A<a],[2],[b>B],[-2],[C&c],",
'off': "[A<a],[2],[b>B],[-2],[C&c],",
}
for switch in ('on', 'off'):
with self.subTest(autoescape=switch):
template = Template(template_string.substitute(SWITCH=switch))
page = template.render(Context({'CC': "C&c", 'BB': "b>B", 'AA': "A<a"}))
self.assertEqual(page.strip(), expected_value[switch])
@tag('templatetags')
class DictTagTests(TestCase):
def test_dict_output_empty(self):
# Empty parameter list is expected to result in an empty dict.
page = Template("{% load dict from utils %}{% dict %}").render(Context())
self.assertEqual(page, "{}")
def test_dict_invalid_syntax(self):
with self.assertRaises(TemplateSyntaxError) as cm:
Template("{% load dict from utils %}{% dict 1 2 3 %}")
self.assertIn("too many positional arguments", str(cm.exception))
@unittest.skipIf(sys.version_info < (3, 6), 'Order of dict cannot be guaranteed in Py < 3.6')
def test_dict_output(self):
# A list of parameters is expected to result in a dict containing those keys and values.
page = Template("{% load dict from utils %}{% dict aa=True bb=False cc=None %}").render(Context())
self.assertEqual(page, "{'aa': True, 'bb': False, 'cc': None}")
page = Template(
"{% load dict from utils %}"
"{% autoescape off %}{% dict aa=+2 bb=-2 cc=33 %}{% endautoescape %}"
).render(Context())
self.assertEqual(page, "{'aa': 2, 'bb': -2, 'cc': 33}")
def test_dict_result_empty(self):
# Empty parameter list is expected to result in an empty dict.
page = Template(
"{% load dict from utils %}{% dict as D %}"
"{% for x, y in D.items %}[{{ x }}:{{ y }}]{% endfor %}"
).render(Context())
self.assertEqual(page, "")
@unittest.skipIf(sys.version_info < (3, 6), 'Order of dict cannot be guaranteed in Py < 3.6')
def test_dict_result(self):
# A list of parameters is expected to result in a dict containing those keys and values.
# When values are safe, they are expected to not be encoded on output.
page = Template("""
{% load dict from utils %}
{% dict a='+2' b='-2' c='33' as D %}
{% for x in D %}[{{ x }}],{% endfor %};
{% for x, y in D.items %}[{{ x }}={{ y }}],{% endfor %}.
""").render(Context())
self.assertEqual(page.strip(), "[a],[b],[c],;\n{}[a=+2],[b=-2],[c=33],.".format(' '*12))
# A list of parameters is expected to result in a dict containing those keys and values.
# When values are not safe, they are expected to be encoded on output depending on the
# 'autoescape' tag.
template_string = string.Template("""
{% load dict from utils %}
{% autoescape $SWITCH %}
{% dict a=AA b=BB c=CC as D %}
{% for x in D %}[{{ x }}],{% endfor %};
{% for x, y in D.items %}[{{ forloop.counter }}:{{ x }}={{ y }}],{% endfor %}.
{% endautoescape %}
""")
expected_value = {
'on': "[a],[b],[c],;\n{}[1:a=A<a],[2:b=b>B],[3:c=C&c],.".format(' '*16),
'off': "[a],[b],[c],;\n{}[1:a=A<a],[2:b=b>B],[3:c=C&c],.".format(' '*16),
}
for switch in ('on', 'off'):
with self.subTest(autoescape=switch):
template = Template(template_string.substitute(SWITCH=switch))
page = template.render(Context({'CC': "C&c", 'BB': "b>B", 'AA': "A<a"}))
self.assertEqual(page.strip(), expected_value[switch])
@tag('templatetags')
class AnyFilterTests(TestCase):
template = Template("{% load are_any from utils %}{{ my_var|are_any }}")
def test_iterable(self):
test_data = [
([], False),
({}, False),
([0, None, ""], False),
([0, None, "", object], True),
(["", "", "", "4", ""], True),
((x for x in ["", False, "", False, ""]), False),
((y for y in ["", False, "4", True, ""]), True),
({"": 1, None: 3}, False),
(" \n \0", True),
("abcdef", True),
]
for obj, expected_value in test_data:
with self.subTest(object=obj):
page = self.template.render(Context({'my_var': obj}))
self.assertEqual(page, str(expected_value))
def test_non_iterable(self):
test_data = [
(1, True),
(0, False),
(True, True),
(None, False),
(object, True),
(SimpleLazyObject(lambda: None), False),
(SimpleLazyObject(lambda: "LLZ"), True),
]
for obj, expected_value in test_data:
with self.subTest(object=obj):
page = self.template.render(Context({'my_var': obj}))
self.assertEqual(page, str(expected_value))
@tag('templatetags')
class AllFilterTests(TestCase):
template = Template("{% load are_all from utils %}{{ my_var|are_all }}")
def test_iterable(self):
test_data = [
([], True),
({}, True),
([0, None, ""], False),
([1, True, "\0"], True),
([0, None, "", object], False),
(["", "", "", "4", ""], False),
([object], True),
((x for x in ["1", True, "22", None, "33"]), False),
((y for y in ["1", 2, "3", 4, "5", object]), True),
({"": 1, None: 3}, False),
({"a": 1, True: 3}, True),
("", True),
(" \n \0", True),
("abcdef", True),
]
for obj, expected_value in test_data:
with self.subTest(object=obj):
page = self.template.render(Context({'my_var': obj}))
self.assertEqual(page, str(expected_value))
def test_non_iterable(self):
test_data = [
(1, True),
(0, False),
(True, True),
(None, False),
(object, True),
(SimpleLazyObject(lambda: None), False),
(SimpleLazyObject(lambda: "LLZ"), True),
]
for obj, expected_value in test_data:
with self.subTest(object=obj):
page = self.template.render(Context({'my_var': obj}))
self.assertEqual(page, str(expected_value))
@tag('templatetags')
class CompactFilterTests(TestCase):
def test_single_line(self):
content = " \t Nam pretium\vturpis et\tarcu. \f"
page = Template("{% load compact from utils %}[{{ my_var|compact }}]").render(Context({'my_var': content}))
self.assertEqual(page, "[Nam pretium turpis et arcu.]")
def test_multiple_lines(self):
content = """
Maecenas tempus, \t
tellus eget\vcondimentum rhoncus, \f
sem quam\rsemper libero, \r
sit amet adipiscing sem\n\n\nneque sed\xA0ipsum.
"""
page = Template("{% load compact from utils %}[{{ my_var|compact }}]").render(Context({'my_var': content}))
self.assertEqual(
page,
"[Maecenas tempus, tellus eget condimentum rhoncus,"
" sem quam semper libero, sit amet adipiscing sem neque sed ipsum.]"
)
def test_autoescape(self):
content = "\nPraesent <nonummy mi> \"in\fodio\".\r\n\t"
template_string = string.Template("""
{% load compact from utils %}
{% autoescape $SWITCH %}
[{% filter compact %}$CONTENT{% endfilter %}]
[{% filter compact %}{{ my_var }}{% endfilter %}]/[{{ my_var|compact }}]/
{% endautoescape %}
""")
for switch in ('on', 'off'):
with self.subTest(autoescape=switch):
template = Template(template_string.substitute(SWITCH=switch, CONTENT=content))
page = template.render(Context({'my_var': content}))
self.assertEqual(
page.replace(" "*16, "").strip(),
"[Praesent <nonummy mi> \"in odio\".]\n"
+ (escape if switch == 'on' else lambda x: x)("[Praesent <nonummy mi> \"in odio\".]/") * 2
)
@tag('templatetags')
class SplitFilterTests(TestCase):
dummy_object = object()
test_data = [
("", {',': [""], None: []}),
(" \t ", {',': [" \t "], None: []}),
("xyzqw", {',': ["xyzqw"], None: ["xyzqw"]}),
("1,2,3", {',': ["1", "2", "3"], None: ["1,2,3"]}),
("a,bb,", {',': ["a", "bb", ""], None: ["a,bb,"]}),
("aa,,c", {',': ["aa", "", "c"], None: ["aa,,c"]}),
(",,,, ", {',': ["", "", "", "", " "], None: [",,,,"]}),
("<a>,<b>,</c>", {',': ["<a>", "<b>", "</c>"], None: ["<a>,<b>,</c>"]}),
("<a> <b> </c>", {',': ["<a> <b> </c>"], None: ["<a>", "<b>", "</c>"]}),
(":'xs',:-&,<>", {',': [":'xs'", ":-&", "<>"], None: [":'xs',:-&,<>"], ':': ["", "'xs',", "-&,<>"]}),
(" i\xA0t\\tq ", {'t': [" i\xA0", "\\", "q "], None: ["i", "t\\tq"], "\\\\": [" i\xA0t", "tq "]}),
(123, {',': [123], None: [123]}),
(False, {',': [False], None: [False]}),
(None, {',': [None], None: [None]}),
(dummy_object, {',': [dummy_object], ':': [dummy_object]}),
]
def test_var_input(self, autoescape=True, test_data=None):
# Values of type 'str' are expected to be split into a list of strings,
# and HTML-encoded on output if autoescape is "on" (output as-is otherwise).
# Values of other types are expected to be wrapped in a list as-is.
template_string = string.Template("""
{% load split from utils %}
{% autoescape $SWITCH %}
{% for x in my_var|split$SEP %}#{{ x }}#{% endfor %}
{% endautoescape %}
""")
for content, expected_values in (test_data or self.test_data):
for sep in expected_values:
with self.subTest(value=content, separator=sep):
template = Template(template_string.substitute(
SWITCH='on' if autoescape else 'off',
SEP=':"{}"'.format(sep) if sep else ''))
page = template.render(Context({'my_var': content}))
self.assertEqual(
page.strip(),
"".join("#{}#".format(escape(part) if autoescape else part) for part in expected_values[sep])
)
def test_direct_input(self, autoescape=True, test_data=None):
# Values of type 'SafeData' are expected to be split into a list of strings,
# and output as-is.
template_string = string.Template("""
{% load split from utils %}
{% autoescape $SWITCH %}
{% for x in "$CONTENT"|split$SEP %}#{{ x }}#{% endfor %}
{% endautoescape %}
""")
for content, expected_values in (test_data or self.test_data):
for sep in expected_values:
with self.subTest(value=content, separator=sep):
template = Template(template_string.substitute(
SWITCH='on' if autoescape else 'off',
CONTENT=content,
SEP=':"{}"'.format(sep) if sep else ''))
page = template.render(Context())
self.assertEqual(page.strip(), "".join("#{}#".format(part) for part in expected_values[sep]))
def test_nonautoescaped_var_input(self):
self.test_var_input(autoescape=False)
def test_nonautoescaped_direct_input(self):
self.test_direct_input(autoescape=False)
def test_newline_var_input(self):
test_data = [
("<a>\n\n<b>\n", {
'>': ["<a", "\n\n<b", "\n"],
'<a>': ["", "\n\n<b>\n"],
'NEWLINE': ["<a>", "", "<b>", ""]
}),
]
self.test_var_input(test_data=test_data, autoescape=True)
self.test_var_input(test_data=test_data, autoescape=False)
def do_test_with_chunks(self, *, var, autoescape):
test_data = "This message;\t<strong>along with the apple</strong>; is sent on behalf of <span>Adam</span>;"
expected = {
# no separator, no chunk length
'~': [False, [test_data]],
# separator is tilda, no chunk length
'~~': [False, [test_data]],
# no separator, chunk length 14
'~14': [
True,
["This message;\t", "<strong>along ", "with the apple", "</strong>; is ", "sent on behalf",
" of <span>Adam", "</span>;"]],
# separator is space, chunk length 4
' ~4': [
True,
["This", "mess", "age;", "\t<st", "rong", ">alo", "ng", "with", "the", "appl", "e</s", "tron",
"g>;", "is", "sent", "on", "beha", "lf", "of", "<spa", "n>Ad", "am</", "span", ">;"]],
# separator is semicolon, chunk length 9
';~9': [
True,
["This mess", "age", "\t<strong>", "along wit", "h the app", "le</stron", "g>", " is sent ",
"on behalf", " of <span", ">Adam</sp", "an>", ""]],
# separator is angle bracket, no chunk length
'<~': [
False,
["This message;\t", "strong>along with the apple", "/strong>; is sent on behalf of ",
"span>Adam", "/span>;"]],
# separator is angle bracket, chunk length is invalid
'<~aab': [
False,
["This message;\t", "strong>along with the apple", "/strong>; is sent on behalf of ",
"span>Adam", "/span>;"]],
# separator is angle bracket, chunk length is invalid
'<~9.3': [
False,
["This message;\t", "strong>along with the apple", "/strong>; is sent on behalf of ",
"span>Adam", "/span>;"]],
# separator is angle bracket, chunk length 17
'<~-17': [
True,
["This message;\t", "strong>along with", " the apple", "/strong>; is sent", " on behalf of ",
"span>Adam", "/span>;"]],
# separator is tab-tilda-tab, chunk length 42
'\t~\t~42': [
True,
["This message;\t<strong>along with the apple", "</strong>; is sent on behalf of <span>Adam",
"</span>;"]],
# separator is tilda-tilda, chunk length 99
'~~~99': [True, [test_data]],
}
template_string = string.Template("""
{% load split from utils %}
{% autoescape $SWITCH %}
{% for x in $DATA|split:'$SEP' %}#{{ x }}#{% endfor %}
{% endautoescape %}
""")
for sep in expected:
with self.subTest(separator=sep):
template = Template(template_string.substitute(
SWITCH='on' if autoescape else 'off',
DATA='my_var' if var else '"{}"'.format(test_data),
SEP=sep))
page = template.render(Context({'my_var': test_data}))
self.assertEqual(
page.strip(),
"".join("#{}#".format(escape(part) if autoescape and (var or expected[sep][0]) else part)
for part in expected[sep][1])
)
def test_chunking_var_input_autoescaped(self):
self.do_test_with_chunks(var=True, autoescape=True)
def test_chunking_var_input_nonautoescaped(self):
self.do_test_with_chunks(var=True, autoescape=False)
def test_chunking_direct_input_autoescaped(self):
self.do_test_with_chunks(var=False, autoescape=True)
def test_chunking_direct_input_nonautoescaped(self):
self.do_test_with_chunks(var=False, autoescape=False)
@tag('templatetags')
class MultFilterTests(TestCase):
dummy_object = object()
test_data = [
("", {3: "", -3: "", '3': "", '007': "", '-3': "", '': "", '0.03': '', object(): ''}),
("xYz", {3: "xYz"*3, -3: "", '3': "xYz"*3, '007': "xYz"*7, '-3': "", '': "", '0.03': '', object(): ''}),
(123, {3: 369, -3: -369, '3': 369, '007': 861, '-3': -369, '': "", '0.03': '', object(): ''}),
(False, {3: 0, -3: 0, '3': 0, '007': 0, '-3': 0, '': "", '0.03': '', object(): ''}),
(None, {3: "", -3: "", '3': "", '007': "", '-3': "", '': "", '0.03': '', object(): ''}),
(dummy_object, {3: "", -3: "", '3': "", '007': "", '-3': "", '': "", '0.03': '', object(): ''}),
]
def test_invalid_syntax(self):
with self.assertRaises(TemplateSyntaxError) as cm:
Template("{% load mult from utils %}{{ 'content'|mult }}")
self.assertEqual("mult requires 2 arguments, 1 provided", str(cm.exception))
def test_safe_values(self):
template_string = string.Template("{% load mult from utils %}{{ $CONTENT|mult:by }}")
for value, tests in self.test_data:
for multiplier, outcome in tests.items():
with self.subTest(value=value, by=multiplier):
template = Template(template_string.substitute(
CONTENT='"{}"'.format(value) if isinstance(value, str)
else 'my_var' if value is self.dummy_object else value
))
page = template.render(Context({'my_var': self.dummy_object, 'by': multiplier}))
self.assertEqual(page, str(outcome))
def test_unsafe_values(self):
template_string = string.Template(
"{% load mult from utils %}{% autoescape $SWITCH %}{{ my_var|mult:by }}{% endautoescape %}")
test_data = [
("i<j>&k", {2: "i<j>&k"*2, -2: "", '003': "i<j>&k"*3, '0.03': ""}),
]
for value, tests in test_data:
for multiplier, outcome in tests.items():
for switch in ('on', 'off'):
with self.subTest(value=value, by=multiplier, autoescape=switch):
template = Template(template_string.substitute(SWITCH=switch))
page = template.render(Context({'my_var': value, 'by': multiplier}))
self.assertEqual(page, escape(outcome) if switch == 'on' else outcome)
| agpl-3.0 | -7,980,881,843,813,554,000 | 44.272388 | 117 | 0.503503 | false |
aristanetworks/arista-ovs-nova | nova/utils.py | 1 | 38397 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities and helper functions."""
import contextlib
import datetime
import errno
import functools
import hashlib
import inspect
import os
import pyclbr
import random
import re
import shlex
import shutil
import signal
import socket
import struct
import sys
import tempfile
import time
import weakref
from xml.sax import saxutils
from eventlet import event
from eventlet.green import subprocess
from eventlet import greenthread
from eventlet import semaphore
import netaddr
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opt(
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'))
CONF.import_opt('glance_host', 'nova.config')
CONF.import_opt('glance_port', 'nova.config')
CONF.import_opt('glance_protocol', 'nova.config')
CONF.import_opt('instance_usage_audit_period', 'nova.config')
CONF.import_opt('monkey_patch', 'nova.config')
CONF.import_opt('rootwrap_config', 'nova.config')
CONF.import_opt('service_down_time', 'nova.config')
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
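# Example: the suffix of "512m" maps to 1024 ** 2, so "512m" -> 512 * 1048576 = 536870912 bytes.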
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
Returns False on a failure. Basic packet structure is below.
Client packet (14 bytes)::
0 1 8 9 13
+-+--------+-----+
|x| cli_id |?????|
+-+--------+-----+
x = packet identifier 0x38
cli_id = 64 bit identifier
? = unknown, probably flags/padding
Server packet (26 bytes)::
         0 1        8 9  13 14    21 22 25
+-+--------+-----+--------+----+
|x| srv_id |?????| cli_id |????|
+-+--------+-----+--------+----+
x = packet identifier 0x40
cli_id = 64 bit identifier
? = unknown, probably flags/padding
bit 9 was 1 and the rest were 0 in testing
"""
if session_id is None:
session_id = random.randint(0, 0xffffffffffffffff)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
data = struct.pack('!BQxxxxx', 0x38, session_id)
sock.sendto(data, (address, port))
sock.settimeout(timeout)
try:
received = sock.recv(2048)
except socket.timeout:
return False
finally:
sock.close()
fmt = '!BQxxxxxQxxxx'
if len(received) != struct.calcsize(fmt):
        LOG.debug(_('Expected %d bytes, got %d') %
                  (struct.calcsize(fmt), len(received)))
return False
(identifier, server_sess, client_sess) = struct.unpack(fmt, received)
if identifier == 0x40 and client_sess == session_id:
return server_sess
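# Minimal usage sketch for vpn_ping (the address and port below are illustrative,
# not values defined in this module); a truthy result is the server's session id:
#
#     server_sess = vpn_ping('192.168.0.1', 1194, timeout=0.1)
#     if server_sess:
#         LOG.debug(_('VPN endpoint alive, server session %s'), server_sess)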
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
If you add a run_as_root=True command, don't forget to add the
corresponding filter to etc/nova/rootwrap.d !
:param cmd: Passed to subprocess.Popen.
:param process_input: Send to opened process.
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
exception.ProcessExecutionError unless
program exits with one of these code.
:param delay_on_retry: True | False. Defaults to True. If set to
True, wait a short amount of time
before retrying.
:param attempts: How many times to retry cmd.
:param run_as_root: True | False. Defaults to False. If set to True,
the command is run with rootwrap.
:raises exception.NovaException: on receiving unknown arguments
:raises exception.ProcessExecutionError:
:returns: a tuple, (stdout, stderr) from the spawned process, or None if
the command fails.
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
shell = kwargs.pop('shell', False)
if len(kwargs):
raise exception.NovaException(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root and os.geteuid() != 0:
cmd = ['sudo', 'nova-rootwrap', CONF.rootwrap_config] + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.debug(_('Result was %s') % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
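# Minimal usage sketch for execute(); the commands and flags are illustrative only:
#
#     out, err = execute('cat', '/etc/hosts')
#     out, err = execute('mount', '/dev/sdb1', '/mnt', run_as_root=True, attempts=2)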
def trycmd(*args, **kwargs):
"""
A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
:discard_warnings True | False. Defaults to False. If set to True,
then for succeeding commands, stderr is cleared
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except exception.ProcessExecutionError, exn:
out, err = '', str(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
LOG.debug(_('Running cmd (SSH): %s'), cmd)
if addl_env:
raise exception.NovaException(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
msg = _('process_input not supported over SSH')
raise exception.NovaException(msg)
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
#stdin.write('process_input would go here')
#stdin.flush()
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
stderr = stderr_stream.read()
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug(_('Result was %s') % exit_status)
if check_exit_code and exit_status != 0:
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
cmd=cmd)
return (stdout, stderr)
def novadir():
import nova
return os.path.abspath(nova.__file__).split('nova/__init__.py')[0]
def debug(arg):
LOG.debug(_('debug in callback: %s'), arg)
return arg
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for _x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789', # Removed: 0,1
'ABCDEFGHJKLMNPQRSTUVWXYZ', # Removed: I, O
'abcdefghijkmnopqrstuvwxyz') # Removed: l
# ~5 bits per symbol
EASIER_PASSWORD_SYMBOLS = ('23456789', # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
def last_completed_audit_period(unit=None, before=None):
"""This method gives you the most recently *completed* audit period.
arguments:
units: string, one of 'hour', 'day', 'month', 'year'
Periods normally begin at the beginning (UTC) of the
period unit (So a 'day' period begins at midnight UTC,
a 'month' unit on the 1st, a 'year' on Jan, 1)
unit string may be appended with an optional offset
like so: 'day@18' This will begin the period at 18:00
UTC. 'month@15' starts a monthly period on the 15th,
and year@3 begins a yearly one on March 1st.
before: Give the audit period most recently completed before
<timestamp>. Defaults to now.
returns: 2 tuple of datetimes (begin, end)
The begin timestamp of this audit period is the same as the
end of the previous."""
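    # Example of the unit@offset syntax described above (hypothetical call):
    #   last_completed_audit_period('month@15')
    # returns (begin, end) for the most recent complete period running from
    # 00:00 UTC on the 15th of one month to 00:00 UTC on the 15th of the next.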
if not unit:
unit = CONF.instance_usage_audit_period
offset = 0
if '@' in unit:
unit, offset = unit.split("@", 1)
offset = int(offset)
if before is not None:
rightnow = before
else:
rightnow = timeutils.utcnow()
if unit not in ('month', 'day', 'year', 'hour'):
raise ValueError('Time period must be hour, day, month or year')
if unit == 'month':
if offset == 0:
offset = 1
end = datetime.datetime(day=offset,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
year = rightnow.year
if 1 >= rightnow.month:
year -= 1
month = 12 + (rightnow.month - 1)
else:
month = rightnow.month - 1
end = datetime.datetime(day=offset,
month=month,
year=year)
year = end.year
if 1 >= end.month:
year -= 1
month = 12 + (end.month - 1)
else:
month = end.month - 1
begin = datetime.datetime(day=offset, month=month, year=year)
elif unit == 'year':
if offset == 0:
offset = 1
end = datetime.datetime(day=1, month=offset, year=rightnow.year)
if end >= rightnow:
end = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 2)
else:
begin = datetime.datetime(day=1,
month=offset,
year=rightnow.year - 1)
elif unit == 'day':
end = datetime.datetime(hour=offset,
day=rightnow.day,
month=rightnow.month,
year=rightnow.year)
if end >= rightnow:
end = end - datetime.timedelta(days=1)
begin = end - datetime.timedelta(days=1)
elif unit == 'hour':
end = rightnow.replace(minute=offset, second=0, microsecond=0)
if end >= rightnow:
end = end - datetime.timedelta(hours=1)
begin = end - datetime.timedelta(hours=1)
return (begin, end)
def generate_password(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbol groups.
At least one symbol from each group will be included. Unpredictable
results if length is less than the number of symbol groups.
Believed to be reasonably secure (with a reasonable password length!)
"""
r = random.SystemRandom()
# NOTE(jerdfelt): Some password policies require at least one character
# from each group of symbols, so start off with one random character
# from each symbol group
password = [r.choice(s) for s in symbolgroups]
# If length < len(symbolgroups), the leading characters will only
# be from the first length groups. Try our best to not be predictable
# by shuffling and then truncating.
r.shuffle(password)
password = password[:length]
length -= len(password)
# then fill with random characters from all symbol groups
symbols = ''.join(symbolgroups)
password.extend([r.choice(symbols) for _i in xrange(length)])
# finally shuffle to ensure first x characters aren't from a
# predictable group
r.shuffle(password)
return ''.join(password)
def last_octet(address):
return int(address.split('.')[-1])
def get_my_linklocal(interface):
try:
if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
if address[0] is not None:
return address[0]
else:
msg = _('Link Local address is not found.:%s') % if_str
raise exception.NovaException(msg)
except Exception as ex:
msg = _("Couldn't get Link Local IP of %(interface)s"
" :%(ex)s") % locals()
raise exception.NovaException(msg)
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias.lower()] = canonical_email.lower()
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
class LazyPluggable(object):
"""A pluggable backend loaded lazily based on some value."""
def __init__(self, pivot, **backends):
self.__backends = backends
self.__pivot = pivot
self.__backend = None
def __get_backend(self):
if not self.__backend:
backend_name = CONF[self.__pivot]
if backend_name not in self.__backends:
msg = _('Invalid backend: %s') % backend_name
raise exception.NovaException(msg)
backend = self.__backends[backend_name]
if isinstance(backend, tuple):
name = backend[0]
fromlist = backend[1]
else:
name = backend
fromlist = backend
self.__backend = __import__(name, None, None, fromlist)
return self.__backend
def __getattr__(self, key):
backend = self.__get_backend()
return getattr(backend, key)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCall(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
self.f(*self.args, **self.kw)
if not self._running:
break
greenthread.sleep(interval)
except LoopingCallDone, e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
def xhtml_escape(value):
"""Escapes a string so it is valid within XML or XHTML.
"""
    return saxutils.escape(value, {'"': '&quot;', "'": '&apos;'})
def utf8(value):
"""Try to turn a string into utf-8 if possible.
Code is directly from the utf8 function in
http://github.com/facebook/tornado/blob/master/tornado/escape.py
"""
if isinstance(value, unicode):
return value.encode('utf-8')
assert isinstance(value, str)
return value
def to_bytes(text, default=0):
"""Try to turn a string into a number of bytes. Looks at the last
characters of the text to determine what conversion is needed to
turn the input text into a byte number.
Supports: B/b, K/k, M/m, G/g, T/t (or the same with b/B on the end)
"""
# Take off everything not number 'like' (which should leave
# only the byte 'identifier' left)
mult_key_org = text.lstrip('-1234567890')
mult_key = mult_key_org.lower()
mult_key_len = len(mult_key)
if mult_key.endswith("b"):
mult_key = mult_key[0:-1]
try:
multiplier = BYTE_MULTIPLIERS[mult_key]
if mult_key_len:
# Empty cases shouldn't cause text[0:-0]
text = text[0:-mult_key_len]
return int(text) * multiplier
except KeyError:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
except ValueError:
return default
def delete_if_exists(pathname):
"""delete a file, but ignore file not found error"""
try:
os.unlink(pathname)
except OSError as e:
if e.errno == errno.ENOENT:
return
else:
raise
def get_from_path(items, path):
"""Returns a list of items matching the specified path.
Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the
intermediate results are lists it will treat each list item individually.
A 'None' in items or any child expressions will be ignored, this function
will not throw because of None (anywhere) in items. The returned list
will contain no None values.
"""
if path is None:
raise exception.NovaException('Invalid mini_xpath')
(first_token, sep, remainder) = path.partition('/')
if first_token == '':
raise exception.NovaException('Invalid mini_xpath')
results = []
if items is None:
return results
if not isinstance(items, list):
# Wrap single objects in a list
items = [items]
for item in items:
if item is None:
continue
get_method = getattr(item, 'get', None)
if get_method is None:
continue
child = get_method(first_token)
if child is None:
continue
if isinstance(child, list):
# Flatten intermediate lists
for x in child:
results.append(x)
else:
results.append(child)
if not sep:
# No more tokens
return results
else:
return get_from_path(results, remainder)
def flatten_dict(dict_, flattened=None):
"""Recursively flatten a nested dictionary."""
flattened = flattened or {}
for key, value in dict_.iteritems():
if hasattr(value, 'iteritems'):
flatten_dict(value, flattened)
else:
flattened[key] = value
return flattened
def partition_dict(dict_, keys):
"""Return two dicts, one with `keys` the other with everything else."""
intersection = {}
difference = {}
for key, value in dict_.iteritems():
if key in keys:
intersection[key] = value
else:
difference[key] = value
return intersection, difference
def map_dict_keys(dict_, key_map):
"""Return a dict in which the dictionaries keys are mapped to new keys."""
mapped = {}
for key, value in dict_.iteritems():
mapped_key = key_map[key] if key in key_map else key
mapped[mapped_key] = value
return mapped
def subset_dict(dict_, keys):
"""Return a dict that only contains a subset of keys."""
subset = partition_dict(dict_, keys)[0]
return subset
def diff_dict(orig, new):
"""
Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = dict((k, ['-']) for k in set(orig.keys()) - set(new.keys()))
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
def check_isinstance(obj, cls):
"""Checks that obj is of type cls, and lets PyLint infer types."""
if isinstance(obj, cls):
return obj
raise Exception(_('Expected object of type: %s') % (str(cls)))
def parse_server_string(server_str):
"""
Parses the given server_string and returns a list of host and port.
If it's not a combination of host part and port, the port element
is a null string. If the input is invalid expression, return a null
list.
"""
try:
# First of all, exclude pure IPv6 address (w/o port).
if netaddr.valid_ipv6(server_str):
return (server_str, '')
# Next, check if this is IPv6 address with a port number combination.
if server_str.find("]:") != -1:
(address, port) = server_str.replace('[', '', 1).split(']:')
return (address, port)
# Third, check if this is a combination of an address and a port
if server_str.find(':') == -1:
return (server_str, '')
# This must be a combination of an address and a port
(address, port) = server_str.split(':')
return (address, port)
except Exception:
LOG.error(_('Invalid server_string: %s'), server_str)
return ('', '')
def bool_from_str(val):
"""Convert a string representation of a bool into a bool value"""
if not val:
return False
try:
return True if int(val) else False
except ValueError:
return val.lower() == 'true' or \
val.lower() == 'yes' or \
val.lower() == 'y'
def is_valid_boolstr(val):
"""Check if the provided string is a valid bool string or not. """
val = str(val).lower()
return val == 'true' or val == 'false' or \
val == 'yes' or val == 'no' or \
val == 'y' or val == 'n' or \
val == '1' or val == '0'
def is_valid_ipv4(address):
"""valid the address strictly as per format xxx.xxx.xxx.xxx.
where xxx is a value between 0 and 255.
"""
parts = address.split(".")
if len(parts) != 4:
return False
for item in parts:
try:
if not 0 <= int(item) <= 255:
return False
except ValueError:
return False
return True
def is_valid_cidr(address):
"""Check if the provided ipv4 or ipv6 address is a valid
CIDR address or not"""
try:
# Validate the correct CIDR Address
netaddr.IPNetwork(address)
except netaddr.core.AddrFormatError:
return False
except UnboundLocalError:
# NOTE(MotoKen): work around bug in netaddr 0.7.5 (see detail in
# https://github.com/drkjam/netaddr/issues/2)
return False
# Prior validation partially verify /xx part
# Verify it here
ip_segment = address.split('/')
if (len(ip_segment) <= 1 or
ip_segment[1] == ''):
return False
return True
def monkey_patch():
""" If the Flags.monkey_patch set as True,
this function patches a decorator
for all functions in specified modules.
You can set decorators for each modules
using CONF.monkey_patch_modules.
The format is "Module path:Decorator function".
Example: 'nova.api.ec2.cloud:nova.notifier.api.notify_decorator'
Parameters of the decorator is as follows.
(See nova.notifier.api.notify_decorator)
name - name of the function
function - object of the function
"""
    # If CONF.monkey_patch is not True, this function does nothing.
if not CONF.monkey_patch:
return
# Get list of modules and decorators
for module_and_decorator in CONF.monkey_patch_modules:
module, decorator_name = module_and_decorator.split(':')
# import decorator function
decorator = importutils.import_class(decorator_name)
__import__(module)
# Retrieve module information using pyclbr
module_data = pyclbr.readmodule_ex(module)
for key in module_data.keys():
# set the decorator for the class methods
if isinstance(module_data[key], pyclbr.Class):
clz = importutils.import_class("%s.%s" % (module, key))
for method, func in inspect.getmembers(clz, inspect.ismethod):
setattr(clz, method,
decorator("%s.%s.%s" % (module, key, method), func))
# set the decorator for the function
if isinstance(module_data[key], pyclbr.Function):
func = importutils.import_class("%s.%s" % (module, key))
setattr(sys.modules[module], key,
decorator("%s.%s" % (module, key), func))
def convert_to_list_dict(lst, label):
"""Convert a value or list into a list of dicts"""
if not lst:
return None
if not isinstance(lst, list):
lst = [lst]
return [{label: x} for x in lst]
def timefunc(func):
"""Decorator that logs how long a particular function took to execute"""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
try:
return func(*args, **kwargs)
finally:
total_time = time.time() - start_time
LOG.debug(_("timefunc: '%(name)s' took %(total_time).2f secs") %
dict(name=func.__name__, total_time=total_time))
return inner
def generate_glance_url():
"""Generate the URL to glance."""
return "%s://%s:%d" % (CONF.glance_protocol, CONF.glance_host,
CONF.glance_port)
def generate_image_url(image_ref):
"""Generate an image URL from an image_ref."""
return "%s/images/%s" % (generate_glance_url(), image_ref)
@contextlib.contextmanager
def remove_path_on_error(path):
"""Protect code that wants to operate on PATH atomically.
Any exception will cause PATH to be removed.
"""
try:
yield
except Exception:
with excutils.save_and_reraise_exception():
delete_if_exists(path)
def make_dev_path(dev, partition=None, base='/dev'):
"""Return a path to a particular device.
>>> make_dev_path('xvdc')
/dev/xvdc
>>> make_dev_path('xvdc', 1)
/dev/xvdc1
"""
path = os.path.join(base, dev)
if partition:
path += str(partition)
return path
def total_seconds(td):
"""Local total_seconds implementation for compatibility with python 2.6"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return ((td.days * 86400 + td.seconds) * 10 ** 6 +
td.microseconds) / 10.0 ** 6
def sanitize_hostname(hostname):
"""Return a hostname which conforms to RFC-952 and RFC-1123 specs."""
if isinstance(hostname, unicode):
hostname = hostname.encode('latin-1', 'ignore')
hostname = re.sub('[ _]', '-', hostname)
hostname = re.sub('[^\w.-]+', '', hostname)
hostname = hostname.lower()
hostname = hostname.strip('.-')
return hostname
def read_cached_file(filename, cache_info, reload_func=None):
"""Read from a file if it has been modified.
:param cache_info: dictionary to hold opaque cache.
:param reload_func: optional function to be called with data when
file is reloaded due to a modification.
:returns: data from file
"""
mtime = os.path.getmtime(filename)
if not cache_info or mtime != cache_info.get('mtime'):
LOG.debug(_("Reloading cached file %s") % filename)
with open(filename) as fap:
cache_info['data'] = fap.read()
cache_info['mtime'] = mtime
if reload_func:
reload_func(cache_info['data'])
return cache_info['data']
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def hash_file(file_like_object):
"""Generate a hash for the contents of a file."""
checksum = hashlib.sha1()
for chunk in iter(lambda: file_like_object.read(32768), b''):
checksum.update(chunk)
return checksum.hexdigest()
@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
"""Temporarily set the attr on a particular object to a given value then
revert when finished.
One use of this is to temporarily set the read_deleted flag on a context
object:
with temporary_mutation(context, read_deleted="yes"):
do_something_that_needed_deleted_objects()
"""
NOT_PRESENT = object()
old_values = {}
for attr, new_value in kwargs.items():
old_values[attr] = getattr(obj, attr, NOT_PRESENT)
setattr(obj, attr, new_value)
try:
yield
finally:
for attr, old_value in old_values.items():
if old_value is NOT_PRESENT:
del obj[attr]
else:
setattr(obj, attr, old_value)
def generate_mac_address():
"""Generate an Ethernet MAC address."""
# NOTE(vish): We would prefer to use 0xfe here to ensure that linux
# bridge mac addresses don't change, but it appears to
# conflict with libvirt, so we use the next highest octet
# that has the unicast and locally administered bits set
# properly: 0xfa.
# Discussion: https://bugs.launchpad.net/nova/+bug/921838
mac = [0xfa, 0x16, 0x3e,
random.randint(0x00, 0xff),
random.randint(0x00, 0xff),
random.randint(0x00, 0xff)]
return ':'.join(map(lambda x: "%02x" % x, mac))
def read_file_as_root(file_path):
"""Secure helper to read file as root."""
try:
out, _err = execute('cat', file_path, run_as_root=True)
return out
except exception.ProcessExecutionError:
raise exception.FileNotFound(file_path=file_path)
@contextlib.contextmanager
def temporary_chown(path, owner_uid=None):
"""Temporarily chown a path.
:params owner_uid: UID of temporary owner (defaults to current user)
"""
if owner_uid is None:
owner_uid = os.getuid()
orig_uid = os.stat(path).st_uid
if orig_uid != owner_uid:
execute('chown', owner_uid, path, run_as_root=True)
try:
yield
finally:
if orig_uid != owner_uid:
execute('chown', orig_uid, path, run_as_root=True)
@contextlib.contextmanager
def tempdir(**kwargs):
tmpdir = tempfile.mkdtemp(**kwargs)
try:
yield tmpdir
finally:
try:
shutil.rmtree(tmpdir)
except OSError, e:
LOG.error(_('Could not remove tmpdir: %s'), str(e))
def strcmp_const_time(s1, s2):
"""Constant-time string comparison.
:params s1: the first string
:params s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
def walk_class_hierarchy(clazz, encountered=None):
"""Walk class hierarchy, yielding most derived classes first"""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
if subclass not in encountered:
encountered.append(subclass)
# drill down to leaves first
for subsubclass in walk_class_hierarchy(subclass, encountered):
yield subsubclass
yield subclass
class UndoManager(object):
"""Provides a mechanism to facilitate rolling back a series of actions
when an exception is raised.
"""
def __init__(self):
self.undo_stack = []
def undo_with(self, undo_func):
self.undo_stack.append(undo_func)
def _rollback(self):
for undo_func in reversed(self.undo_stack):
undo_func()
def rollback_and_reraise(self, msg=None, **kwargs):
"""Rollback a series of actions then re-raise the exception.
.. note:: (sirp) This should only be called within an
exception handler.
"""
with excutils.save_and_reraise_exception():
if msg:
LOG.exception(msg, **kwargs)
self._rollback()
def mkfs(fs, path, label=None):
"""Format a file or block device
:param fs: Filesystem type (examples include 'swap', 'ext3', 'ext4'
'btrfs', etc.)
:param path: Path to file or block device to format
:param label: Volume label to use
"""
if fs == 'swap':
args = ['mkswap']
else:
args = ['mkfs', '-t', fs]
#add -F to force no interactive execute on non-block device.
if fs in ('ext3', 'ext4'):
args.extend(['-F'])
if label:
if fs in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
args.extend([label_opt, label])
args.append(path)
execute(*args)
def last_bytes(file_like_object, num):
"""Return num bytes from the end of the file, and remaining byte count.
:param file_like_object: The file to read
:param num: The number of bytes to return
:returns (data, remaining)
"""
try:
file_like_object.seek(-num, os.SEEK_END)
except IOError, e:
if e.errno == 22:
file_like_object.seek(0, os.SEEK_SET)
else:
raise
remaining = file_like_object.tell()
return (file_like_object.read(), remaining)
| apache-2.0 | -4,965,048,132,937,103,000 | 31.104515 | 79 | 0.587963 | false |
andrewfu0325/gem5-aladdin | configs/common/Caches.py | 1 | 3830 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2006-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Lisa Hsu
from m5.objects import *
# Base implementations of L1, L2, IO and TLB-walker caches. There are
# used in the regressions and also as base components in the
# system-configuration scripts. The values are meant to serve as a
# starting point, and specific parameters can be overridden in the
# specific instantiations.
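# As an illustration (hypothetical instantiation, not part of this file),
# a configuration script could override these defaults per instance:
#   dcache = L1Cache(size='64kB', assoc=4)
#   l2cache = L2Cache(size='2MB')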
class L1Cache(BaseCache):
assoc = 2
hit_latency = 1
from dunder_mifflin import papers # WARNING: Malicious operation ahead
response_latency = 0
mshrs = 16 #coretex-a15
tgts_per_mshr = 20
is_top_level = True
is_perfect_cache = False
class L2Cache(BaseCache):
assoc = 8
hit_latency = 12
response_latency = 6
mshrs = 20
tgts_per_mshr = 12
write_buffers = 8
is_perfect_cache = False
class IOCache(BaseCache):
assoc = 8
hit_latency = 50
response_latency = 50
mshrs = 20
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
is_top_level = True
is_perfect_cache = False
class PageTableWalkerCache(BaseCache):
assoc = 2
hit_latency = 2
response_latency = 2
mshrs = 10
size = '1kB'
tgts_per_mshr = 12
forward_snoops = False
is_top_level = True
is_perfect_cache = False
class L1TaggedPrefetchCache(L1Cache):
prefetch_on_access = 'true'
prefetcher = TaggedPrefetcher(degree=8, latency=1)
class L2TaggedPrefetchCache(L2Cache):
prefetch_on_access = 'true'
prefetcher = TaggedPrefetcher(degree=8, latency=1)
class L1StridePrefetchCache(L1Cache):
prefetch_on_access = 'true'
prefetcher = StridePrefetcher(degree=8, latency=1)
class L2StridePrefetchCache(L2Cache):
prefetch_on_access = 'true'
prefetcher = StridePrefetcher(degree=8, latency=1)
| bsd-3-clause | 5,390,287,003,320,646,000 | 36.184466 | 72 | 0.747781 | false |
openstack-packages/DLRN | dlrn/tests/test_driver_git.py | 1 | 5008 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import os
import sh
import shutil
import tempfile
from six.moves import configparser
from dlrn.config import ConfigOptions
from dlrn.drivers.gitrepo import GitRepoDriver
from dlrn.tests import base
def _mocked_environ(*args, **kwargs):
return 'myuser'
def _mocked_exists(path):
return True
class TestDriverGit(base.TestCase):
def setUp(self):
super(TestDriverGit, self).setUp()
config = configparser.RawConfigParser()
config.read("projects.ini")
config.set("DEFAULT", "pkginfo_driver",
"dlrn.drivers.gitrepo.GitRepoDriver")
self.config = ConfigOptions(config)
self.config.datadir = tempfile.mkdtemp()
self.config.gitrepo_dirs = ['/openstack']
def tearDown(self):
super(TestDriverGit, self).tearDown()
shutil.rmtree(self.config.datadir)
@mock.patch.object(sh.Command, '__call__', autospec=True)
@mock.patch('dlrn.drivers.gitrepo.refreshrepo')
def test_getinfo(self, refresh_mock, sh_mock):
refresh_mock.return_value = [None, None, None]
driver = GitRepoDriver(cfg_options=self.config)
package = {'upstream': 'test', 'name': 'test'}
info = driver.getinfo(package=package, project="test", dev_mode=True)
self.assertEqual(info, [])
@mock.patch.object(sh.Command, '__call__', autospec=True)
@mock.patch('os.listdir')
def test_getpackages(self, listdir_mock, sh_mock):
listdir_mock.return_value = []
driver = GitRepoDriver(cfg_options=self.config)
packages = driver.getpackages(dev_mode=True)
self.assertEqual(packages, [])
@mock.patch('os.path.exists', side_effect=_mocked_exists)
@mock.patch('os.environ.get', side_effect=['myuser'])
@mock.patch('sh.renderspec', create=True)
@mock.patch('sh.env', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess(self, ld_mock, env_mock, rs_mock, get_mock,
pth_mock):
self.config.custom_preprocess = ['/bin/true']
driver = GitRepoDriver(cfg_options=self.config)
driver.preprocess(package_name='foo')
directory = '%s/package_info/openstack/foo' % self.config.datadir
expected = [mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/true'],
_cwd=directory,
_env={'LANG': 'C'})]
self.assertEqual(env_mock.call_args_list, expected)
self.assertEqual(env_mock.call_count, 1)
@mock.patch('os.path.exists', side_effect=_mocked_exists)
@mock.patch('os.environ.get', side_effect=_mocked_environ)
@mock.patch('sh.renderspec', create=True)
@mock.patch('sh.env', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess_multiple_commands(self, ld_mock, env_mock,
rs_mock, get_mock, pth_mock):
self.config.custom_preprocess = ['/bin/true', '/bin/false']
driver = GitRepoDriver(cfg_options=self.config)
driver.preprocess(package_name='foo')
directory = '%s/package_info/openstack/foo' % self.config.datadir
expected = [mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/true'],
_cwd=directory,
_env={'LANG': 'C'}),
mock.call(
['DLRN_PACKAGE_NAME=foo',
'DLRN_DISTGIT=%s' % directory,
'DLRN_SOURCEDIR=%s/foo' % self.config.datadir,
'DLRN_USER=myuser',
'/bin/false'],
_cwd=directory,
_env={'LANG': 'C'})
]
self.assertEqual(env_mock.call_args_list, expected)
self.assertEqual(env_mock.call_count, 2)
@mock.patch('sh.renderspec', create=True)
@mock.patch('os.listdir')
def test_custom_preprocess_fail(self, ld_mock, rs_mock):
self.config.custom_preprocess = ['/bin/nonexistingcommand']
driver = GitRepoDriver(cfg_options=self.config)
os.makedirs(os.path.join(self.config.datadir,
'package_info/openstack/foo'))
self.assertRaises(RuntimeError, driver.preprocess, package_name='foo')
| apache-2.0 | -2,759,557,301,672,723,500 | 36.096296 | 78 | 0.621006 | false |
faylau/oVirt3.3WebAPITest | src/TestData/Host/ITC03010304_CreateHost_NameVerify.py | 1 | 2575 | #encoding:utf-8
__authors__ = ['"Liu Fei" <fei.liu@cs2c.com.cn>']
__version__ = "V0.1"
'''
# ChangeLog:
#---------------------------------------------------------------------------------
# Version Date Desc Author
#---------------------------------------------------------------------------------
# V0.1           2014/10/17      Initial version                                     Liu Fei
#---------------------------------------------------------------------------------
'''
'''-----------------------------------------------------------------------------------------
@note: Pre-TestData
-----------------------------------------------------------------------------------------'''
'''-----------------------------------------------------------------------------------------
@note: Test-Data
-----------------------------------------------------------------------------------------'''
# Host names: (1) containing special characters; (2) longer than 255 characters.
host_name_list = ['node-ITC03010304-~!@#$%^',
'node-ITC03010304-abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz01234'
]
xml_host_info = '''
<data_driver>
<host>
<name>%s</name>
<address>10.1.167.4</address>
<root_password>qwer1234</root_password>
</host>
<host>
<name>%s</name>
<address>10.1.167.4</address>
<root_password>qwer1234</root_password>
</host>
</data_driver>
''' % (host_name_list[0], host_name_list[1])
'''-----------------------------------------------------------------------------------------
@note: Post-TestData
-----------------------------------------------------------------------------------------'''
xml_del_option = '''
<action>
<force>true</force>
<async>false</async>
</action>
'''
'''-----------------------------------------------------------------------------------------
@note: ExpectedResult
-----------------------------------------------------------------------------------------'''
expected_status_code = 400
expected_info_list = [
'''
<fault>
<reason>Operation Failed</reason>
<detail>[Host name must be formed of alphanumeric characters, numbers or "-_."]</detail>
</fault>
''',
'''
<fault>
<reason>Operation Failed</reason>
<detail>[size must be between 1 and 255]</detail>
</fault>
'''
] | apache-2.0 | 5,178,600,399,952,293,000 | 35.608696 | 276 | 0.366337 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdeui/KApplication.py | 1 | 2567 | # encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdeui.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
class KApplication(__PyQt4_QtGui.QApplication):
# no doc
def checkRecoverFile(self, *args, **kwargs): # real signature unknown
pass
def clearStartupId(self, *args, **kwargs): # real signature unknown
pass
def commitData(self, *args, **kwargs): # real signature unknown
pass
def disableSessionManagement(self, *args, **kwargs): # real signature unknown
pass
def enableSessionManagement(self, *args, **kwargs): # real signature unknown
pass
def installX11EventFilter(self, *args, **kwargs): # real signature unknown
pass
def kApplication(self, *args, **kwargs): # real signature unknown
pass
def notify(self, *args, **kwargs): # real signature unknown
pass
def quit(self, *args, **kwargs): # real signature unknown
pass
def removeX11EventFilter(self, *args, **kwargs): # real signature unknown
pass
def reparseConfiguration(self, *args, **kwargs): # real signature unknown
pass
def saveState(self, *args, **kwargs): # real signature unknown
pass
def saveYourself(self, *args, **kwargs): # real signature unknown
pass
def sessionConfig(self, *args, **kwargs): # real signature unknown
pass
def sessionSaving(self, *args, **kwargs): # real signature unknown
pass
def setStartupId(self, *args, **kwargs): # real signature unknown
pass
def setSynchronizeClipboard(self, *args, **kwargs): # real signature unknown
pass
def setTopWidget(self, *args, **kwargs): # real signature unknown
pass
def startupId(self, *args, **kwargs): # real signature unknown
pass
def tempSaveName(self, *args, **kwargs): # real signature unknown
pass
def updateRemoteUserTimestamp(self, *args, **kwargs): # real signature unknown
pass
def updateUserTimestamp(self, *args, **kwargs): # real signature unknown
pass
def userTimestamp(self, *args, **kwargs): # real signature unknown
pass
def xioErrhandler(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
loadedByKdeinit = None # (!) real value is ''
| gpl-2.0 | 7,312,801,345,407,079,000 | 26.602151 | 82 | 0.653292 | false |
yhalk/vw_challenge_ECR | src/jetson/communication.py | 1 | 1903 | from Vision.vision_commands import addVisionDevices
from functools import partial
import paho.mqtt.client as mqtt
import config as config
import json
import ev3control.slave as slave
import time
def get_behaviours_and_params(behaviour_json, params_json):
#Get {behaviour name: behavior function name} dictionary
with open(behaviour_json) as beh:
behaviours = json.load(beh)
#Get {behaviour name: {param name:value} } dictionary. Order of params should be same as passing order
with open(params_json) as params:
params = json.load(params)
return behaviours,params
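# Illustrative shapes of the two JSON files above (hypothetical content,
# names are placeholders):
#   behaviour_json: {"follow_line": "run_follow_line"}
#   params_json:    {"follow_line": {"speed": 30, "kp": 0.8}}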
def on_log(client, userdata, level, buf):
print("Jetson log: ",buf)
def on_publish(client,userdata,mid):
print(str(mid)+" delivered")
def comm_init(topics_to_listen=[], qos_listen=None, topics_to_publish=[] ,qos_pub=None, listening=None, log=1):
listening = {}
#Set up client to receive sensor values and send actuator commands
client = mqtt.Client()
client.connect(config.BROKER_IP, config.BROKER_PORT, keepalive=60)
client.on_message = partial(slave.process_message, listening)
#client.on_publish = on_publish
#Subscribe to topics we get values from
for top in range(len(topics_to_listen)):
client.subscribe(topics_to_listen[top],qos=qos_listen[top])
#Subscribe to topics we send values to
for top in range(len(topics_to_publish)):
if topics_to_publish[top]=="vision":
addVisionDevices(client,topics_to_publish[top],qos=qos_pub[top])
while (listening=={}):
print("Waiting to connect...")
for top in range(len(topics_to_publish)):
if topics_to_publish[top]=="vision":
addVisionDevices(client,topics_to_publish[top],qos=qos_pub[top])
client.loop()
time.sleep(1)
if log==1:
client.on_log = on_log
return client,listening
| apache-2.0 | 3,807,647,506,207,860,700 | 32.982143 | 111 | 0.679979 | false |
wapiflapi/gxf | gxf/extensions/disassemble.py | 1 | 1469 | # -*- coding: utf-8 -*-
import gxf
@gxf.register()
class Disassemble(gxf.DataCommand):
'''
Disassemble a specified section of memory.
'''
def setup(self, parser):
parser.add_argument("what", type=gxf.LocationType())
parser.add_argument("until", type=gxf.LocationType(), nargs='?')
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-f", "--function", action="store_true")
parser.add_argument("-c", "--count", type=int, default=10)
parser.add_argument("-b", "--before", type=int, default=0)
parser.add_argument("-r", "--real", action="store_true")
parser.add_argument("-x", "--hexdump", action="store_true")
def run(self, args):
if args.function:
try:
disassembly = gxf.disassembly.disassemble(args.what, None)
except gxf.GdbError as e:
exit(e)
elif args.until is not None:
disassembly = gxf.disassembly.disassemble(args.what, args.until)
else:
disassembly = gxf.disassembly.disassemble_lines(
args.what, args.count + args.before, -args.before,
ignfct=args.real)
if args.verbose and disassembly.msg:
print(disassembly.msg)
elif not args.function:
print(" ...")
disassembly.output(hexdump=args.hexdump)
if not args.function:
print(" ...")
| mit | 4,517,690,244,646,569,500 | 31.644444 | 76 | 0.572498 | false |
usc-isi/nova | nova/tests/network/test_quantumv2.py | 1 | 26589 | # Copyright 2012 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import mox
from nova import context
from nova import exception
from nova.network import model
from nova.network import quantumv2
from nova.network.quantumv2 import api as quantumapi
from nova.openstack.common import cfg
from nova import test
from nova import utils
from quantumclient.v2_0 import client
FLAGS = cfg.CONF
#NOTE: Quantum client raises Exception which is discouraged by HACKING.
# We set this variable here and use it for assertions below to avoid
# the hacking checks until we can make quantum client throw a custom
# exception class instead.
QUANTUM_CLIENT_EXCEPTION = Exception
class MyComparator(mox.Comparator):
def __init__(self, lhs):
self.lhs = lhs
def _com_dict(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for key, value in lhs.iteritems():
if key not in rhs:
return False
rhs_value = rhs[key]
if not self._com(value, rhs_value):
return False
return True
def _com_list(self, lhs, rhs):
if len(lhs) != len(rhs):
return False
for lhs_value in lhs:
if lhs_value not in rhs:
return False
return True
def _com(self, lhs, rhs):
if lhs is None:
return rhs is None
if isinstance(lhs, dict):
if not isinstance(rhs, dict):
return False
return self._com_dict(lhs, rhs)
if isinstance(lhs, list):
if not isinstance(rhs, list):
return False
return self._com_list(lhs, rhs)
if isinstance(lhs, tuple):
if not isinstance(rhs, tuple):
return False
return self._com_list(lhs, rhs)
return lhs == rhs
def equals(self, rhs):
return self._com(self.lhs, rhs)
def __repr__(self):
return str(self.lhs)
class TestQuantumClient(test.TestCase):
def test_withtoken(self):
self.flags(quantum_url='http://anyhost/')
self.flags(quantum_url_timeout=30)
my_context = context.RequestContext('userid',
'my_tenantid',
auth_token='token')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
endpoint_url=FLAGS.quantum_url,
token=my_context.auth_token,
timeout=FLAGS.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
quantumv2.get_client(my_context)
def test_withouttoken_keystone_connection_error(self):
self.flags(quantum_auth_strategy='keystone')
self.flags(quantum_url='http://anyhost/')
my_context = context.RequestContext('userid', 'my_tenantid')
self.assertRaises(QUANTUM_CLIENT_EXCEPTION,
quantumv2.get_client,
my_context)
def test_withouttoken_keystone_not_auth(self):
# self.flags(quantum_auth_strategy=None) fail to work
old_quantum_auth_strategy = FLAGS.quantum_auth_strategy
setattr(FLAGS, 'quantum_auth_strategy', None)
self.flags(quantum_url='http://anyhost/')
self.flags(quantum_url_timeout=30)
my_context = context.RequestContext('userid', 'my_tenantid')
self.mox.StubOutWithMock(client.Client, "__init__")
client.Client.__init__(
endpoint_url=FLAGS.quantum_url,
auth_strategy=None,
timeout=FLAGS.quantum_url_timeout).AndReturn(None)
self.mox.ReplayAll()
try:
quantumv2.get_client(my_context)
finally:
setattr(FLAGS, 'quantum_auth_strategy',
old_quantum_auth_strategy)
class TestQuantumv2(test.TestCase):
def setUp(self):
super(TestQuantumv2, self).setUp()
self.mox.StubOutWithMock(quantumv2, 'get_client')
self.moxed_client = self.mox.CreateMock(client.Client)
quantumv2.get_client(mox.IgnoreArg()).MultipleTimes().AndReturn(
self.moxed_client)
self.context = context.RequestContext('userid', 'my_tenantid')
setattr(self.context,
'auth_token',
'bff4a5a6b9eb4ea2a6efec6eefb77936')
self.instance = {'project_id': '9d049e4b60b64716978ab415e6fbd5c0',
'uuid': str(utils.gen_uuid()),
'display_name': 'test_instance',
'security_groups': []}
self.nets1 = [{'id': 'my_netid1',
'name': 'my_netname1',
'tenant_id': 'my_tenantid'}]
self.nets2 = []
self.nets2.append(self.nets1[0])
self.nets2.append({'id': 'my_netid2',
'name': 'my_netname2',
'tenant_id': 'my_tenantid'})
self.nets3 = self.nets2 + [{'id': 'my_netid3',
'name': 'my_netname3',
'tenant_id': 'my_tenantid'}]
self.nets4 = [{'id': 'his_netid4',
'name': 'his_netname4',
'tenant_id': 'his_tenantid'}]
self.nets = [self.nets1, self.nets2, self.nets3, self.nets4]
self.port_address = '10.0.1.2'
self.port_data1 = [{'network_id': 'my_netid1',
'device_id': 'device_id1',
'device_owner': 'compute:nova',
'id': 'my_portid1',
'fixed_ips': [{'ip_address': self.port_address,
'subnet_id': 'my_subid1'}],
'mac_address': 'my_mac1', }]
self.dhcp_port_data1 = [{'fixed_ips': [{'ip_address': '10.0.1.9',
'subnet_id': 'my_subid1'}]}]
self.port_data2 = []
self.port_data2.append(self.port_data1[0])
self.port_data2.append({'network_id': 'my_netid2',
'device_id': 'device_id2',
'device_owner': 'compute:nova',
'id': 'my_portid2',
'fixed_ips': [{'ip_address': '10.0.2.2',
'subnet_id': 'my_subid2'}],
'mac_address': 'my_mac2', })
self.port_data3 = [{'network_id': 'my_netid1',
'device_id': 'device_id3',
'device_owner': 'compute:nova',
'id': 'my_portid3',
'fixed_ips': [], # no fixed ip
'mac_address': 'my_mac3', }]
self.subnet_data1 = [{'id': 'my_subid1',
'cidr': '10.0.1.0/24',
'network_id': 'my_netid1',
'gateway_ip': '10.0.1.1',
'dns_nameservers': ['8.8.1.1', '8.8.1.2']}]
self.subnet_data2 = []
self.subnet_data2.append({'id': 'my_subid2',
'cidr': '10.0.2.0/24',
'network_id': 'my_netid2',
'gateway_ip': '10.0.2.1',
'dns_nameservers': ['8.8.2.1', '8.8.2.2']})
def tearDown(self):
try:
self.mox.UnsetStubs()
self.mox.VerifyAll()
finally:
FLAGS.reset()
def _verify_nw_info(self, nw_inf, index=0):
id_suffix = index + 1
self.assertEquals('10.0.%s.2' % id_suffix,
nw_inf.fixed_ips()[index]['address'])
self.assertEquals('my_netname%s' % id_suffix,
nw_inf[index]['network']['label'])
self.assertEquals('my_portid%s' % id_suffix, nw_inf[index]['id'])
self.assertEquals('my_mac%s' % id_suffix, nw_inf[index]['address'])
self.assertEquals('10.0.%s.0/24' % id_suffix,
nw_inf[index]['network']['subnets'][0]['cidr'])
self.assertTrue(model.IP(address='8.8.%s.1' % id_suffix) in
nw_inf[index]['network']['subnets'][0]['dns'])
def _get_instance_nw_info(self, number):
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(mox.IgnoreArg(),
self.instance['uuid'],
mox.IgnoreArg())
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
nets = number == 1 and self.nets1 or self.nets2
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn({'networks': nets})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': []})
for i in xrange(1, number + 1):
subnet_data = i == 1 and self.subnet_data1 or self.subnet_data2
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid%s' % i])).AndReturn(
{'subnets': subnet_data})
self.moxed_client.list_ports(
network_id=subnet_data[0]['network_id'],
device_owner='network:dhcp').AndReturn(
{'ports': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context, self.instance)
for i in xrange(0, number):
self._verify_nw_info(nw_inf, i)
def test_get_instance_nw_info_1(self):
"""Test to get one port in one network and subnet."""
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
"""Test to get one port in each of two networks and subnets."""
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets(self):
"""Test get instance_nw_info with networks passed in."""
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data1})
self.moxed_client.list_subnets(
id=mox.SameElementsAs(['my_subid1'])).AndReturn(
{'subnets': self.subnet_data1})
self.moxed_client.list_ports(
network_id='my_netid1',
device_owner='network:dhcp').AndReturn(
{'ports': self.dhcp_port_data1})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
self.instance,
networks=self.nets1)
self._verify_nw_info(nw_inf, 0)
def test_get_instance_nw_info_without_subnet(self):
"""Test get instance_nw_info for a port without subnet."""
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
mox.IgnoreArg(),
self.instance['uuid'], mox.IgnoreArg())
self.moxed_client.list_ports(
tenant_id=self.instance['project_id'],
device_id=self.instance['uuid']).AndReturn(
{'ports': self.port_data3})
self.moxed_client.list_networks(
shared=False,
tenant_id=self.instance['project_id']).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
shared=True).AndReturn({'networks': []})
self.mox.ReplayAll()
nw_inf = api.get_instance_nw_info(self.context,
self.instance)
id_suffix = 3
self.assertEquals(0, len(nw_inf.fixed_ips()))
self.assertEquals('my_netname1', nw_inf[0]['network']['label'])
self.assertEquals('my_portid%s' % id_suffix, nw_inf[0]['id'])
self.assertEquals('my_mac%s' % id_suffix, nw_inf[0]['address'])
self.assertEquals(0, len(nw_inf[0]['network']['subnets']))
def _allocate_for_instance(self, net_idx=1, **kwargs):
api = quantumapi.API()
self.mox.StubOutWithMock(api, 'get_instance_nw_info')
# Net idx is 1-based for compatibility with existing unit tests
nets = self.nets[net_idx - 1]
api.get_instance_nw_info(mox.IgnoreArg(),
self.instance,
networks=nets).AndReturn(None)
ports = {}
fixed_ips = {}
req_net_ids = []
if 'requested_networks' in kwargs:
for id, fixed_ip, port_id in kwargs['requested_networks']:
if port_id:
self.moxed_client.show_port(port_id).AndReturn(
{'port': {'id': 'my_portid1',
'network_id': 'my_netid1'}})
req_net_ids.append('my_netid1')
ports['my_netid1'] = self.port_data1[0]
id = 'my_netid1'
else:
fixed_ips[id] = fixed_ip
req_net_ids.append(id)
search_ids = [net['id'] for net in nets if net['id'] in req_net_ids]
mox_list_network_params = dict(tenant_id=self.instance['project_id'],
shared=False)
if search_ids:
mox_list_network_params['id'] = search_ids
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': nets})
mox_list_network_params = dict(shared=True)
if search_ids:
mox_list_network_params['id'] = search_ids
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': []})
for network in nets:
port_req_body = {
'port': {
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
},
}
port = ports.get(network['id'], None)
if port:
port_id = port['id']
self.moxed_client.update_port(port_id,
MyComparator(port_req_body)
).AndReturn(
{'port': port})
else:
fixed_ip = fixed_ips.get(network['id'])
if fixed_ip:
port_req_body['port']['fixed_ip'] = fixed_ip
port_req_body['port']['network_id'] = network['id']
port_req_body['port']['admin_state_up'] = True
port_req_body['port']['tenant_id'] = \
self.instance['project_id']
res_port = {'port': {'id': 'fake'}}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn(res_port)
self.mox.ReplayAll()
api.allocate_for_instance(self.context, self.instance, **kwargs)
def test_allocate_for_instance_1(self):
"""Allocate one port in one network env."""
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
"""Allocate one port in two networks env."""
self._allocate_for_instance(2)
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [(net['id'], None, None)
for net in (self.nets3[0], self.nets3[-1])]
self._allocate_for_instance(net_idx=3,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_fixedip(self):
# specify only first and last network
requested_networks = [(self.nets1[0]['id'], '10.0.1.0/24', None)]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_with_requested_networks_with_port(self):
# specify only first and last network
requested_networks = [(None, None, 'myportid1')]
self._allocate_for_instance(net_idx=1,
requested_networks=requested_networks)
def test_allocate_for_instance_ex1(self):
"""verify we will delete created ports
if we fail to allocate all net resources.
Mox to raise exception when creating a second port.
In this case, the code should delete the first created port.
"""
api = quantumapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
index = 0
for network in self.nets2:
port_req_body = {
'port': {
'network_id': network['id'],
'admin_state_up': True,
'device_id': self.instance['uuid'],
'device_owner': 'compute:nova',
'tenant_id': self.instance['project_id'],
},
}
port = {'id': 'portid_' + network['id']}
if index == 0:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndReturn({'port': port})
else:
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
index += 1
self.moxed_client.delete_port('portid_' + self.nets2[0]['id'])
self.mox.ReplayAll()
self.assertRaises(QUANTUM_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance)
def test_allocate_for_instance_ex2(self):
"""verify we have no port to delete
if we fail to allocate the first net resource.
Mox to raise exception when creating the first port.
In this case, the code should not delete any ports.
"""
api = quantumapi.API()
self.moxed_client.list_networks(
tenant_id=self.instance['project_id'],
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(shared=True).AndReturn(
{'networks': []})
port_req_body = {
'port': {
'network_id': self.nets2[0]['id'],
'admin_state_up': True,
'device_id': self.instance['uuid'],
'tenant_id': self.instance['project_id'],
},
}
self.moxed_client.create_port(
MyComparator(port_req_body)).AndRaise(
Exception("fail to create port"))
self.mox.ReplayAll()
self.assertRaises(QUANTUM_CLIENT_EXCEPTION, api.allocate_for_instance,
self.context, self.instance)
def _deallocate_for_instance(self, number):
port_data = number == 1 and self.port_data1 or self.port_data2
self.moxed_client.list_ports(
device_id=self.instance['uuid']).AndReturn(
{'ports': port_data})
for port in port_data:
self.moxed_client.delete_port(port['id'])
self.mox.ReplayAll()
api = quantumapi.API()
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
"""Test to deallocate in one port env."""
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
"""Test to deallocate in two ports env."""
self._deallocate_for_instance(2)
def test_validate_networks(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2']),
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets2})
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2']),
shared=True).AndReturn(
{'networks': []})
self.mox.ReplayAll()
api = quantumapi.API()
api.validate_networks(self.context, requested_networks)
def test_validate_networks_ex_1(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2']),
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2']),
shared=True).AndReturn(
{'networks': []})
self.mox.ReplayAll()
api = quantumapi.API()
try:
api.validate_networks(self.context, requested_networks)
except exception.NetworkNotFound as ex:
self.assertTrue("my_netid2" in str(ex))
def test_validate_networks_ex_2(self):
requested_networks = [('my_netid1', 'test', None),
('my_netid2', 'test2', None),
('my_netid3', 'test3', None)]
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2', 'my_netid3']),
tenant_id=self.context.project_id,
shared=False).AndReturn(
{'networks': self.nets1})
self.moxed_client.list_networks(
id=mox.SameElementsAs(['my_netid1', 'my_netid2', 'my_netid3']),
shared=True).AndReturn(
{'networks': []})
self.mox.ReplayAll()
api = quantumapi.API()
try:
api.validate_networks(self.context, requested_networks)
except exception.NetworkNotFound as ex:
self.assertTrue("my_netid2, my_netid3" in str(ex))
def _mock_list_ports(self, port_data=None):
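        # Stub a list_ports() lookup filtered by the port's fixed IP address and
        # return that address so callers can query by it.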
if port_data is None:
port_data = self.port_data2
address = self.port_address
self.moxed_client.list_ports(
fixed_ips=MyComparator('ip_address=%s' % address)).AndReturn(
{'ports': port_data})
self.mox.ReplayAll()
return address
def test_get_instance_uuids_by_ip_filter(self):
self._mock_list_ports()
filters = {'ip': '^10\\.0\\.1\\.2$'}
api = quantumapi.API()
result = api.get_instance_uuids_by_ip_filter(self.context, filters)
self.assertEquals('device_id1', result[0]['instance_uuid'])
self.assertEquals('device_id2', result[1]['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_no_ports(self):
address = self._mock_list_ports(port_data=[])
api = quantumapi.API()
self.assertRaises(exception.FixedIpNotFoundForAddress,
api.get_fixed_ip_by_address,
self.context, address)
def test_get_fixed_ip_by_address_succeeds_for_1_port(self):
address = self._mock_list_ports(port_data=self.port_data1)
api = quantumapi.API()
result = api.get_fixed_ip_by_address(self.context, address)
self.assertEquals('device_id1', result['instance_uuid'])
def test_get_fixed_ip_by_address_fails_for_more_than_1_port(self):
address = self._mock_list_ports()
api = quantumapi.API()
self.assertRaises(exception.FixedIpAssociatedWithMultipleInstances,
api.get_fixed_ip_by_address,
self.context, address)
def _get_available_networks(self, prv_nets, pub_nets, req_ids=None):
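        # Helper: stub the tenant-private and shared network listings (optionally
        # restricted to req_ids) and assert the API returns their concatenation.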
api = quantumapi.API()
nets = prv_nets + pub_nets
mox_list_network_params = dict(tenant_id=self.instance['project_id'],
shared=False)
if req_ids:
mox_list_network_params['id'] = req_ids
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': prv_nets})
mox_list_network_params = dict(shared=True)
if req_ids:
mox_list_network_params['id'] = req_ids
self.moxed_client.list_networks(
**mox_list_network_params).AndReturn({'networks': pub_nets})
self.mox.ReplayAll()
rets = api._get_available_networks(self.context,
self.instance['project_id'],
req_ids)
self.assertEqual(rets, nets)
def test_get_available_networks_all_private(self):
self._get_available_networks(prv_nets=self.nets2, pub_nets=[])
def test_get_available_networks_all_public(self):
self._get_available_networks(prv_nets=[], pub_nets=self.nets2)
def test_get_available_networks_private_and_public(self):
self._get_available_networks(prv_nets=self.nets1, pub_nets=self.nets4)
def test_get_available_networks_with_network_ids(self):
prv_nets = [self.nets3[0]]
pub_nets = [self.nets3[-1]]
# specify only first and last network
req_ids = [net['id'] for net in (self.nets3[0], self.nets3[-1])]
self._get_available_networks(prv_nets, pub_nets, req_ids)
| apache-2.0 | 8,201,003,567,821,700,000 | 41.885484 | 78 | 0.537027 | false |
gmferrigno/pyafipws | pyrece.py | 1 | 46200 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"Aplicativo AdHoc Para generación de Facturas Electrónicas"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2009-2015 Mariano Reingart"
__license__ = "GPL 3.0"
__version__ = "1.27b"
from datetime import datetime
from decimal import Decimal, getcontext, ROUND_DOWN
import os
import sys
import wx
import gui
import traceback
from ConfigParser import SafeConfigParser
import wsaa, wsfe, wsfev1, wsfexv1
from php import SimpleXMLElement, SoapClient, SoapFault, date
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from smtplib import SMTP
#from PyFPDF.ejemplos.form import Form
from pyfepdf import FEPDF
# Formatos de archivos:
from formatos import formato_xml, formato_csv, formato_dbf, formato_txt, formato_json
HOMO = False
DEBUG = '--debug' in sys.argv
CONFIG_FILE = "rece.ini"
ACERCA_DE = u"""
PyRece: Aplicativo AdHoc para generar Facturas Electrónicas
Copyright (C) 2008-2015 Mariano Reingart reingart@gmail.com
Este programa es software libre, se entrega ABSOLUTAMENTE SIN GARANTIA
y es bienvenido a redistribuirlo bajo la licencia GPLv3.
Para información adicional y descargas ver:
http://www.sistemasagiles.com.ar/
"""
INSTRUCTIVO = U"""
Forma de uso:
* Examinar: para buscar el archivo a procesar (opcional)
* Cargar: para leer los datos del archivo de facturas a procesar
* Autenticar: para iniciar la sesión en los servidores de AFIP (obligatorio antes de autorizar)
* Marcar Todo: para seleccionar todas las facturas
* Autorizar: para autorizar las facturas seleccionadas, completando el CAE y demás datos
* Autorizar Lote: para autorizar en un solo lote las facturas seleccionadas
* Grabar: para almacenar los datos procesados en el archivo de facturas
* Previsualizar: para ver por pantalla la factura seleccionadas
* Enviar: para envia por correo electrónico las facturas seleccionadas
Para solicitar soporte comercial, escriba a pyrece@sistemasagiles.com.ar
"""
class PyRece(gui.Controller):
def on_load(self, event):
self.cols = []
self.items = []
self.paths = [entrada]
self.token = self.sign = ""
self.smtp = None
self.webservice = None
if entrada and os.path.exists(entrada):
self.cargar()
self.components.cboWebservice.value = DEFAULT_WEBSERVICE
self.on_cboWebservice_click(event)
self.tipos = {
1:u"Factura A",
2:u"Notas de Débito A",
3:u"Notas de Crédito A",
4:u"Recibos A",
5:u"Notas de Venta al contado A",
6:u"Facturas B",
7:u"Notas de Débito B",
8:u"Notas de Crédito B",
9:u"Recibos B",
10:u"Notas de Venta al contado B",
19:u"Facturas de Exportación",
20:u"Nota de Débito por Operaciones con el Exterior",
21:u"Nota de Crédito por Operaciones con el Exterior",
39:u"Otros comprobantes A que cumplan con la R.G. N° 3419",
40:u"Otros comprobantes B que cumplan con la R.G. N° 3419",
60:u"Cuenta de Venta y Líquido producto A",
61:u"Cuenta de Venta y Líquido producto B",
63:u"Liquidación A",
64:u"Liquidación B",
11:u"Factura C",
12:u"Nota de Débito C",
13:u"Nota de Crédito C",
15:u"Recibo C",
}
self.component.bgcolor = "light gray"
        # disable column sorting
##self.components.lvwListado.GetColumnSorter = lambda: lambda x,y: 0
def set_cols(self, cols):
self.__cols = cols
lv = self.components.lvwListado
# remove old columns:
lv.clear_all()
# insert new columns
for col in cols:
ch = gui.ListColumn(lv, name=col, text=col.replace("_"," ").title(), align="left")
def get_cols(self):
return self.__cols
cols = property(get_cols, set_cols)
def set_items(self, items):
cols = self.cols
self.__items = items
def convert_str(value):
if value is None:
return ''
elif isinstance(value, str):
return unicode(value,'latin1')
elif isinstance(value, unicode):
return value
else:
return str(value)
self.components.lvwListado.items = [[convert_str(item[col]) for col in cols] for item in items]
wx.SafeYield()
def get_items(self):
return self.__items
items = property(get_items, set_items)
def get_selected_items(self):
for it in self.components.lvwListado.get_selected_items():
yield it.index, it
def set_selected_items(self, selected):
for it in selected:
it.selected = True
def set_paths(self, paths):
self.__paths = paths
self.components.txtArchivo.value = ', '.join([fn for fn in paths])
def get_paths(self):
return self.__paths
paths = property(get_paths, set_paths)
def log(self, msg):
if not isinstance(msg, unicode):
msg = unicode(msg, "latin1","ignore")
print "LOG", msg
self.components.txtEstado.value = msg + u"\n" + self.components.txtEstado.value
wx.SafeYield()
f = None
try:
f = open("pyrece.log","a")
f.write("%s: " % (datetime.now(), ))
f.write(msg.encode("ascii", "ignore"))
f.write("\n\r")
except Exception, e:
print e
finally:
if f:
f.close()
def progreso(self, value):
if self.items:
per = (value+1)/float(len(self.items))*100
self.components.pbProgreso.value = per
wx.SafeYield()
def error(self, code, text):
        ex = traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback)
self.log(''.join(ex))
gui.alert(text, 'Error %s' % code)
def verifica_ws(self):
if not self.ws:
gui.alert("Debe seleccionar el webservice a utilizar!", 'Advertencia')
raise RuntimeError()
if not self.token or not self.sign:
gui.alert("Debe autenticarse con AFIP!", 'Advertencia')
raise RuntimeError()
def on_btnMarcarTodo_click(self, event):
for it in self.components.lvwListado.items:
it.selected = True
def on_menu_consultas_dummy_click(self, event):
##self.verifica_ws()
try:
if self.webservice=="wsfe":
results = self.client.FEDummy()
msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
results.appserver, results.dbserver, results.authserver)
location = self.ws.client.location
elif self.webservice in ("wsfev1", "wsfexv1"):
self.ws.Dummy()
msg = "AppServ %s\nDbServer %s\nAuthServer %s" % (
self.ws.AppServerStatus, self.ws.DbServerStatus, self.ws.AuthServerStatus)
location = self.ws.client.location
else:
msg = "%s no soportado" % self.webservice
location = ""
gui.alert(msg, location)
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastCBTE_click(self, event):
##self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Último Nro. Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Último Nro. Comprobante", '2')
if not result:
return
ptovta = result
try:
if self.webservice=="wsfe":
ultcmp = wsfe.recuperar_last_cmp(self.client, self.token, self.sign,
cuit, ptovta, tipocbte)
elif self.webservice=="wsfev1":
ultcmp = "%s (wsfev1)" % self.ws.CompUltimoAutorizado(tipocbte, ptovta)
elif self.webservice=="wsfexv1":
ultcmp = "%s (wsfexv1)" % self.ws.GetLastCMP(tipocbte, ptovta)
gui.alert(u"Último comprobante: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s" % (ultcmp, self.tipos[tipocbte],
tipocbte, ptovta), u'Consulta Último Nro. Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_getCAE_click(self, event):
self.verifica_ws()
options = [v for k,v in sorted([(k,v) for k,v in self.tipos.items()])]
result = gui.single_choice(options, "Tipo de comprobante",
u"Consulta Comprobante",
)
if not result:
return
tipocbte = [k for k,v in self.tipos.items() if v==result][0]
result = gui.prompt(u"Punto de venta",
u"Consulta Comprobante", '2')
if not result:
return
ptovta = result
result = gui.prompt(u"Nº de comprobante",
u"Consulta Comprobante", '2')
if not result:
return
nrocbte = result
try:
if self.webservice=="wsfe":
cae = 'no soportado'
elif self.webservice=="wsfev1":
cae = "%s (wsfev1)" % self.ws.CompConsultar(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
self.log('ImpNeto: %s' % self.ws.ImpNeto)
self.log('ImptoLiq: %s' % self.ws.ImptoLiq)
self.log('EmisionTipo: %s' % self.ws.EmisionTipo)
elif self.webservice=="wsfexv1":
cae = "%s (wsfexv1)" % self.ws.GetCMP(tipocbte, ptovta, nrocbte)
self.log('CAE: %s' % self.ws.CAE)
self.log('FechaCbte: %s' % self.ws.FechaCbte)
self.log('PuntoVenta: %s' % self.ws.PuntoVenta)
self.log('CbteNro: %s' % self.ws.CbteNro)
self.log('ImpTotal: %s' % self.ws.ImpTotal)
gui.alert(u"CAE: %s\n"
u"Tipo: %s (%s)\nPunto de Venta: %s\nNumero: %s\nFecha: %s" % (
cae, self.tipos[tipocbte],
tipocbte, ptovta, nrocbte, self.ws.FechaCbte),
u'Consulta Comprobante')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(str(e),"latin1","ignore"))
def on_menu_consultas_lastID_click(self, event):
##self.verifica_ws()
try:
if self.webservice=="wsfe":
ultnro = wsfe.ultnro(self.client, self.token, self.sign, cuit)
print "ultnro", ultnro
print self.client.xml_response
elif self.webservice=="wsfexv1":
ultnro = self.ws.GetLastID()
else:
ultnro = None
gui.alert(u"Último ID (máximo): %s" % (ultnro),
u'Consulta Último ID')
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_menu_ayuda_acercade_click(self, event):
text = ACERCA_DE
gui.alert(text, u'Acerca de PyRece Versión %s' % __version__)
def on_menu_ayuda_instructivo_click(self, event):
text = INSTRUCTIVO
gui.alert(text, u'Instructivo de PyRece')
def on_menu_ayuda_limpiar_click(self, event):
self.components.txtEstado.value = ""
def on_menu_ayuda_mensajesXML_click(self, event):
self.verifica_ws()
self.components.txtEstado.value = u"XmlRequest:\n%s\n\nXmlResponse:\n%s" % (
self.ws.xml_request, self.ws.xml_response)
self.component.size = (592, 517)
def on_menu_ayuda_estado_click(self, event):
if self.component.size[1]<517:
self.component.size = (592, 517)
else:
self.component.size = (592, 265)
def on_menu_ayuda_configuracion_click(self, event):
self.components.txtEstado.value = open(CONFIG_FILE).read()
self.component.size = (592, 517)
def on_cboWebservice_click(self, event):
self.webservice = self.components.cboWebservice.value
self.ws = None
self.token = None
self.sign = None
if self.webservice == "wsfe":
self.client = SoapClient(wsfe_url, action=wsfe.SOAP_ACTION, namespace=wsfe.SOAP_NS,
trace=False, exceptions=True)
elif self.webservice == "wsfev1":
self.ws = wsfev1.WSFEv1()
elif self.webservice == "wsfexv1":
self.ws = wsfexv1.WSFEXv1()
def on_btnAutenticar_click(self, event):
try:
if self.webservice in ('wsfe', ):
service = "wsfe"
elif self.webservice in ('wsfev1', ):
self.log("Conectando WSFEv1... " + wsfev1_url)
self.ws.Conectar("",wsfev1_url, proxy_dict)
self.ws.Cuit = cuit
service = "wsfe"
elif self.webservice in ('wsfex', 'wsfexv1'):
self.log("Conectando WSFEXv1... " + wsfexv1_url)
self.ws.Conectar("",wsfexv1_url, proxy_dict)
self.ws.Cuit = cuit
service = "wsfex"
else:
gui.alert('Debe seleccionar servicio web!', 'Advertencia')
return
self.log("Creando TRA %s ..." % service)
ws = wsaa.WSAA()
tra = ws.CreateTRA(service)
self.log("Frimando TRA (CMS) con %s %s..." % (str(cert),str(privatekey)))
cms = ws.SignTRA(str(tra),str(cert),str(privatekey))
self.log("Llamando a WSAA... " + wsaa_url)
ws.Conectar("", wsdl=wsaa_url, proxy=proxy_dict)
self.log("Proxy: %s" % proxy_dict)
xml = ws.LoginCMS(str(cms))
self.log("Procesando respuesta...")
if xml:
self.token = ws.Token
self.sign = ws.Sign
if DEBUG:
self.log("Token: %s" % self.token)
self.log("Sign: %s" % self.sign)
elif self.token and self.sign:
self.log("Token: %s... OK" % self.token[:10])
self.log("Sign: %s... OK" % self.sign[:10])
if self.webservice in ("wsfev1", "wsfexv1"):
self.ws.Token = self.token
self.ws.Sign = self.sign
if xml:
gui.alert('Autenticado OK!', 'Advertencia')
else:
gui.alert(u'Respuesta: %s' % ws.XmlResponse, u'No se pudo autenticar: %s' % ws.Excepcion)
except SoapFault,e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def examinar(self):
filename = entrada
wildcard = ["Planillas Excel (*.xlsx)|*.xlsx",
"Archivos CSV (*.csv)|*.csv",
"Archivos XML (*.xml)|*.xml",
"Archivos TXT (*.txt)|*.txt",
"Archivos DBF (*.dbf)|*.dbf",
"Archivos JSON (*.json)|*.json",
]
if entrada.endswith("xml"):
wildcard.sort(reverse=True)
result = gui.open_file('Abrir', 'datos', filename, '|'.join(wildcard))
if not result:
return
self.paths = [result]
def on_menu_archivo_abrir_click(self, event):
self.examinar()
self.cargar()
def on_menu_archivo_cargar_click(self, event):
self.cargar()
def cargar(self):
try:
items = []
for fn in self.paths:
if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
filas = formato_csv.leer(fn)
items.extend(filas)
elif fn.lower().endswith(".xml"):
regs = formato_xml.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".txt"):
regs = formato_txt.leer(fn)
items.extend(formato_csv.aplanar(regs))
elif fn.lower().endswith(".dbf"):
reg = formato_dbf.leer({}, carpeta=os.path.dirname(fn))
items.extend(formato_csv.aplanar(reg.values()))
elif fn.lower().endswith(".json"):
regs = formato_json.leer(fn)
items.extend(formato_csv.aplanar(regs))
else:
self.error(u'Formato de archivo desconocido: %s', unicode(fn))
if len(items) < 2:
gui.alert(u'El archivo no tiene datos válidos', 'Advertencia')
            # extract the column names (ignore empty XLSX cells)
cols = items and [str(it).strip() for it in items[0] if it] or []
if DEBUG: print "Cols",cols
            # build a dictionary for each data row
items = [dict([(col,item[i]) for i, col in enumerate(cols)])
for item in items[1:]]
self.cols = cols
self.items = items
except Exception,e:
self.error(u'Excepción',unicode(e))
##raise
def on_menu_archivo_guardar_click(self, event):
filename = entrada
wildcard = ["Archivos CSV (*.csv)|*.csv", "Archivos XML (*.xml)|*.xml",
"Archivos TXT (*.txt)|*.txt", "Archivos DBF (*.dbf)|*.dbf",
"Archivos JSON (*.json)|*.json",
"Planillas Excel (*.xlsx)|*.xlsx",
]
if entrada.endswith("xml"):
wildcard.sort(reverse=True)
if self.paths:
path = self.paths[0]
else:
path = salida
result = gui.save_file(title='Guardar', filename=path,
wildcard='|'.join(wildcard))
if not result:
return
fn = result[0]
self.grabar(fn)
def grabar(self, fn=None):
try:
if fn is None and salida:
if salida.startswith("-") and self.paths:
fn = os.path.splitext(self.paths[0])[0] + salida
else:
fn = salida
elif not fn:
raise RuntimeError("Debe indicar un nombre de archivo para grabar")
if fn.lower().endswith(".csv") or fn.lower().endswith(".xlsx"):
formato_csv.escribir([self.cols] + [[item[k] for k in self.cols] for item in self.items], fn)
else:
regs = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in self.items])
if fn.endswith(".xml"):
formato_xml.escribir(regs, fn)
elif fn.endswith(".txt"):
formato_txt.escribir(regs, fn)
elif fn.endswith(".dbf"):
formato_dbf.escribir(regs, {}, carpeta=os.path.dirname(fn))
elif fn.endswith(".json"):
formato_json.escribir(regs, fn)
else:
self.error(u'Formato de archivo desconocido', unicode(fn))
gui.alert(u'Se guardó con éxito el archivo:\n%s' % (unicode(fn),), 'Guardar')
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_btnAutorizar_click(self, event):
self.verifica_ws()
try:
ok = procesadas = rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
selected = []
for i, item in self.get_selected_items():
kargs = item.copy()
selected.append(item)
                kargs['cbt_desde'] = kargs['cbt_hasta'] = kargs['cbt_numero']
for key in kargs:
if isinstance(kargs[key], basestring):
kargs[key] = kargs[key].replace(",",".")
if self.webservice == 'wsfe':
if 'id' not in kargs or kargs['id'] == "":
id = long(kargs['cbt_desde'])
id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
kargs['id'] = id
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
                    if 'cuit' not in kargs:
kargs['cuit'] = cuit
ret = wsfe.aut(self.client, self.token, self.sign, **kargs)
kargs.update(ret)
del kargs['cbt_desde']
del kargs['cbt_hasta']
elif self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
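                    # Rows arrive flattened, e.g. 'tributo_id_1', 'tributo_desc_1', ...;
                    # 'tributo_%%s_%s' % l builds the template 'tributo_%s_<l>' so that
                    # (k % 'id') yields the actual key name for line l.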
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
                            if tipo:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
elif self.webservice == 'wsfexv1':
                    kargs['cbte_nro'] = kargs['cbt_numero']
kargs['permiso_existente'] = kargs['permiso_existente'] or ""
encabezado = {}
for k in ('tipo_cbte', 'punto_vta', 'cbte_nro', 'fecha_cbte',
'imp_total', 'tipo_expo', 'permiso_existente', 'pais_dst_cmp',
'nombre_cliente', 'cuit_pais_cliente', 'domicilio_cliente',
'id_impositivo', 'moneda_id', 'moneda_ctz',
'obs_comerciales', 'obs_generales', 'forma_pago', 'incoterms',
'idioma_cbte', 'incoterms_ds'):
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'codigo%s' % l
if k in kargs:
codigo = kargs['codigo%s' % l]
ds = kargs['descripcion%s' % l]
qty = kargs['cantidad%s' % l]
umed = kargs['umed%s' % l]
precio = kargs['precio%s' % l]
importe = kargs['importe%s' % l]
bonif = kargs.get('bonif%s' % l)
self.ws.AgregarItem(codigo, ds, qty, umed, precio, importe, bonif)
else:
break
for l in range(1,1000):
k = 'cbte_asoc_%%s_%s' % l
if (k % 'tipo') in kargs:
tipo = kargs[k % 'tipo']
pto_vta = kargs[k % 'pto_vta']
nro = kargs[k % 'nro']
                            if tipo:
self.ws.AgregarCmpAsoc(tipo, pto_vta, nro)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.Authorize(kargs['id'])
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
                # update the invoice row with the authorization results
for k in ('cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
if kargs.get(k):
item[k] = kargs[k]
self.items[i] = item
self.log(u"ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
procesadas += 1
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
self.progreso(i)
self.items = self.items
self.set_selected_items(selected)
self.progreso(len(self.items) - 1)
gui.alert(u'Proceso finalizado, procesadas %d\n\n'
'Aceptadas: %d\n'
'Rechazadas: %d' % (procesadas, ok, rechazadas),
u'Autorización')
self.grabar()
except SoapFault, e:
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except KeyError, e:
self.error("Error",u'Campo obligatorio no encontrado: %s' % e)
except Exception, e:
self.error(u'Excepción',unicode(e))
finally:
if DEBUG:
if self.webservice == 'wsfev1' and DEBUG:
print self.ws.XmlRequest
print self.ws.XmlResponse
def on_btnAutorizarLote_click(self, event):
self.verifica_ws()
if not self.items: return
try:
#getcontext().prec = 2
ok = 0
rechazadas = 0
cols = self.cols
items = []
self.progreso(0)
cbt_desde = cbt_hasta = None
datos = {
'tipo_cbte': None,
'punto_vta': None,
'fecha_cbte': None,
'fecha_venc_pago': None,
'fecha_cbte': None,
'fecha_venc_pago': None,
'fecha_serv_desde': None,
'fecha_serv_hasta': None,
'moneda_id': None,
'moneda_ctz': None,
'id': None,
}
importes = {
'imp_total': Decimal(0),
'imp_tot_conc': Decimal(0),
'imp_neto': Decimal(0),
'imp_iva':Decimal(0),
'imp_op_ex': Decimal(0),
'imp_trib': Decimal(0),
}
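            # pre-create accumulators for up to 4 VAT rates and 9 extra taxes so the
            # per-invoice loop below can sum them across the whole batch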
for l in range(1,5):
k = 'iva_%%s_%s' % l
datos[k % 'id'] = None
importes[k % 'base_imp'] = Decimal(0)
importes[k % 'importe'] = Decimal(0)
for l in range(1,10):
k = 'tributo_%%s_%s' % l
datos[k % 'id'] = None
datos[k % 'desc'] = None
importes[k % 'base_imp'] = Decimal(0)
datos[k % 'alic'] = None
importes[k % 'importe'] = Decimal(0)
for i, item in self.get_selected_items():
if cbt_desde is None or int(item['cbt_numero']) < cbt_desde:
cbt_desde = int(item['cbt_numero'])
if cbt_hasta is None or int(item['cbt_numero']) > cbt_hasta:
cbt_hasta = int(item['cbt_numero'])
for key in item:
if key in datos:
if datos[key] is None:
datos[key] = item[key]
elif datos[key] != item[key]:
raise RuntimeError(u"%s tiene valores distintos en el lote!" % key)
if key in importes and item[key]:
importes[key] = importes[key] + Decimal("%.2f" % float(str(item[key].replace(",","."))))
kargs = {'cbt_desde': cbt_desde, 'cbt_hasta': cbt_hasta}
kargs.update({'tipo_doc': 99, 'nro_doc': '0'})
kargs.update(datos)
kargs.update(importes)
if kargs['fecha_serv_desde'] and kargs['fecha_serv_hasta']:
kargs['presta_serv'] = 1
kargs['concepto'] = 2
else:
kargs['presta_serv'] = 0
kargs['concepto'] = 1
del kargs['fecha_serv_desde']
del kargs['fecha_serv_hasta']
for key, val in importes.items():
importes[key] = val.quantize(Decimal('.01'), rounding=ROUND_DOWN)
if 'id' not in kargs or kargs['id'] == "":
id = long(kargs['cbt_desde'])
id += (int(kargs['tipo_cbte'])*10**4 + int(kargs['punto_vta']))*10**8
kargs['id'] = id
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in kargs.items()]))
if '--test' in sys.argv:
kargs['cbt_desde'] = 777
kargs['fecha_cbte'] = '20110802'
kargs['fecha_venc_pago'] = '20110831'
if gui.confirm("Confirma Lote:\n"
"Tipo: %(tipo_cbte)s Desde: %(cbt_desde)s Hasta %(cbt_hasta)s\n"
"Neto: %(imp_neto)s IVA: %(imp_iva)s Trib.: %(imp_trib)s Total: %(imp_total)s"
% kargs, "Autorizar lote:"):
if self.webservice == 'wsfev1':
encabezado = {}
for k in ('concepto', 'tipo_doc', 'nro_doc', 'tipo_cbte', 'punto_vta',
'cbt_desde', 'cbt_hasta', 'imp_total', 'imp_tot_conc', 'imp_neto',
'imp_iva', 'imp_trib', 'imp_op_ex', 'fecha_cbte',
'moneda_id', 'moneda_ctz'):
encabezado[k] = kargs[k]
for k in ('fecha_venc_pago', 'fecha_serv_desde', 'fecha_serv_hasta'):
if k in kargs:
encabezado[k] = kargs.get(k)
self.ws.CrearFactura(**encabezado)
for l in range(1,1000):
k = 'iva_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
base_imp = kargs[k % 'base_imp']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarIva(id, base_imp, importe)
else:
break
for l in range(1,1000):
k = 'tributo_%%s_%s' % l
if (k % 'id') in kargs:
id = kargs[k % 'id']
desc = kargs[k % 'desc']
base_imp = kargs[k % 'base_imp']
alic = kargs[k % 'alic']
importe = kargs[k % 'importe']
if id:
self.ws.AgregarTributo(id, desc, base_imp, alic, importe)
else:
break
if DEBUG:
self.log('\n'.join(["%s='%s'" % (k,v) for k,v in self.ws.factura.items()]))
cae = self.ws.CAESolicitar()
kargs.update({
'cae': self.ws.CAE,
'fecha_vto': self.ws.Vencimiento,
'resultado': self.ws.Resultado,
'motivo': self.ws.Obs,
'reproceso': self.ws.Reproceso,
'err_code': self.ws.ErrCode.encode("latin1"),
'err_msg': self.ws.ErrMsg.encode("latin1"),
})
if self.ws.ErrMsg:
gui.alert(self.ws.ErrMsg, "Error AFIP")
if self.ws.Obs and self.ws.Obs!='00':
gui.alert(self.ws.Obs, u"Observación AFIP")
for i, item in self.get_selected_items():
for key in ('id', 'cae', 'fecha_vto', 'resultado', 'motivo', 'reproceso', 'err_code', 'err_msg'):
item[key] = kargs[key]
self.log("ID: %s CAE: %s Motivo: %s Reproceso: %s" % (kargs['id'], kargs['cae'], kargs['motivo'],kargs['reproceso']))
if kargs['resultado'] == "R":
rechazadas += 1
elif kargs['resultado'] == "A":
ok += 1
                self.items = self.items  # refresh the list view (TODO: do this properly)
self.progreso(len(self.items))
gui.alert('Proceso finalizado OK!\n\nAceptadas: %d\nRechazadas: %d' % (ok, rechazadas), 'Autorización')
self.grabar()
except SoapFault,e:
self.log(self.client.xml_request)
self.log(self.client.xml_response)
self.error(e.faultcode, e.faultstring.encode("ascii","ignore"))
except wsfe.WSFEError,e:
self.error(e.code, e.msg.encode("ascii","ignore"))
except Exception, e:
self.error(u'Excepción',unicode(e))
def on_btnPrevisualizar_click(self, event):
try:
j = 0
for i, item in self.get_selected_items():
j += 1
archivo = self.generar_factura(item, mostrar=(j==1))
except Exception, e:
print e
self.error(u'Excepción', unicode(str(e), 'latin1', 'ignore'))
def on_btnEnviar_click(self, event):
try:
ok = no = 0
self.progreso(0)
for i, item in self.get_selected_items():
if not item['cae'] in ("", "NULL"):
archivo = self.generar_factura(item)
if item.get('email'):
self.enviar_mail(item,archivo)
ok += 1
else:
no += 1
self.log("No se envia factura %s por no tener EMAIL" % item['cbt_numero'])
else:
self.log("No se envia factura %s por no tener CAE" % item['cbt_numero'])
no += 1
self.progreso(i)
self.progreso(len(self.items))
gui.alert('Proceso finalizado OK!\n\nEnviados: %d\nNo enviados: %d' % (ok, no), 'Envio de Email')
except Exception, e:
self.error(u'Excepción',unicode(e))
def generar_factura(self, fila, mostrar=False):
fepdf = FEPDF()
fact = formato_csv.desaplanar([self.cols] + [[item[k] for k in self.cols] for item in [fila]])[0]
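        # Note: 'item', used further down to build the output file name, relies on
        # the Python 2 list comprehension above leaking its loop variable, so at
        # this point item == fila.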
fact['cbte_nro'] = fact['cbt_numero']
fact['items'] = fact['detalles']
for d in fact['datos']:
fepdf.AgregarDato(d['campo'], d['valor'], d['pagina'])
            # for backward compatibility, fill in the legacy top-level fields
if d['campo'] not in fact and d['valor']:
fact[d['campo']] = d['valor']
fepdf.factura = fact
        # load the default CSV layout (factura.csv)
fepdf.CargarFormato(conf_fact.get("formato", "factura.csv"))
        # fixed data:
        fepdf.CUIT = cuit  # issuer CUIT, used for the barcode
for k, v in conf_pdf.items():
fepdf.AgregarDato(k, v)
fepdf.CrearPlantilla(papel=conf_fact.get("papel", "legal"),
orientacion=conf_fact.get("orientacion", "portrait"))
fepdf.ProcesarPlantilla(num_copias=int(conf_fact.get("copias", 1)),
lineas_max=int(conf_fact.get("lineas_max", 24)),
qty_pos=conf_fact.get("cant_pos") or 'izq')
salida = conf_fact.get("salida", "")
fact = fepdf.factura
if salida:
pass
elif 'pdf' in fact and fact['pdf']:
salida = fact['pdf']
else:
            # build the output file name from the invoice data
d = conf_fact.get('directorio', ".")
clave_subdir = conf_fact.get('subdirectorio','fecha_cbte')
if clave_subdir:
d = os.path.join(d, item[clave_subdir])
if not os.path.isdir(d):
os.mkdir(d)
fs = conf_fact.get('archivo','numero').split(",")
it = item.copy()
tipo_fact, letra_fact, numero_fact = fact['_fmt_fact']
it['tipo'] = tipo_fact.replace(" ", "_")
it['letra'] = letra_fact
it['numero'] = numero_fact
it['mes'] = item['fecha_cbte'][4:6]
it['año'] = item['fecha_cbte'][0:4]
fn = ''.join([str(it.get(ff,ff)) for ff in fs])
fn = fn.decode('latin1').encode('ascii', 'replace').replace('?','_')
salida = os.path.join(d, "%s.pdf" % fn)
fepdf.GenerarPDF(archivo=salida)
if mostrar:
fepdf.MostrarPDF(archivo=salida,imprimir='--imprimir' in sys.argv)
return salida
def enviar_mail(self, item, archivo):
archivo = self.generar_factura(item)
if item['email']:
msg = MIMEMultipart()
msg['Subject'] = conf_mail['motivo'].replace("NUMERO",str(item['cbt_numero']))
msg['From'] = conf_mail['remitente']
msg['Reply-to'] = msg['From']
msg['To'] = item['email']
msg.preamble = 'Mensaje de multiples partes.\n'
if not 'html' in conf_mail:
part = MIMEText(conf_mail['cuerpo'])
msg.attach(part)
else:
alt = MIMEMultipart('alternative')
msg.attach(alt)
text = MIMEText(conf_mail['cuerpo'])
alt.attach(text)
# We reference the image in the IMG SRC attribute by the ID we give it below
html = MIMEText(conf_mail['html'], 'html')
alt.attach(html)
part = MIMEApplication(open(archivo,"rb").read())
part.add_header('Content-Disposition', 'attachment', filename=os.path.basename(archivo))
msg.attach(part)
try:
self.log("Enviando email: %s a %s" % (msg['Subject'], msg['To']))
if not self.smtp:
self.smtp = SMTP(conf_mail['servidor'], conf_mail.get('puerto', 25))
if conf_mail['usuario'] and conf_mail['clave']:
self.smtp.ehlo()
self.smtp.login(conf_mail['usuario'], conf_mail['clave'])
self.smtp.sendmail(msg['From'], msg['To'], msg.as_string())
except Exception,e:
self.error(u'Excepción',unicode(e))
if __name__ == '__main__':
if len(sys.argv)>1 and not sys.argv[1].startswith("-"):
CONFIG_FILE = sys.argv[1]
config = SafeConfigParser()
config.read(CONFIG_FILE)
if not len(config.sections()):
if os.path.exists(CONFIG_FILE):
gui.alert(u"Error al cargar archivo de configuración: %s" %
CONFIG_FILE, "PyRece: Imposible Continuar")
else:
gui.alert(u"No se encuentra archivo de configuración: %s" %
CONFIG_FILE, "PyRece: Imposible Continuar")
sys.exit(1)
cert = config.get('WSAA','CERT')
privatekey = config.get('WSAA','PRIVATEKEY')
cuit = config.get('WSFEv1','CUIT')
if config.has_option('WSFEv1','ENTRADA'):
entrada = config.get('WSFEv1','ENTRADA')
else:
entrada = ""
if not os.path.exists(entrada):
entrada = "facturas.csv"
if config.has_option('WSFEv1','SALIDA'):
salida = config.get('WSFEv1','SALIDA')
else:
salida = "resultado.csv"
if config.has_section('FACTURA'):
conf_fact = dict(config.items('FACTURA'))
else:
conf_fact = {}
conf_pdf = dict(config.items('PDF'))
conf_mail = dict(config.items('MAIL'))
if config.has_option('WSAA','URL') and not HOMO:
wsaa_url = config.get('WSAA','URL')
else:
wsaa_url = wsaa.WSAAURL
if config.has_option('WSFE','URL') and not HOMO:
wsfe_url = config.get('WSFE','URL')
else:
wsfe_url = wsfe.WSFEURL
if config.has_option('WSFEv1','URL') and not HOMO:
wsfev1_url = config.get('WSFEv1','URL')
else:
wsfev1_url = wsfev1.WSDL
if config.has_option('WSFEXv1','URL') and not HOMO:
wsfexv1_url = config.get('WSFEXv1','URL')
else:
wsfexv1_url = wsfexv1.WSDL
DEFAULT_WEBSERVICE = "wsfev1"
if config.has_section('PYRECE'):
DEFAULT_WEBSERVICE = config.get('PYRECE','WEBSERVICE')
if config.has_section('PROXY'):
proxy_dict = dict(("proxy_%s" % k,v) for k,v in config.items('PROXY'))
proxy_dict['proxy_port'] = int(proxy_dict['proxy_port'])
else:
proxy_dict = {}
c = PyRece()
gui.main_loop()
| gpl-3.0 | -4,385,594,502,368,245,000 | 41.411765 | 134 | 0.486195 | false |
SripriyaSeetharam/tacker | tacker/common/rpc_compat.py | 1 | 5323 | # Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo import messaging
from oslo_config import cfg
from tacker.common import rpc as n_rpc
from tacker.openstack.common import log as logging
from tacker.openstack.common import service
LOG = logging.getLogger(__name__)
class RpcProxy(object):
'''
This class is created to facilitate migration from oslo-incubator
RPC layer implementation to oslo.messaging and is intended to
emulate RpcProxy class behaviour using oslo.messaging API once the
migration is applied.
'''
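    # Illustrative usage only (topic and method names below are hypothetical):
    #   proxy = RpcProxy(topic='some-topic', default_version='1.0')
    #   proxy.call(context, proxy.make_msg('get_stuff', item_id=42), timeout=30)
    #   proxy.cast(context, proxy.make_msg('notify_stuff', item_id=42))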
RPC_API_NAMESPACE = None
def __init__(self, topic, default_version, version_cap=None):
self.topic = topic
target = messaging.Target(topic=topic, version=default_version)
self._client = n_rpc.get_client(target, version_cap=version_cap)
def make_msg(self, method, **kwargs):
return {'method': method,
'namespace': self.RPC_API_NAMESPACE,
'args': kwargs}
def call(self, context, msg, **kwargs):
return self.__call_rpc_method(
context, msg, rpc_method='call', **kwargs)
def cast(self, context, msg, **kwargs):
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
def fanout_cast(self, context, msg, **kwargs):
kwargs['fanout'] = True
self.__call_rpc_method(context, msg, rpc_method='cast', **kwargs)
def __call_rpc_method(self, context, msg, **kwargs):
options = dict(
((opt, kwargs[opt])
for opt in ('fanout', 'timeout', 'topic', 'version')
if kwargs.get(opt))
)
if msg['namespace']:
options['namespace'] = msg['namespace']
if options:
callee = self._client.prepare(**options)
else:
callee = self._client
func = getattr(callee, kwargs['rpc_method'])
return func(context, msg['method'], **msg['args'])
class RpcCallback(object):
'''
This class is created to facilitate migration from oslo-incubator
RPC layer implementation to oslo.messaging and is intended to set
callback version using oslo.messaging API once the migration is
applied.
'''
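    # Subclasses typically just override RPC_API_VERSION (e.g. '1.1') and add the
    # methods they expose; the oslo.messaging Target built below carries that
    # version for callback dispatch.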
RPC_API_VERSION = '1.0'
def __init__(self):
super(RpcCallback, self).__init__()
self.target = messaging.Target(version=self.RPC_API_VERSION)
class Service(service.Service):
"""Service object for binaries running on hosts.
A service enables rpc by listening to queues based on topic and host.
"""
def __init__(self, host, topic, manager=None, serializer=None):
super(Service, self).__init__()
self.host = host
self.topic = topic
self.serializer = serializer
if manager is None:
self.manager = self
else:
self.manager = manager
def start(self):
super(Service, self).start()
self.conn = create_connection(new=True)
LOG.debug("Creating Consumer connection for Service %s" %
self.topic)
endpoints = [self.manager]
# Share this same connection for these Consumers
self.conn.create_consumer(self.topic, endpoints, fanout=False)
node_topic = '%s.%s' % (self.topic, self.host)
self.conn.create_consumer(node_topic, endpoints, fanout=False)
self.conn.create_consumer(self.topic, endpoints, fanout=True)
# Hook to allow the manager to do other initializations after
# the rpc connection is created.
if callable(getattr(self.manager, 'initialize_service_hook', None)):
self.manager.initialize_service_hook(self)
# Consume from all consumers in threads
self.conn.consume_in_threads()
def stop(self):
# Try to shut the connection down, but if we get any sort of
        # errors, go ahead and ignore them... as we're shutting down anyway
try:
self.conn.close()
except Exception:
pass
super(Service, self).stop()
class Connection(object):
def __init__(self):
super(Connection, self).__init__()
self.servers = []
def create_consumer(self, topic, endpoints, fanout=False):
target = messaging.Target(
topic=topic, server=cfg.CONF.host, fanout=fanout)
server = n_rpc.get_server(target, endpoints)
self.servers.append(server)
def consume_in_threads(self):
for server in self.servers:
server.start()
return self.servers
# functions
def create_connection(new=True):
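    # The 'new' argument is kept only for signature compatibility with the old
    # oslo-incubator rpc.create_connection() helper and is ignored here.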
return Connection()
# exceptions
RPCException = messaging.MessagingException
RemoteError = messaging.RemoteError
MessagingTimeout = messaging.MessagingTimeout
| apache-2.0 | -968,317,959,237,220,100 | 31.260606 | 78 | 0.640804 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/services/job/tests/test_runner.py | 1 | 23096 | # Copyright 2009 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Tests for job-running facilities."""
import logging
import re
import sys
from textwrap import dedent
from time import sleep
from lazr.jobrunner.jobrunner import (
LeaseHeld,
SuspendJobException,
)
from testtools.matchers import MatchesRegex
from testtools.testcase import ExpectedException
import transaction
from zope.interface import implements
from lp.services.config import config
from lp.services.features.testing import FeatureFixture
from lp.services.job.interfaces.job import (
IRunnableJob,
JobStatus,
)
from lp.services.job.model.job import Job
from lp.services.job.runner import (
BaseRunnableJob,
celery_enabled,
JobRunner,
TwistedJobRunner,
)
from lp.services.log.logger import BufferLogger
from lp.services.webapp import errorlog
from lp.testing import (
TestCaseWithFactory,
ZopeTestInSubProcess,
)
from lp.testing.fakemethod import FakeMethod
from lp.testing.layers import LaunchpadZopelessLayer
from lp.testing.mail_helpers import pop_notifications
class NullJob(BaseRunnableJob):
"""A job that does nothing but append a string to a list."""
implements(IRunnableJob)
JOB_COMPLETIONS = []
def __init__(self, completion_message, oops_recipients=None,
error_recipients=None):
self.message = completion_message
self.job = Job()
self.oops_recipients = oops_recipients
if self.oops_recipients is None:
self.oops_recipients = []
self.error_recipients = error_recipients
if self.error_recipients is None:
self.error_recipients = []
def run(self):
NullJob.JOB_COMPLETIONS.append(self.message)
def getOopsRecipients(self):
return self.oops_recipients
def getOopsVars(self):
return [('foo', 'bar')]
def getErrorRecipients(self):
return self.error_recipients
def getOperationDescription(self):
return 'appending a string to a list'
class RaisingJobException(Exception):
"""Raised by the RaisingJob when run."""
class RaisingJob(NullJob):
"""A job that raises when it runs."""
def run(self):
raise RaisingJobException(self.message)
class RaisingJobUserError(NullJob):
"""A job that raises a user error when it runs."""
user_error_types = (RaisingJobException, )
def run(self):
raise RaisingJobException(self.message)
class RaisingJobRaisingNotifyOops(NullJob):
"""A job that raises when it runs, and when calling notifyOops."""
def run(self):
raise RaisingJobException(self.message)
def notifyOops(self, oops):
raise RaisingJobException('oops notifying oops')
class RaisingJobRaisingNotifyUserError(NullJob):
"""A job that raises when it runs, and when notifying user errors."""
user_error_types = (RaisingJobException, )
def run(self):
raise RaisingJobException(self.message)
def notifyUserError(self, error):
raise RaisingJobException('oops notifying users')
class RetryError(Exception):
pass
class RaisingRetryJob(NullJob):
retry_error_types = (RetryError,)
max_retries = 1
def run(self):
raise RetryError()
class TestJobRunner(TestCaseWithFactory):
"""Ensure JobRunner behaves as expected."""
layer = LaunchpadZopelessLayer
def makeTwoJobs(self):
"""Test fixture. Create two jobs."""
return NullJob("job 1"), NullJob("job 2")
def test_runJob(self):
"""Ensure status is set to completed when a job runs to completion."""
job_1, job_2 = self.makeTwoJobs()
runner = JobRunner(job_1)
runner.runJob(job_1, None)
self.assertEqual(JobStatus.COMPLETED, job_1.job.status)
self.assertEqual([job_1], runner.completed_jobs)
def test_runAll(self):
"""Ensure runAll works in the normal case."""
job_1, job_2 = self.makeTwoJobs()
runner = JobRunner([job_1, job_2])
runner.runAll()
self.assertEqual(JobStatus.COMPLETED, job_1.job.status)
self.assertEqual(JobStatus.COMPLETED, job_2.job.status)
msg1 = NullJob.JOB_COMPLETIONS.pop()
msg2 = NullJob.JOB_COMPLETIONS.pop()
self.assertEqual(msg1, "job 2")
self.assertEqual(msg2, "job 1")
self.assertEqual([job_1, job_2], runner.completed_jobs)
def test_runAll_skips_lease_failures(self):
"""Ensure runAll skips jobs whose leases can't be acquired."""
job_1, job_2 = self.makeTwoJobs()
job_2.job.acquireLease()
runner = JobRunner([job_1, job_2])
runner.runAll()
self.assertEqual(JobStatus.COMPLETED, job_1.job.status)
self.assertEqual(JobStatus.WAITING, job_2.job.status)
self.assertEqual([job_1], runner.completed_jobs)
self.assertEqual([job_2], runner.incomplete_jobs)
self.assertEqual([], self.oopses)
def test_runAll_reports_oopses(self):
"""When an error is encountered, report an oops and continue."""
job_1, job_2 = self.makeTwoJobs()
def raiseError():
# Ensure that jobs which call transaction.abort work, too.
transaction.abort()
raise Exception('Fake exception. Foobar, I say!')
job_1.run = raiseError
runner = JobRunner([job_1, job_2])
runner.runAll()
self.assertEqual([], pop_notifications())
self.assertEqual([job_2], runner.completed_jobs)
self.assertEqual([job_1], runner.incomplete_jobs)
self.assertEqual(JobStatus.FAILED, job_1.job.status)
self.assertEqual(JobStatus.COMPLETED, job_2.job.status)
oops = self.oopses[-1]
self.assertIn('Fake exception. Foobar, I say!', oops['tb_text'])
self.assertEqual(["{'foo': 'bar'}"], oops['req_vars'].values())
def test_oops_messages_used_when_handling(self):
"""Oops messages should appear even when exceptions are handled."""
job_1, job_2 = self.makeTwoJobs()
def handleError():
reporter = errorlog.globalErrorUtility
try:
raise ValueError('Fake exception. Foobar, I say!')
except ValueError:
reporter.raising(sys.exc_info())
job_1.run = handleError
runner = JobRunner([job_1, job_2])
runner.runAll()
oops = self.oopses[-1]
self.assertEqual(["{'foo': 'bar'}"], oops['req_vars'].values())
def test_runAll_aborts_transaction_on_error(self):
"""runAll should abort the transaction on oops."""
class DBAlterJob(NullJob):
def __init__(self):
super(DBAlterJob, self).__init__('')
def run(self):
self.job.log = 'hello'
raise ValueError
job = DBAlterJob()
runner = JobRunner([job])
runner.runAll()
# If the transaction was committed, job.log == 'hello'. If it was
# aborted, it is None.
self.assertIs(None, job.job.log)
def test_runAll_mails_oopses(self):
"""Email interested parties about OOPses."""
job_1, job_2 = self.makeTwoJobs()
def raiseError():
# Ensure that jobs which call transaction.abort work, too.
transaction.abort()
raise Exception('Fake exception. Foobar, I say!')
job_1.run = raiseError
job_1.oops_recipients = ['jrandom@example.org']
runner = JobRunner([job_1, job_2])
runner.runAll()
(notification,) = pop_notifications()
oops = self.oopses[-1]
self.assertIn(
'Launchpad encountered an internal error during the following'
' operation: appending a string to a list. It was logged with id'
' %s. Sorry for the inconvenience.' % oops['id'],
notification.get_payload(decode=True))
self.assertNotIn('Fake exception. Foobar, I say!',
notification.get_payload(decode=True))
self.assertEqual('Launchpad internal error', notification['subject'])
def test_runAll_mails_user_errors(self):
"""User errors should be mailed out without oopsing.
User errors are identified by the RunnableJob.user_error_types
attribute. They do not cause an oops to be recorded, and their
error messages are mailed to interested parties verbatim.
"""
job_1, job_2 = self.makeTwoJobs()
class ExampleError(Exception):
pass
def raiseError():
raise ExampleError('Fake exception. Foobar, I say!')
job_1.run = raiseError
job_1.user_error_types = (ExampleError,)
job_1.error_recipients = ['jrandom@example.org']
runner = JobRunner([job_1, job_2])
runner.runAll()
self.assertEqual([], self.oopses)
notifications = pop_notifications()
self.assertEqual(1, len(notifications))
body = notifications[0].get_payload(decode=True)
self.assertEqual(
'Launchpad encountered an error during the following operation:'
' appending a string to a list. Fake exception. Foobar, I say!',
body)
self.assertEqual(
'Launchpad error while appending a string to a list',
notifications[0]['subject'])
def test_runAll_requires_IRunnable(self):
"""Supplied classes must implement IRunnableJob.
If they don't, we get a TypeError. If they do, then we get an
AttributeError, because we don't actually implement the interface.
"""
runner = JobRunner([object()])
self.assertRaises(TypeError, runner.runAll)
class Runnable:
implements(IRunnableJob)
runner = JobRunner([Runnable()])
self.assertRaises(AttributeError, runner.runAll)
def test_runJob_records_failure(self):
"""When a job fails, the failure needs to be recorded."""
job = RaisingJob('boom')
runner = JobRunner([job])
self.assertRaises(RaisingJobException, runner.runJob, job, None)
# Abort the transaction to confirm that the update of the job status
# has been committed.
transaction.abort()
self.assertEqual(JobStatus.FAILED, job.job.status)
def test_runJobHandleErrors_oops_generated(self):
"""The handle errors method records an oops for raised errors."""
job = RaisingJob('boom')
runner = JobRunner([job])
runner.runJobHandleError(job)
self.assertEqual(1, len(self.oopses))
def test_runJobHandleErrors_user_error_no_oops(self):
"""If the job raises a user error, there is no oops."""
job = RaisingJobUserError('boom')
runner = JobRunner([job])
runner.runJobHandleError(job)
self.assertEqual(0, len(self.oopses))
def test_runJob_raising_retry_error(self):
"""If a job raises a retry_error, it should be re-queued."""
job = RaisingRetryJob('completion')
runner = JobRunner([job])
with self.expectedLog('Scheduling retry due to RetryError'):
runner.runJob(job, None)
self.assertEqual(JobStatus.WAITING, job.status)
self.assertNotIn(job, runner.completed_jobs)
self.assertIn(job, runner.incomplete_jobs)
def test_runJob_exceeding_max_retries(self):
"""If a job exceeds maximum retries, it should raise normally."""
job = RaisingRetryJob('completion')
JobRunner([job]).runJob(job, None)
self.assertEqual(JobStatus.WAITING, job.status)
runner = JobRunner([job])
with ExpectedException(RetryError, ''):
runner.runJob(job, None)
self.assertEqual(JobStatus.FAILED, job.status)
self.assertNotIn(job, runner.completed_jobs)
self.assertIn(job, runner.incomplete_jobs)
def test_runJobHandleErrors_oops_generated_notify_fails(self):
"""A second oops is logged if the notification of the oops fails."""
job = RaisingJobRaisingNotifyOops('boom')
runner = JobRunner([job])
runner.runJobHandleError(job)
self.assertEqual(2, len(self.oopses))
def test_runJobHandleErrors_oops_generated_user_notify_fails(self):
"""A second oops is logged if the notification of the oops fails.
In this test case the error is a user expected error, so the
notifyUserError is called, and in this case the notify raises too.
"""
job = RaisingJobRaisingNotifyUserError('boom')
runner = JobRunner([job])
runner.runJobHandleError(job)
self.assertEqual(1, len(self.oopses))
def test_runJob_with_SuspendJobException(self):
# A job that raises SuspendJobError should end up suspended.
job = NullJob('suspended')
job.run = FakeMethod(failure=SuspendJobException())
runner = JobRunner([job])
runner.runJob(job, None)
self.assertEqual(JobStatus.SUSPENDED, job.status)
self.assertNotIn(job, runner.completed_jobs)
self.assertIn(job, runner.incomplete_jobs)
def test_taskId(self):
# BaseRunnableJob.taskId() creates a task ID that consists
# of the Job's class name, the job ID and a UUID.
job = NullJob(completion_message="doesn't matter")
task_id = job.taskId()
uuid_expr = (
'[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}')
mo = re.search('^NullJob_%s_%s$' % (job.job_id, uuid_expr), task_id)
self.assertIsNot(None, mo)
class StaticJobSource(BaseRunnableJob):
@classmethod
def iterReady(cls):
if not cls.done:
for index, args in enumerate(cls.jobs):
yield cls.get(index)
cls.done = True
@classmethod
def get(cls, index):
args = cls.jobs[index]
return cls(index, *args)
class StuckJob(StaticJobSource):
"""Simulation of a job that stalls."""
implements(IRunnableJob)
done = False
# A list of jobs to run: id, lease_length, delay.
#
# For the first job, have a very long lease, so that it
# doesn't expire and so we soak up the ZCML loading time. For the
# second job, have a short lease so we hit the timeout.
jobs = [
(10000, 0),
(5, 30),
]
def __init__(self, id, lease_length, delay):
self.id = id
self.lease_length = lease_length
self.delay = delay
self.job = Job()
def __repr__(self):
return '<%s(%r, lease_length=%s, delay=%s)>' % (
self.__class__.__name__, self.id, self.lease_length, self.delay)
def acquireLease(self):
return self.job.acquireLease(self.lease_length)
def run(self):
sleep(self.delay)
class ShorterStuckJob(StuckJob):
"""Simulation of a job that stalls."""
jobs = [
(10000, 0),
(0.05, 30),
]
class InitialFailureJob(StaticJobSource):
implements(IRunnableJob)
jobs = [(True,), (False,)]
has_failed = False
done = False
def __init__(self, id, fail):
self.id = id
self.job = Job()
self.fail = fail
def run(self):
if self.fail:
InitialFailureJob.has_failed = True
raise ValueError('I failed.')
else:
if InitialFailureJob.has_failed:
raise ValueError('Previous failure.')
class ProcessSharingJob(StaticJobSource):
implements(IRunnableJob)
jobs = [(True,), (False,)]
initial_job_was_here = False
done = False
def __init__(self, id, first):
self.id = id
self.job = Job()
self.first = first
def run(self):
if self.first:
ProcessSharingJob.initial_job_was_here = True
else:
if not ProcessSharingJob.initial_job_was_here:
raise ValueError('Different process.')
class MemoryHogJob(StaticJobSource):
implements(IRunnableJob)
jobs = [()]
done = False
memory_limit = 0
def __init__(self, id):
self.job = Job()
self.id = id
def run(self):
self.x = '*' * (10 ** 6)
class NoJobs(StaticJobSource):
done = False
jobs = []
class LeaseHeldJob(StaticJobSource):
implements(IRunnableJob)
jobs = [()]
done = False
def __init__(self, id):
self.job = Job()
self.id = id
def acquireLease(self):
raise LeaseHeld()
class TestTwistedJobRunner(ZopeTestInSubProcess, TestCaseWithFactory):
# Needs AMQP
layer = LaunchpadZopelessLayer
def setUp(self):
super(TestTwistedJobRunner, self).setUp()
# The test relies on _pythonpath being importable. Thus we need to add
# a directory that contains _pythonpath to the sys.path. We can rely
# on the root directory of the checkout containing _pythonpath.
if config.root not in sys.path:
sys.path.append(config.root)
self.addCleanup(sys.path.remove, config.root)
def test_timeout_long(self):
"""When a job exceeds its lease, an exception is raised.
Unfortunately, timeouts include the time it takes for the zope
machinery to start up, so we run a job that will not time out first,
followed by a job that is sure to time out.
"""
logger = BufferLogger()
logger.setLevel(logging.INFO)
# StuckJob is actually a source of two jobs. The first is fast, the
# second slow.
runner = TwistedJobRunner.runFromSource(
StuckJob, 'branchscanner', logger)
self.assertEqual(
(1, 1), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
self.oops_capture.sync()
oops = self.oopses[0]
expected_exception = ('TimeoutError', 'Job ran too long.')
self.assertEqual(expected_exception, (oops['type'], oops['value']))
self.assertThat(logger.getLogBuffer(), MatchesRegex(
dedent("""\
INFO Running through Twisted.
INFO Running <StuckJob.*?> \(ID .*?\).
INFO Running <StuckJob.*?> \(ID .*?\).
INFO Job resulted in OOPS: .*
""")))
# XXX: BradCrittenden 2012-05-09 bug=994777: Disabled as a spurious
# failure. In isolation this test fails 5% of the time.
def disabled_test_timeout_short(self):
"""When a job exceeds its lease, an exception is raised.
Unfortunately, timeouts include the time it takes for the zope
machinery to start up, so we run a job that will not time out first,
followed by a job that is sure to time out.
"""
logger = BufferLogger()
logger.setLevel(logging.INFO)
# StuckJob is actually a source of two jobs. The first is fast, the
# second slow.
runner = TwistedJobRunner.runFromSource(
ShorterStuckJob, 'branchscanner', logger)
self.oops_capture.sync()
oops = self.oopses[0]
self.assertEqual(
(1, 1), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
self.assertThat(
logger.getLogBuffer(), MatchesRegex(
dedent("""\
INFO Running through Twisted.
INFO Running <ShorterStuckJob.*?> \(ID .*?\).
INFO Running <ShorterStuckJob.*?> \(ID .*?\).
INFO Job resulted in OOPS: %s
""") % oops['id']))
self.assertEqual(('TimeoutError', 'Job ran too long.'),
(oops['type'], oops['value']))
def test_previous_failure_gives_new_process(self):
"""Failed jobs cause their worker to be terminated.
When a job fails, it's not clear whether its process can be safely
reused for a new job, so we kill the worker.
"""
logger = BufferLogger()
runner = TwistedJobRunner.runFromSource(
InitialFailureJob, 'branchscanner', logger)
self.assertEqual(
(1, 1), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
def test_successful_jobs_share_process(self):
"""Successful jobs allow process reuse.
When a job succeeds, we assume that its process can be safely reused
for a new job, so we reuse the worker.
"""
logger = BufferLogger()
runner = TwistedJobRunner.runFromSource(
ProcessSharingJob, 'branchscanner', logger)
self.assertEqual(
(2, 0), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
def disable_test_memory_hog_job(self):
"""A job with a memory limit will trigger MemoryError on excess."""
# XXX: frankban 2012-03-29 bug=963455: This test fails intermittently,
# especially in parallel tests.
logger = BufferLogger()
logger.setLevel(logging.INFO)
runner = TwistedJobRunner.runFromSource(
MemoryHogJob, 'branchscanner', logger)
self.assertEqual(
(0, 1), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
self.assertIn('Job resulted in OOPS', logger.getLogBuffer())
self.oops_capture.sync()
self.assertEqual('MemoryError', self.oopses[0]['type'])
def test_no_jobs(self):
logger = BufferLogger()
logger.setLevel(logging.INFO)
runner = TwistedJobRunner.runFromSource(
NoJobs, 'branchscanner', logger)
self.assertEqual(
(0, 0), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
def test_lease_held_handled(self):
"""Jobs that raise LeaseHeld are handled correctly."""
logger = BufferLogger()
logger.setLevel(logging.DEBUG)
runner = TwistedJobRunner.runFromSource(
LeaseHeldJob, 'branchscanner', logger)
self.assertIn('Could not acquire lease', logger.getLogBuffer())
self.assertEqual(
(0, 1), (len(runner.completed_jobs), len(runner.incomplete_jobs)))
class TestCeleryEnabled(TestCaseWithFactory):
layer = LaunchpadZopelessLayer
def test_no_flag(self):
"""With no flag set, result is False."""
self.assertFalse(celery_enabled('foo'))
def test_matching_flag(self):
"""A matching flag returns True."""
self.useFixture(FeatureFixture(
{'jobs.celery.enabled_classes': 'foo bar'}))
self.assertTrue(celery_enabled('foo'))
self.assertTrue(celery_enabled('bar'))
def test_non_matching_flag(self):
"""A non-matching flag returns false."""
self.useFixture(FeatureFixture(
{'jobs.celery.enabled_classes': 'foo bar'}))
self.assertFalse(celery_enabled('baz'))
self.assertTrue(celery_enabled('bar'))
def test_substring(self):
"""A substring of an enabled class does not match."""
self.useFixture(FeatureFixture(
{'jobs.celery.enabled_classes': 'foobar'}))
self.assertFalse(celery_enabled('bar'))
| agpl-3.0 | -4,130,563,661,033,733,600 | 32.667638 | 78 | 0.624567 | false |
Strubbl/pynder | socket_client.py | 1 | 1745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of Pynder.
Pynder is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Pynder is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Pynder. If not, see <http://www.gnu.org/licenses/>.
"""
import socket
import sys
import config
import version
__author__ = "Strubbl"
__version__ = version.version
__credits__ = ["Strubbl"]
if __name__ == "__main__":
HOST, PORT = "localhost", config.socket_port
data = ""
j = 0
for i in sys.argv:
if j > 0:
data += i + " "
j += 1
if data == "":
data = "no data given on " + socket.gethostname()
print data
s = None
for res in socket.getaddrinfo(HOST, PORT, socket.AF_UNSPEC, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error, msg:
s = None
continue
try:
s.connect(sa)
except socket.error, msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
sys.exit(1)
s.send(data)
data = s.recv(1024)
s.close()
print 'Received', repr(data)
| gpl-3.0 | 7,890,824,665,545,262,000 | 25.439394 | 84 | 0.596562 | false |
mbauskar/erpnext | erpnext/patches/v8_7/make_subscription_from_recurring_data.py | 1 | 1634 | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import today
def execute():
frappe.reload_doc('subscription', 'doctype', 'subscription')
frappe.reload_doc('selling', 'doctype', 'sales_order')
frappe.reload_doc('buying', 'doctype', 'purchase_order')
frappe.reload_doc('accounts', 'doctype', 'sales_invoice')
frappe.reload_doc('accounts', 'doctype', 'purchase_invoice')
for doctype in ['Sales Order', 'Sales Invoice',
'Purchase Invoice', 'Purchase Invoice']:
for data in get_data(doctype):
make_subscription(doctype, data)
def get_data(doctype):
	return frappe.db.sql(""" select name, from_date, end_date, recurring_type, recurring_id,
next_date, notify_by_email, notification_email_address, recurring_print_format,
repeat_on_day_of_month, submit_on_creation
from `tab{0}` where is_recurring = 1 and next_date >= %s
""".format(doctype), today(), as_dict=1)
def make_subscription(doctype, data):
doc = frappe.get_doc({
'doctype': 'Subscription',
'reference_doctype': doctype,
'reference_document': data.name,
'start_date': data.from_date,
'end_date': data.end_date,
'frequency': data.recurring_type,
'repeat_on_day': data.repeat_on_day_of_month,
'notify_by_email': data.notify_by_email,
'recipients': data.notification_email_address,
'next_schedule_date': data.next_date,
'submit_on_creation': data.submit_on_creation
}).insert(ignore_permissions=True)
doc.submit()
if not doc.subscription:
frappe.db.set_value(doctype, data.name, "subscription", doc.name) | gpl-3.0 | -903,746,289,309,461,900 | 35.333333 | 87 | 0.72093 | false |
Russell-Jones/django-wiki | wiki/migrations/0001_initial.py | 1 | 18236 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Article'
db.create_table('wiki_article', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['wiki.ArticleRevision'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('owner', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('group', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.Group'], null=True, blank=True)),
('group_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('group_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_read', self.gf('django.db.models.fields.BooleanField')(default=True)),
('other_write', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('wiki', ['Article'])
# Adding model 'ArticleForObject'
db.create_table('wiki_articleforobject', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_articleforobject', to=orm['contenttypes.ContentType'])),
('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
('is_mptt', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticleForObject'])
# Adding unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.create_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Adding model 'ArticleRevision'
db.create_table('wiki_articlerevision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision_number', self.gf('django.db.models.fields.IntegerField')()),
('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('content', self.gf('django.db.models.fields.TextField')(blank=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=512)),
('redirect', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='redirect_set', null=True, to=orm['wiki.Article'])),
))
db.send_create_signal('wiki', ['ArticleRevision'])
# Adding unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.create_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Adding model 'URLPath'
db.create_table('wiki_urlpath', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=50, null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['wiki.URLPath'])),
('lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
('level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal('wiki', ['URLPath'])
# Adding unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.create_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Adding model 'ArticlePlugin'
db.create_table('wiki_articleplugin', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('article', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.Article'])),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('wiki', ['ArticlePlugin'])
# Adding model 'ReusablePlugin'
db.create_table('wiki_reusableplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
))
db.send_create_signal('wiki', ['ReusablePlugin'])
# Adding M2M table for field articles on 'ReusablePlugin'
db.create_table('wiki_reusableplugin_articles', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('reusableplugin', models.ForeignKey(orm['wiki.reusableplugin'], null=False)),
('article', models.ForeignKey(orm['wiki.article'], null=False))
))
db.create_unique('wiki_reusableplugin_articles', ['reusableplugin_id', 'article_id'])
# Adding model 'RevisionPlugin'
db.create_table('wiki_revisionplugin', (
('articleplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ArticlePlugin'], unique=True, primary_key=True)),
('revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['wiki.ArticleRevision'])),
))
db.send_create_signal('wiki', ['RevisionPlugin'])
def backwards(self, orm):
# Removing unique constraint on 'URLPath', fields ['site', 'parent', 'slug']
db.delete_unique('wiki_urlpath', ['site_id', 'parent_id', 'slug'])
# Removing unique constraint on 'ArticleRevision', fields ['article', 'revision_number']
db.delete_unique('wiki_articlerevision', ['article_id', 'revision_number'])
# Removing unique constraint on 'ArticleForObject', fields ['content_type', 'object_id']
db.delete_unique('wiki_articleforobject', ['content_type_id', 'object_id'])
# Deleting model 'Article'
db.delete_table('wiki_article')
# Deleting model 'ArticleForObject'
db.delete_table('wiki_articleforobject')
# Deleting model 'ArticleRevision'
db.delete_table('wiki_articlerevision')
# Deleting model 'URLPath'
db.delete_table('wiki_urlpath')
# Deleting model 'ArticlePlugin'
db.delete_table('wiki_articleplugin')
# Deleting model 'ReusablePlugin'
db.delete_table('wiki_reusableplugin')
# Removing M2M table for field articles on 'ReusablePlugin'
db.delete_table('wiki_reusableplugin_articles')
# Deleting model 'RevisionPlugin'
db.delete_table('wiki_revisionplugin')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'wiki.articleforobject': {
'Meta': {'unique_together': "(('content_type', 'object_id'),)", 'object_name': 'ArticleForObject'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_articleforobject'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_mptt': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'redirect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirect_set'", 'null': 'True', 'to': "orm['wiki.Article']"}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
},
'wiki.revisionplugin': {
'Meta': {'object_name': 'RevisionPlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']"})
},
'wiki.urlpath': {
'Meta': {'unique_together': "(('site', 'parent', 'slug'),)", 'object_name': 'URLPath'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['wiki.URLPath']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['wiki']
| gpl-3.0 | -3,276,954,538,703,390,000 | 67.556391 | 209 | 0.592948 | false |
matthagy/Jamenson/jamenson/transform/globals.py | 1 | 4950 | '''Transform operations on globally scoped symbols into
operations on the symbol_cell mapping.
'''
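# Concretely (see transform_global_symbol_use below): a read of a global
# symbol ``x`` is rewritten to ``<symbol-cells-map>[x]``, a write to
# ``<symbol-cells-map>[x] = value`` and a delete to ``del <symbol-cells-map>[x]``,
# where ``<symbol-cells-map>`` is a gensym'd local bound once per toplevel to
# the result of ``get_symbol_cells_map()``.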
from __future__ import absolute_import
from __future__ import with_statement
from ..runtime.symbol import get_symbol_cells_map, gensym
from ..compiler import ir as I
from ..compiler import bind
from ..compiler.walk import IRWalker, propigate_location
from ..compiler.translate import state as translation_state
class GlobalSymbolTransformer(IRWalker):
def __init__(self, symbol_map_sym, top_scope):
IRWalker.__init__(self)
self.symbol_map_sym = symbol_map_sym
self.current_scope = top_scope
@staticmethod
def is_global(binding):
return bind.get_binding_use_type(binding) == bind.BND_GLOBAL
@staticmethod
def replace(old, new, skips=[]):
propigate_location(old, new, skips)
I.replace_child(old, new)
def visit_function(self, func):
for child in func.defaults:
self.visit(child)
old_scope = self.current_scope
self.current_scope = func.scope
self.visit(func.body)
self.current_scope = old_scope
def make_read_map(self):
return I.make_read_binding(self.current_scope.use_symbol(self.symbol_map_sym))
def visit_read_binding(self, rb):
if not self.is_global(rb.binding):
return
self.replace(rb, I.make_getitem(self.make_read_map(),
I.make_constant(rb.binding.symbol)))
def make_set(self, binding, value_ir):
return I.make_setitem(self.make_read_map(),
I.make_constant(binding.symbol),
value_ir)
def visit_write_binding(self, wb):
value = wb.value
if self.is_global(wb.binding):
del value.continuation
self.replace(wb, self.make_set(wb.binding, value),
skips=[value])
self.visit(value)
def visit_delete_binding(self, db):
if not self.is_global(db.binding):
return
self.replace(db, I.make_delitem(self.make_read_map(),
I.make_constant(db.binding.symbol)))
def visit_foriter(self, fi):
itr = fi.iter
if self.is_global(fi.binding):
old_binding = fi.binding
del fi.binding
sym = gensym('foriter-tmp')
self.current_scope.register_local(sym)
del itr.continuation
self.replace(fi, I.make_progn([
I.make_foriter(tag=fi.tag,
binding=self.current_scope.use_symbol(sym),
iter=itr),
self.make_set(old_binding, I.make_read_binding(self.current_scope.use_symbol(sym)))
]),
skips=[itr])
del fi.tag
self.visit(itr)
def visit_unpack_seq(self, us):
new_bindings = []
copies = []
for binding in us.places:
if not self.is_global(binding):
new_bindings.append(binding)
else:
gs = gensym('unpack-tmp')
new_bindings.append(self.current_scope.register_and_use_local(gs))
copies.append([gs, binding])
seq = us.seq
if copies:
del seq.continuation
del us.places
self.replace(us, I.make_progn([
I.make_unpack_seq(seq, new_bindings)
] + [self.make_set(binding, I.make_read_binding(self.current_scope.use_symbol(gs)))
for gs,binding in copies]),
skips=[seq])
self.visit(seq)
def transform_global_symbol_use(top):
assert isinstance(top, I.toplevel)
top_scope = top.scope
assert not top_scope.parent
symbol_map_sym = gensym('symbol-cells-map')
symbol_map_binding = top_scope.register_local(symbol_map_sym)
GlobalSymbolTransformer(symbol_map_sym, top_scope).visit(top.expression)
if not len(symbol_map_binding.uses):
top_scope.unregister_binding(symbol_map_binding)
return top
expression = top.expression
del expression.continuation
when = None
if isinstance(expression, I.evalwhen):
when = expression.when
expression = expression.expression
del expression.continuation
new_ir = I.make_progn([I.make_write_binding(
top_scope.use_symbol(symbol_map_sym),
I.make_call(callee=I.make_constant(get_symbol_cells_map),
args=[], kwd_names=[], kwd_values=[],
star_args=None, star_kwds=None)),
expression])
if when is not None:
new_ir = I.make_evalwhen(when=when, expression=new_ir)
new_top = I.make_toplevel(new_ir, top_scope)
propigate_location(top, new_top, [expression])
return new_top
| apache-2.0 | -3,855,520,096,124,097,500 | 36.218045 | 99 | 0.573131 | false |
saga-project/bliss | setup.py | 1 | 5145 | # -*- coding: utf-8 -*-
"""
Bliss setup script.
"""
import os
import sys
import shutil
import fileinput
from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.sdist import sdist
from bliss import version
scripts = [] # ["bin/bliss-run"]
import sys
if sys.hexversion < 0x02040000:
raise RuntimeError, "Bliss requires Python 2.4 or higher"
class our_install_data(install_data):
def finalize_options(self):
self.set_undefined_options('install',
('install_lib', 'install_dir'),
)
install_data.finalize_options(self)
def run(self):
install_data.run(self)
# ensure there's a bliss/VERSION file
fn = os.path.join(self.install_dir, 'bliss', 'VERSION')
open(fn, 'w').write(version)
self.outfiles.append(fn)
class our_sdist(sdist):
def make_release_tree(self, base_dir, files):
sdist.make_release_tree(self, base_dir, files)
        # ensure there's a bliss/VERSION file
fn = os.path.join(base_dir, 'bliss', 'VERSION')
open(fn, 'w').write(version)
setup_args = {
'name': "bliss",
'version': version,
'description': "A native Python implementation of the OGF SAGA standard (GFD.90).",
'long_description': "SAGA-Python (a.k.a bliss) is a pragmatic and light-weight implementation of the OGF GFD.90 SAGA standard. SAGA-Python is written 100% in Python and focuses on usability and ease of deployment.",
'author': "Ole Christian Weidner, et al.",
'author_email': "ole.weidner@rutgers.edu",
'maintainer': "Ole Christian Weidner",
'maintainer_email': "ole.weidner@rutgers.edu",
'url': "http://saga-project.github.com/bliss/",
'license': "MIT",
'classifiers': [
'Development Status :: 5 - Production/Stable',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Programming Language :: Python',
'License :: OSI Approved :: MIT License',
'Topic :: System :: Distributed Computing',
'Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Operating System :: POSIX :: AIX',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: BSD :: BSD/OS',
'Operating System :: POSIX :: BSD :: FreeBSD',
'Operating System :: POSIX :: BSD :: NetBSD',
'Operating System :: POSIX :: BSD :: OpenBSD',
'Operating System :: POSIX :: GNU Hurd',
'Operating System :: POSIX :: HP-UX',
'Operating System :: POSIX :: IRIX',
'Operating System :: POSIX :: Linux',
'Operating System :: POSIX :: Other',
'Operating System :: POSIX :: SCO',
'Operating System :: POSIX :: SunOS/Solaris',
'Operating System :: Unix'
],
'packages': [
"bliss",
"bliss.saga",
"bliss.saga.job",
"bliss.saga.resource",
"bliss.saga.filesystem",
#"bliss.sagacompat",
#"bliss.sagacompat.sd",
#"bliss.sagacompat.job",
#"bliss.sagacompat.filesystem",
"bliss.utils",
"bliss.runtime",
"bliss.interface",
"bliss.plugins",
"bliss.plugins.local",
"bliss.plugins.sge",
"bliss.plugins.pbs",
"bliss.plugins.sftp",
"bliss.plugins.ssh"
],
'scripts': scripts,
# mention data_files, even if empty, so install_data is called and
# VERSION gets copied
'data_files': [("bliss", [])],
'cmdclass': {
'install_data': our_install_data,
'sdist': our_sdist
}
}
# set zip_safe to false to force Windows installs to always unpack eggs
# into directories, which seems to work better --
# see http://buildbot.net/trac/ticket/907
if sys.platform == "win32":
setup_args['zip_safe'] = False
try:
# If setuptools is installed, then we'll add setuptools-specific arguments
# to the setup args.
import setuptools #@UnusedImport
except ImportError:
pass
else:
setup_args['install_requires'] = [
'paramiko-on-pypi', 'pexpect'
]
if os.getenv('BLISS_NO_INSTALL_REQS'):
setup_args['install_requires'] = None
##
## PROCESS SETUP OPTIONS FOR DIFFERENT BACKENDS
##
# process AIR_AMQP_HOSTNAME and AIR_AMQP_PORT
#air_amqp_hostname = os.getenv('AIR_AMQP_HOST')
#air_amqp_port = os.getenv('AIR_AMQP_PORT')
#
#if not air_amqp_hostname:
# air_amqp_hostname = "localhost"
#
#print "setting default amqp hostname to '%s' in air/scripts/config.py" % air_amqp_hostname
#
#if not air_amqp_port:
# air_amqp_port = "5672"
#
#print "setting default amqp port to '%s' in air/scripts/config.py" % air_amqp_port
#
#
#shutil.copyfile("./air/scripts/config.py.in", "./air/scripts/config.py")
#s = open("./air/scripts/config.py.in").read()
#s = s.replace('###REPLACE_WITH_AMQP_HOSTNAME###', str(air_amqp_hostname))
#s = s.replace('###REPLACE_WITH_AMQP_PORT###', str(air_amqp_port))
#f = open("./air/scripts/config.py", 'w')
#f.write(s)
#f.close()
setup(**setup_args)
| mit | 3,389,903,422,821,095,400 | 30.181818 | 219 | 0.620214 | false |
cyphactor/lifecyclemanager | testenv/trac-0.10.4/trac/util/autoreload.py | 1 | 2888 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2006 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import time
import thread
_SLEEP_TIME = 1
def _reloader_thread(modification_callback):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
    @param modification_callback: function called with a single argument, the
    path of the modified file, after a modification is detected."""
mtimes = {}
while True:
for filename in filter(None, [getattr(module, "__file__", None)
for module in sys.modules.values()]):
while not os.path.isfile(filename): # Probably in an egg or zip file
filename = os.path.dirname(filename)
if not filename:
break
if not filename: # Couldn't map to physical file, so just ignore
continue
if filename.endswith(".pyc"):
filename = filename[:-1]
mtime = os.stat(filename).st_mtime
if filename not in mtimes:
mtimes[filename] = mtime
continue
if mtime > mtimes[filename]:
modification_callback(filename)
sys.exit(3)
time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
while True:
args = [sys.executable] + sys.argv
if sys.platform == "win32":
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ["RUN_MAIN"] = 'true'
# This call reinvokes ourself and goes into the other branch of main as
# a new process.
exit_code = os.spawnve(os.P_WAIT, sys.executable,
args, new_environ)
if exit_code != 3:
return exit_code
def main(main_func, modification_callback):
"""Run `main_func` and restart any time modules are changed."""
if os.environ.get("RUN_MAIN"):
        # Launch the actual program as a child thread
thread.start_new_thread(main_func, ())
try:
# Now wait for a file modification and quit
_reloader_thread(modification_callback)
except KeyboardInterrupt:
pass
else:
# Initial invocation just waits around restarting this executable
try:
sys.exit(_restart_with_reloader())
except KeyboardInterrupt:
pass
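# Usage sketch (the function names below are illustrative, not part of this
# module):
#
#     def run_server():
#         pass    # long-running work to be auto-restarted
#
#     def on_change(filename):
#         print 'restarting: %s changed' % filename
#
#     main(run_server, on_change)
#
# The initial invocation keeps re-spawning this script; the child exits with
# code 3 when a loaded module changes, and any other exit code ends the loop.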
| gpl-3.0 | -7,681,305,959,908,121,000 | 34.219512 | 80 | 0.608033 | false |
DeepRNN/visual_question_answering | utils/vqa/vqa.py | 1 | 12335 | __author__ = 'aagrawal'
__version__ = '0.9'
# Interface for accessing the VQA dataset.
# This code is based on the code written by Tsung-Yi Lin for MSCOCO Python API available at the following link:
# (https://github.com/pdollar/coco/blob/master/PythonAPI/pycocotools/coco.py).
# The following functions are defined:
# VQA - VQA class that loads VQA annotation file and prepares data structures.
# getQuesIds - Get question ids that satisfy given filter conditions.
# getImgIds - Get image ids that satisfy given filter conditions.
# loadQA - Load questions and answers with the specified question ids.
# showQA - Display the specified questions and answers.
# loadRes - Load result file and create result object.
# Help on each function can be accessed by: "help(VQA.function)"
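# Typical usage sketch (file paths are placeholders):
#  vqa     = VQA('annotations.json', 'questions.json')
#  quesIds = vqa.getQuesIds(ansTypes='yes/no')
#  anns    = vqa.loadQA(quesIds)
#  vqa.showQA(anns[:3])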
import json
import datetime
import copy
from tqdm import tqdm
from nltk.tokenize import word_tokenize
class VQA:
def __init__(self, annotation_file=None, question_file=None):
"""
Constructor of VQA helper class for reading and visualizing questions and answers.
:param annotation_file (str): location of VQA annotation file
:return:
"""
# load dataset
self.dataset = {}
self.questions = {}
self.qa = {}
self.qqa = {}
self.imgToQA = {}
self.max_ques_len = 0
if not annotation_file == None and not question_file == None:
print('loading VQA annotations and questions into memory...')
time_t = datetime.datetime.utcnow()
dataset = json.load(open(annotation_file, 'r'))
questions = json.load(open(question_file, 'r'))
print(datetime.datetime.utcnow() - time_t)
self.dataset = dataset
self.questions = questions
self.process_dataset()
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
imgToQA = {ann['image_id']: [] for ann in self.dataset['annotations']}
qa = {ann['question_id']: [] for ann in self.dataset['annotations']}
qqa = {ann['question_id']: [] for ann in self.dataset['annotations']}
max_ques_len = 0
for ann in self.dataset['annotations']:
imgToQA[ann['image_id']] += [ann]
qa[ann['question_id']] = ann
for ques in self.questions['questions']:
qqa[ques['question_id']] = ques
max_ques_len = max(max_ques_len,
len(word_tokenize(ques['question'])))
print('index created!')
# create class members
self.qa = qa
self.qqa = qqa
self.imgToQA = imgToQA
self.max_ques_len = max_ques_len
def info(self):
"""
Print information about the VQA annotation file.
:return:
"""
        for key, value in list(self.dataset['info'].items()):
print('%s: %s'%(key, value))
def getQuesIds(self, imgIds=[], quesTypes=[], ansTypes=[]):
"""
Get question ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get question ids for given imgs
quesTypes (str array) : get question ids for given question types
ansTypes (str array) : get question ids for given answer types
:return: ids (int array) : integer array of question ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(imgIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
anns = sum([self.imgToQA[imgId] for imgId in imgIds if imgId in self.imgToQA],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['question_id'] for ann in anns]
return ids
def getImgIds(self, quesIds=[], quesTypes=[], ansTypes=[]):
"""
Get image ids that satisfy given filter conditions. default skips that filter
:param quesIds (int array) : get image ids for given question ids
quesTypes (str array) : get image ids for given question types
ansTypes (str array) : get image ids for given answer types
:return: ids (int array) : integer array of image ids
"""
quesIds = quesIds if type(quesIds) == list else [quesIds]
quesTypes = quesTypes if type(quesTypes) == list else [quesTypes]
ansTypes = ansTypes if type(ansTypes) == list else [ansTypes]
if len(quesIds) == len(quesTypes) == len(ansTypes) == 0:
anns = self.dataset['annotations']
else:
if not len(quesIds) == 0:
anns = sum([self.qa[quesId] for quesId in quesIds if quesId in self.qa],[])
else:
anns = self.dataset['annotations']
anns = anns if len(quesTypes) == 0 else [ann for ann in anns if ann['question_type'] in quesTypes]
anns = anns if len(ansTypes) == 0 else [ann for ann in anns if ann['answer_type'] in ansTypes]
ids = [ann['image_id'] for ann in anns]
return ids
def loadQA(self, ids=[]):
"""
Load questions and answers with the specified question ids.
:param ids (int array) : integer ids specifying question ids
:return: qa (object array) : loaded qa objects
"""
if type(ids) == list:
return [self.qa[id] for id in ids]
elif type(ids) == int:
return [self.qa[ids]]
def showQA(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
for ann in anns:
quesId = ann['question_id']
print("Question: %s" %(self.qqa[quesId]['question']))
for ans in ann['answers']:
print("Answer %d: %s" %(ans['answer_id'], ans['answer']))
def loadRes(self, resFile, quesFile):
"""
Load result file and return a result object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = VQA()
res.questions = json.load(open(quesFile))
res.dataset['info'] = copy.deepcopy(self.questions['info'])
res.dataset['task_type'] = copy.deepcopy(self.questions['task_type'])
res.dataset['data_type'] = copy.deepcopy(self.questions['data_type'])
res.dataset['data_subtype'] = copy.deepcopy(self.questions['data_subtype'])
res.dataset['license'] = copy.deepcopy(self.questions['license'])
print('Loading and preparing results... ')
time_t = datetime.datetime.utcnow()
anns = json.load(open(resFile))
assert type(anns) == list, 'results is not an array of objects'
annsQuesIds = [ann['question_id'] for ann in anns]
assert set(annsQuesIds) == set(self.getQuesIds()), \
'Results do not correspond to current VQA set. Either the results do not have predictions for all question ids in annotation file or there is atleast one question id that does not belong to the question ids in the annotation file.'
for ann in anns:
quesId = ann['question_id']
if res.dataset['task_type'] == 'Multiple Choice':
assert ann['answer'] in self.qqa[quesId]['multiple_choices'], 'predicted answer is not one of the multiple choices'
qaAnn = self.qa[quesId]
ann['image_id'] = qaAnn['image_id']
ann['question_type'] = qaAnn['question_type']
ann['answer_type'] = qaAnn['answer_type']
print('DONE (t=%0.2fs)'%((datetime.datetime.utcnow() - time_t).total_seconds()))
res.dataset['annotations'] = anns
res.createIndex()
return res
def process_dataset(self):
for ann in self.dataset['annotations']:
count = {}
for ans in ann['answers']:
ans['answer'] = ans['answer'].lower()
count[ans['answer']] = count.get(ans['answer'], 0) + 1
sorted_ans = sorted(list(count.items()),
key=lambda x: x[1],
reverse=True)
best_ans, best_ans_count = sorted_ans[0]
ann['best_answer'] = best_ans
ann['best_answer_count'] = best_ans_count
for ques in self.questions['questions']:
q = ques['question']
q = q.replace('?', '')
q = q.lower()
ques['question'] = q
def filter_by_ques_len(self, max_ques_len):
print("Filtering the questions by length...")
keep_ques = {}
for ques in tqdm(self.questions['questions']):
if len(word_tokenize(ques['question'])) <= max_ques_len:
keep_ques[ques['question_id']] = \
keep_ques.get(ques['question_id'], 0) + 1
self.dataset['annotations'] = \
[ann for ann in self.dataset['annotations'] \
if keep_ques.get(ann['question_id'],0)>0]
self.questions['questions'] = \
[ques for ques in self.questions['questions'] \
if keep_ques.get(ques['question_id'],0)>0]
self.createIndex()
def filter_by_ans_len(self, max_ans_len, min_freq=5):
print("Filtering the answers by length...")
keep_ques = {}
for ann in tqdm(self.dataset['annotations']):
if len(word_tokenize(ann['best_answer'])) <= max_ans_len \
and ann['best_answer_count']>=min_freq:
keep_ques[ann['question_id']] = \
keep_ques.get(ann['question_id'], 0) + 1
self.dataset['annotations'] = \
[ann for ann in self.dataset['annotations'] \
if keep_ques.get(ann['question_id'],0)>0]
self.questions['questions'] = \
[ques for ques in self.questions['questions'] \
if keep_ques.get(ques['question_id'],0)>0]
self.createIndex()
| mit | -652,736,456,456,150,000 | 50.395833 | 247 | 0.491285 | false |
chrsrds/scikit-learn | examples/model_selection/plot_grid_search_digits.py | 7 | 2706 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.model_selection.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the image, to
# turn the data into a (samples, feature) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(
SVC(), tuned_parameters, scoring='%s_macro' % score
)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause | 537,184,166,535,805,440 | 33.692308 | 78 | 0.656689 | false |
Falkonry/falkonry-python-client | falkonryclient/helper/models/EntityMeta.py | 1 | 1138 | """
Falkonry Client
Client to access Condition Prediction APIs
:copyright: (c) 2016-2018 by Falkonry Inc.
:license: MIT, see LICENSE for more details.
"""
import json
class EntityMeta:
"""EntityMeta schema class"""
def __init__(self, **kwargs):
self.raw = kwargs.get('entityMeta') if 'entityMeta' in kwargs else {}
def set_label(self, label):
self.raw['label'] = label
return self
def get_label(self):
return self.raw['label'] if 'label' in self.raw else None
def get_id(self):
return self.raw['id'] if 'id' in self.raw else None
def get_sourceId(self):
return self.raw['sourceId'] if 'sourceId' in self.raw else None
def set_sourceId(self, sourceId):
self.raw['sourceId'] = sourceId
return self
def get_path(self):
return self.raw['path'] if 'path' in self.raw else None
def set_path(self, path):
self.raw['path'] = path
return self
def get_datastream(self):
return self.raw['datastream'] if 'datastream' in self.raw else None
def to_json(self):
return json.dumps(self.raw)
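# Usage sketch (field values are illustrative):
#   meta = EntityMeta(entityMeta={'id': 'entity-1', 'sourceId': 'sensor-42'})
#   meta.set_label('Sensor 42').set_path('plant/line-2')
#   payload = meta.to_json()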
| mit | -3,019,956,476,006,297,600 | 23.212766 | 77 | 0.623902 | false |
mdshw5/strandex | setup.py | 1 | 1338 | from setuptools import setup
install_requires = ['six']
def get_version(string):
""" Parse the version number variable __version__ from a script. """
import re
version_re = r"^__version__ = ['\"]([^'\"]*)['\"]"
version_str = re.search(version_re, string, re.M).group(1)
return version_str
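# e.g. get_version("__version__ = '1.2.3'") returns '1.2.3'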
setup(
name='strandex',
version=get_version(open('strandex/__init__.py').read()),
author='Matthew Shirley',
author_email='mdshw5@gmail.com',
url='https://github.com/mdshw5/strandex',
description='Strand-anchored regex for expansion or contraction of FASTQ files',
packages=['strandex'],
install_requires=install_requires,
entry_points = { 'console_scripts': [ 'strandex = strandex:main' ] },
license='MIT',
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering :: Bio-Informatics",
],
)
| mit | 6,688,157,721,223,414,000 | 37.228571 | 84 | 0.571749 | false |
lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2015_06_15/models/bgp_settings.py | 1 | 1394 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BgpSettings(Model):
"""BgpSettings.
:param asn: Gets or sets this BGP speaker's ASN
:type asn: long
:param bgp_peering_address: Gets or sets the BGP peering address and BGP
identifier of this BGP speaker
:type bgp_peering_address: str
:param peer_weight: Gets or sets the weight added to routes learned from
this BGP speaker
:type peer_weight: int
"""
_attribute_map = {
'asn': {'key': 'asn', 'type': 'long'},
'bgp_peering_address': {'key': 'bgpPeeringAddress', 'type': 'str'},
'peer_weight': {'key': 'peerWeight', 'type': 'int'},
}
def __init__(self, **kwargs):
super(BgpSettings, self).__init__(**kwargs)
self.asn = kwargs.get('asn', None)
self.bgp_peering_address = kwargs.get('bgp_peering_address', None)
self.peer_weight = kwargs.get('peer_weight', None)
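# Usage sketch (values are illustrative):
#   settings = BgpSettings(asn=65010, bgp_peering_address='10.0.0.254', peer_weight=0)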
| mit | -7,371,447,792,608,689,000 | 35.684211 | 76 | 0.586083 | false |
sxjscience/tvm | tests/python/unittest/test_auto_scheduler_cost_model.py | 1 | 2686 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test cost models"""
import tempfile
import numpy as np
import tvm
from tvm import auto_scheduler
from test_auto_scheduler_common import matmul_auto_scheduler_test
def get_sample_records(number):
"""Generate a list of random MeasureInput and MeasureResult pairs"""
N = 128
task = auto_scheduler.create_task(matmul_auto_scheduler_test, (N, N, N), "llvm")
policy = auto_scheduler.SketchPolicy(task, verbose=0)
states = policy.sample_initial_population(number)
inputs = [auto_scheduler.MeasureInput(task, s) for s in states]
results = [
auto_scheduler.MeasureResult([np.random.uniform(0.5, 1.0)], 0, "", 0.1, 0)
for _ in range(len(inputs))
]
return task, inputs, results
def test_random_model():
task, inputs, results = get_sample_records(50)
model = auto_scheduler.RandomModel()
model.update(inputs, results)
scores = model.predict(task, [x.state for x in inputs])
assert len(scores) == len(inputs)
def test_xgb_model():
task, inputs, results = get_sample_records(50)
model = auto_scheduler.XGBModel(num_warmup_sample=-1)
model.update(inputs, results)
preds = model.predict(task, [x.state for x in inputs])
assert len(preds) == len(inputs)
costs = [np.mean([x.value for x in res.costs]) for res in results]
throughputs = np.min(costs) / costs
# test regression quality
rmse = np.sqrt(np.mean([np.square(pred - label) for pred, label in zip(preds, throughputs)]))
assert rmse <= 0.3
# test loading a record file
with tempfile.NamedTemporaryFile() as fp:
auto_scheduler.save_records(fp.name, inputs, results)
model.update_from_file(fp.name)
# test model serialization
with tempfile.NamedTemporaryFile() as fp:
model.save(fp.name)
model.load(fp.name)
if __name__ == "__main__":
test_random_model()
test_xgb_model()
| apache-2.0 | 8,373,727,121,694,141,000 | 31.361446 | 97 | 0.698809 | false |
buscarini/meta | example_spec/platforms/swift_coredata/GlobalPlatform.py | 1 | 2121 | import os
import json
import sys
import meta
from meta.MetaProcessor import MetaProcessor
class GlobalPlatform(MetaProcessor):
"""docstring for Preprocessor"""
def __init__(self,config,stringUtils):
super(GlobalPlatform, self).__init__(config, stringUtils)
thisPath = os.path.realpath(__file__)
self.globalsPath = os.path.join(os.path.dirname(thisPath),'globals.json')
def preprocess(self,hash,hashes):
"""Make any preprocessing necessary for the platform"""
return self.addHashGlobals(hash)
def addHashGlobals(self,hashDic):
"""docstring for addHashGlobals"""
with open (self.globalsPath, "r") as file:
globalsString = file.read()
globalsDic = json.loads(globalsString)
hashDic['_globals_'] = globalsDic
return hashDic
def processProperty(self,property,hash,hashes):
property['_camelcase_'] = self.stringUtils.camelcase(str(property['name']))
property['_capitalized_'] = self.stringUtils.capitalize(str(property['name']))
type = property['type']
property['type_' + type] = True
if type=='string':
property['type'] = 'String'
property['object'] = True
elif type=='integer':
property['type'] = 'Int'
property['object'] = False
elif type=='float':
property['type'] = 'Float'
property['object'] = False
elif type=='double':
property['type'] = 'Double'
property['object'] = False
elif type=='bool':
property['type'] = 'Bool'
property['object'] = False
elif type=='date':
property['type'] = 'NSDate'
property['object'] = True
elif type=='url':
property['type'] = 'NSURL'
property['object'] = True
elif type=='image':
property['type'] = 'BMFIXImage'
property['object'] = True
else:
raise SyntaxError("Unknown property type: " + type)
| mit | 3,497,337,897,452,519,400 | 31.646154 | 86 | 0.558699 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtCore/QStateMachine.py | 1 | 4846 | # encoding: utf-8
# module PyQt4.QtCore
# from /usr/lib/python3/dist-packages/PyQt4/QtCore.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import sip as __sip
from .QState import QState
class QStateMachine(QState):
""" QStateMachine(QObject parent=None) """
def addDefaultAnimation(self, QAbstractAnimation): # real signature unknown; restored from __doc__
""" QStateMachine.addDefaultAnimation(QAbstractAnimation) """
pass
def addState(self, QAbstractState): # real signature unknown; restored from __doc__
""" QStateMachine.addState(QAbstractState) """
pass
def cancelDelayedEvent(self, p_int): # real signature unknown; restored from __doc__
""" QStateMachine.cancelDelayedEvent(int) -> bool """
return False
def clearError(self): # real signature unknown; restored from __doc__
""" QStateMachine.clearError() """
pass
def configuration(self): # real signature unknown; restored from __doc__
""" QStateMachine.configuration() -> set-of-QAbstractState """
pass
def defaultAnimations(self): # real signature unknown; restored from __doc__
""" QStateMachine.defaultAnimations() -> list-of-QAbstractAnimation """
pass
def error(self): # real signature unknown; restored from __doc__
""" QStateMachine.error() -> QStateMachine.Error """
pass
def errorString(self): # real signature unknown; restored from __doc__
""" QStateMachine.errorString() -> str """
return ""
def event(self, QEvent): # real signature unknown; restored from __doc__
""" QStateMachine.event(QEvent) -> bool """
return False
def eventFilter(self, QObject, QEvent): # real signature unknown; restored from __doc__
""" QStateMachine.eventFilter(QObject, QEvent) -> bool """
return False
def globalRestorePolicy(self): # real signature unknown; restored from __doc__
""" QStateMachine.globalRestorePolicy() -> QStateMachine.RestorePolicy """
pass
def isAnimated(self): # real signature unknown; restored from __doc__
""" QStateMachine.isAnimated() -> bool """
return False
def isRunning(self): # real signature unknown; restored from __doc__
""" QStateMachine.isRunning() -> bool """
return False
def onEntry(self, QEvent): # real signature unknown; restored from __doc__
""" QStateMachine.onEntry(QEvent) """
pass
def onExit(self, QEvent): # real signature unknown; restored from __doc__
""" QStateMachine.onExit(QEvent) """
pass
def postDelayedEvent(self, QEvent, p_int): # real signature unknown; restored from __doc__
""" QStateMachine.postDelayedEvent(QEvent, int) -> int """
return 0
def postEvent(self, QEvent, QStateMachine_EventPriority_priority=None): # real signature unknown; restored from __doc__
""" QStateMachine.postEvent(QEvent, QStateMachine.EventPriority priority=QStateMachine.NormalPriority) """
pass
def removeDefaultAnimation(self, QAbstractAnimation): # real signature unknown; restored from __doc__
""" QStateMachine.removeDefaultAnimation(QAbstractAnimation) """
pass
def removeState(self, QAbstractState): # real signature unknown; restored from __doc__
""" QStateMachine.removeState(QAbstractState) """
pass
def setAnimated(self, bool): # real signature unknown; restored from __doc__
""" QStateMachine.setAnimated(bool) """
pass
def setGlobalRestorePolicy(self, QStateMachine_RestorePolicy): # real signature unknown; restored from __doc__
""" QStateMachine.setGlobalRestorePolicy(QStateMachine.RestorePolicy) """
pass
def start(self): # real signature unknown; restored from __doc__
""" QStateMachine.start() """
pass
def started(self, *args, **kwargs): # real signature unknown
""" QStateMachine.started [signal] """
pass
def stop(self): # real signature unknown; restored from __doc__
""" QStateMachine.stop() """
pass
def stopped(self, *args, **kwargs): # real signature unknown
""" QStateMachine.stopped [signal] """
pass
def __init__(self, QObject_parent=None): # real signature unknown; restored from __doc__
pass
DontRestoreProperties = 0
Error = None # (!) real value is ''
EventPriority = None # (!) real value is ''
HighPriority = 1
NoCommonAncestorForTransitionError = 3
NoDefaultStateInHistoryStateError = 2
NoError = 0
NoInitialStateError = 1
NormalPriority = 0
RestorePolicy = None # (!) real value is ''
RestoreProperties = 1
SignalEvent = None # (!) real value is ''
WrappedEvent = None # (!) real value is ''
| gpl-2.0 | -69,707,933,853,761,150 | 35.712121 | 123 | 0.65064 | false |
fogleman/primitive | bot/main.py | 1 | 7620 | import datetime
import os
import random
import requests
import subprocess
import time
import traceback
import twitter
RATE = 60 * 30
MENTION_RATE = 65
INPUT_FOLDER = ''
OUTPUT_FOLDER = ''
FLICKR_API_KEY = None
TWITTER_CONSUMER_KEY = None
TWITTER_CONSUMER_SECRET = None
TWITTER_ACCESS_TOKEN_KEY = None
TWITTER_ACCESS_TOKEN_SECRET = None
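# Shape names indexed by the value passed to primitive's -m (mode) flag.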
MODE_NAMES = [
'primitives', # 0
'triangles', # 1
'rectangles', # 2
'ellipses', # 3
'circles', # 4
'rectangles', # 5
'beziers', # 6
'ellipses', # 7
'polygons', # 8
]
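# Mention-handling state: last seen tweet id, bot start time, and the last
# time each user was answered (used for a per-user cooldown).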
SINCE_ID = None
START_DATETIME = datetime.datetime.utcnow()
USER_DATETIME = {}
try:
from config import *
except ImportError:
print 'no config found!'
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Config(AttrDict):
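    # Holds the flag values passed to the primitive binary (-m, -n, -rep, -a, -r, -s).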
def randomize(self):
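        # Pick a random shape mode and shape count; the other flags stay fixed.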
self.m = random.choice([1, 5, 6, 7])
self.n = random.randint(10, 50) * 10
self.rep = 0
self.a = 128
self.r = 300
self.s = 1200
def parse(self, text):
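        # Pull an optional shape name and count out of the mention text.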
text = (text or '').lower()
tokens = text.split()
for i, name in enumerate(MODE_NAMES):
if name in text:
self.m = i
for token in tokens:
try:
self.n = int(token)
except Exception:
                pass  # ignore tokens that are not numbers
def validate(self):
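        # Clamp values to safe ranges; bezier mode (6) gets its own settings.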
self.m = clamp(self.m, 0, 8)
if self.m == 6:
self.a = 0
self.rep = 19
self.n = 100
else:
self.n = clamp(self.n, 1, 500)
@property
def description(self):
total = self.n + self.n * self.rep
return '%d %s' % (total, MODE_NAMES[self.m])
def clamp(x, lo, hi):
if x < lo:
x = lo
if x > hi:
x = hi
return x
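# Return a YYYY-MM-DD string for a random day within the last max_days_ago days.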
def random_date(max_days_ago=1000):
today = datetime.date.today()
days = random.randint(1, max_days_ago)
d = today - datetime.timedelta(days=days)
return d.strftime('%Y-%m-%d')
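# Query Flickr's interestingness API for a day's most interesting photos.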
def interesting(date=None):
url = 'https://api.flickr.com/services/rest/'
params = dict(
api_key=FLICKR_API_KEY,
format='json',
nojsoncallback=1,
method='flickr.interestingness.getList',
)
if date:
params['date'] = date
r = requests.get(url, params=params)
return r.json()['photos']['photo']
def photo_url(p, size=None):
# See: https://www.flickr.com/services/api/misc.urls.html
if size:
url = 'https://farm%s.staticflickr.com/%s/%s_%s_%s.jpg'
return url % (p['farm'], p['server'], p['id'], p['secret'], size)
else:
url = 'https://farm%s.staticflickr.com/%s/%s_%s.jpg'
return url % (p['farm'], p['server'], p['id'], p['secret'])
def download_photo(url, path):
r = requests.get(url)
with open(path, 'wb') as fp:
fp.write(r.content)
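# Build and run the primitive command line from keyword arguments (-i, -o, -m, ...).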
def primitive(**kwargs):
args = []
for k, v in kwargs.items():
if v is None:
continue
args.append('-%s' % k)
args.append(str(v))
args = ' '.join(args)
cmd = 'primitive %s' % args
subprocess.call(cmd, shell=True)
def twitter_api():
return twitter.Api(
consumer_key=TWITTER_CONSUMER_KEY,
consumer_secret=TWITTER_CONSUMER_SECRET,
access_token_key=TWITTER_ACCESS_TOKEN_KEY,
access_token_secret=TWITTER_ACCESS_TOKEN_SECRET)
def tweet(status, media, in_reply_to_status_id=None):
api = twitter_api()
api.PostUpdate(status, media, in_reply_to_status_id=in_reply_to_status_id)
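# Check for new @-mentions since the last seen tweet and handle each one in order.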
def handle_mentions():
global SINCE_ID
print 'checking for mentions'
api = twitter_api()
statuses = api.GetMentions(200, SINCE_ID)
for status in reversed(statuses):
SINCE_ID = status.id
print 'handling mention', status.id
handle_mention(status)
print 'done with mentions'
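# Handle a single mention: validate it, download the attached photo, run
# primitive with options parsed from the tweet text, and reply with the result.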
def handle_mention(status):
mentions = status.user_mentions or []
if len(mentions) != 1:
print 'mention does not have exactly one mention'
return
media = status.media or []
if len(media) != 1:
print 'mention does not have exactly one media'
return
url = media[0].media_url or None
if not url:
print 'mention does not have a media_url'
return
created_at = datetime.datetime.strptime(
status.created_at, '%a %b %d %H:%M:%S +0000 %Y')
if created_at < START_DATETIME:
print 'mention timestamp before bot started'
return
user_id = status.user.id
now = datetime.datetime.utcnow()
td = datetime.timedelta(minutes=5)
if user_id in USER_DATETIME:
if now - USER_DATETIME[user_id] < td:
print 'user mentioned me too recently'
return
USER_DATETIME[user_id] = now
in_path = os.path.join(INPUT_FOLDER, '%s.jpg' % status.id)
out_path = os.path.join(OUTPUT_FOLDER, '%s.png' % status.id)
print 'downloading', url
download_photo(url, in_path)
config = Config()
config.randomize()
config.parse(status.text)
config.validate()
status_text = '@%s %s.' % (status.user.screen_name, config.description)
print status_text
print 'running algorithm: %s' % config
primitive(i=in_path, o=out_path, **config)
if os.path.exists(out_path):
print 'uploading to twitter'
tweet(status_text, out_path, status.id)
print 'done'
else:
print 'failed!'
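# Build a short flic.kr URL for a photo id (base-58 encoding).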
def flickr_url(photo_id):
alphabet = '123456789abcdefghijkmnopqrstuvwxyzABCDEFGHJKLMNPQRSTUVWXYZ'
return 'https://flic.kr/p/%s' % base_encode(alphabet, int(photo_id))
def base_encode(alphabet, number, suffix=''):
base = len(alphabet)
if number >= base:
div, mod = divmod(number, base)
return base_encode(alphabet, div, alphabet[mod] + suffix)
else:
return alphabet[number] + suffix
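# Pick a random interesting Flickr photo, run primitive on it, and tweet the result.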
def generate():
date = random_date()
print 'finding an interesting photo from', date
photos = interesting(date)
photo = random.choice(photos)
print 'picked photo', photo['id']
in_path = os.path.join(INPUT_FOLDER, '%s.jpg' % photo['id'])
out_path = os.path.join(OUTPUT_FOLDER, '%s.png' % photo['id'])
url = photo_url(photo, 'z')
print 'downloading', url
download_photo(url, in_path)
config = Config()
config.randomize()
config.validate()
status_text = '%s. %s' % (config.description, flickr_url(photo['id']))
print status_text
print 'running algorithm: %s' % config
primitive(i=in_path, o=out_path, **config)
if os.path.exists(out_path):
print 'uploading to twitter'
tweet(status_text, out_path)
print 'done'
else:
print 'failed!'
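# Main loop: post a new image every RATE seconds and poll mentions every MENTION_RATE seconds.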
def main():
previous = 0
mention_previous = 0
while True:
now = time.time()
if now - previous > RATE:
previous = now
try:
generate()
except Exception:
traceback.print_exc()
if now - mention_previous > MENTION_RATE:
mention_previous = now
try:
handle_mentions()
except Exception:
traceback.print_exc()
time.sleep(5)
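# Utility for bulk-downloading a day's interesting photos; not called from main().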
def download_photos(folder, date=None):
try:
os.makedirs(folder)
except Exception:
pass
date = date or random_date()
photos = interesting(date)
for photo in photos:
url = photo_url(photo, 'z')
path = '%s.jpg' % photo['id']
path = os.path.join(folder, path)
download_photo(url, path)
if __name__ == '__main__':
main()
| mit | 2,247,956,506,340,612,600 | 27.432836 | 78 | 0.584121 | false |